-Fixed a problem parsing section headers. Added an extra check for section headers containing only null bytes.
-Improved the detection of corrupt resource names.
-Fixed misspelled words.

git-svn-id: http://pefile.googlecode.com/svn/trunk@121 8842bc4e-7134-0410-8230-5dc5194fb5c1
diff --git a/pefile.py b/pefile.py
index 5725c25..f4ae790 100644
--- a/pefile.py
+++ b/pefile.py
@@ -69,7 +69,7 @@
 # This will set a maximum length of a string to be retrieved from the file.
 # It's there to prevent loading massive amounts of data from memory mapped
 # files. Strings longer than 1MB should be rather rare.
-MAX_STRING_LENGTH = 0x100000 # 2^20 
+MAX_STRING_LENGTH = 0x100000 # 2^20
 
 IMAGE_DOS_SIGNATURE             = 0x5A4D
 IMAGE_DOSZM_SIGNATURE           = 0x4D5A
@@ -126,7 +126,7 @@
 
 IMAGE_CHARACTERISTICS = dict([(e[1], e[0]) for e in
     image_characteristics]+image_characteristics)
-    
+
 
 section_characteristics = [
     ('IMAGE_SCN_CNT_CODE',                  0x00000020),
@@ -288,7 +288,7 @@
     ('RT_MANIFEST',        24) ]
 
 RESOURCE_TYPE = dict([(e[1], e[0]) for e in resource_type]+resource_type)
-    
+
 
 # Language definitions
 lang = [
@@ -506,13 +506,13 @@
         SUBLANG[ sublang_value ].append( sublang_name )
     else:
         SUBLANG[ sublang_value ] = [ sublang_name ]
-        
+
 # Resolve a sublang name given the main lang name
 #
 def get_sublang_name_for_lang( lang_value, sublang_value ):
     lang_name = LANG.get(lang_value, '*unknown*')
     for sublang_name in SUBLANG.get(sublang_value, list()):
-        # if the main language is a substring of sublang's name, then 
+        # if the main language is a substring of sublang's name, then
         # return that
         if lang_name in sublang_name:
             return sublang_name
@@ -526,11 +526,11 @@
     i = 0
     error_count = 0
     while i < len(data):
-        
+
         data_slice = data[i:i + 2]
         if len(data_slice) < 2:
             break
-        
+
         len_ = struct.unpack("<h", data_slice)[0]
         i += 2
         if len_ != 0 and 0 <= len_*2 <= len(data):
@@ -547,11 +547,11 @@
 
 def retrieve_flags(flag_dict, flag_filter):
     """Read the flags from a dictionary and return them in a usable form.
-    
+
     Will return a list of (flag, value) for all flags in "flag_dict"
     matching the filter "flag_filter".
     """
-    
+
     return [(f[0], f[1]) for f in flag_dict.items() if
             isinstance(f[0], str) and f[0].startswith(flag_filter)]
 
@@ -588,146 +588,143 @@
     that might be plain Unicode or Pascal. A list of strings will be
     wrapped on it with the hope the overlappings will help make the
     decision about their type."""
-    
+
     def __init__(self, pe, rva_ptr):
         self.pe = pe
         self.rva_ptr = rva_ptr
         self.string = None
-        
-    
+
+
     def get_rva(self):
         """Get the RVA of the string."""
-        
+
         return self.rva_ptr
-        
-    
+
+
     def __str__(self):
         """Return the escaped ASCII representation of the string."""
-        
+
         def convert_char(char):
             if char in string.printable:
                 return char
             else:
                 return r'\x%02x' % ord(char)
-        
+
         if self.string:
             return ''.join([convert_char(c) for c in self.string])
-        
+
         return ''
-        
-    
+
+
     def invalidate(self):
         """Make this instance None, to express it's no known string type."""
-        
+
         self = None
-        
-    
+
+
     def render_pascal_16(self):
-        
+
         self.string = self.pe.get_string_u_at_rva(
             self.rva_ptr+2,
-            max_length=self.__get_pascal_16_length())
-        
-    
+            max_length=self.get_pascal_16_length())
+
+
     def ask_pascal_16(self, next_rva_ptr):
         """The next RVA is taken to be the one immediately following this one.
-        
+
         Such RVA could indicate the natural end of the string and will be checked
         with the possible length contained in the first word.
         """
-        
-        length = self.__get_pascal_16_length()
-        
+
+        length = self.get_pascal_16_length()
+
         if length == (next_rva_ptr - (self.rva_ptr+2)) / 2:
             self.length = length
             return True
-        
+
         return False
-        
-    
-    def __get_pascal_16_length(self):
-        
+
+
+    def get_pascal_16_length(self):
+
         return self.__get_word_value_at_rva(self.rva_ptr)
-        
-    
+
+
     def __get_word_value_at_rva(self, rva):
-        
+
         try:
             data = self.pe.get_data(self.rva_ptr, 2)
         except PEFormatError, e:
             return False
-        
+
         if len(data)<2:
             return False
-        
+
         return struct.unpack('<H', data)[0]
-        
-    
-    #def render_pascal_8(self):
-    #    """"""
-        
-    
+
+
+
     def ask_unicode_16(self, next_rva_ptr):
         """The next RVA is taken to be the one immediately following this one.
-        
+
         Such RVA could indicate the natural end of the string and will be checked
         to see if there's a Unicode NULL character there.
         """
-        
+
         if self.__get_word_value_at_rva(next_rva_ptr-2) == 0:
             self.length = next_rva_ptr - self.rva_ptr
             return True
-        
+
         return False
-        
-    
+
+
     def render_unicode_16(self):
         """"""
-        
+
         self.string = self.pe.get_string_u_at_rva(self.rva_ptr)
-    
+
 
 class PEFormatError(Exception):
     """Generic PE format error exception."""
-    
+
     def __init__(self, value):
         self.value = value
-    
+
     def __str__(self):
         return repr(self.value)
 
 
 class Dump:
     """Convenience class for dumping the PE information."""
-    
+
     def __init__(self):
         self.text = list()
-        
-    
+
+
     def add_lines(self, txt, indent=0):
         """Adds a list of lines.
-        
+
         The list can be indented with the optional argument 'indent'.
         """
         for line in txt:
             self.add_line(line, indent)
-            
-    
+
+
     def add_line(self, txt, indent=0):
         """Adds a line.
-        
+
         The line can be indented with the optional argument 'indent'.
         """
-        
+
         self.add(txt+'\n', indent)
-        
-    
+
+
     def add(self, txt, indent=0):
         """Adds some text, no newline will be appended.
-        
+
         The text can be indented with the optional argument 'indent'.
         """
-        
+
         if isinstance(txt, unicode):
             try:
                 txt = str(txt)
@@ -738,45 +735,45 @@
                         s.append(str(c))
                     except UnicodeEncodeError:
                         s.append(repr(c))
-                
+
                 txt = ''.join(s)
-        
+
         self.text.append( ' '*indent + txt )
-        
-    
+
+
     def add_header(self, txt):
         """Adds a header element."""
-        
+
         self.add_line('-'*10+txt+'-'*10+'\n')
-        
-    
+
+
     def add_newline(self):
         """Adds a newline."""
-        
+
         self.text.append( '\n' )
-        
-    
+
+
     def get_text(self):
         """Get the text in its current state."""
-        
+
         return ''.join( self.text )
 
 
 STRUCT_SIZEOF_TYPES = {
-    'x': 1, 'c': 1, 'b': 1, 'B': 1, 
-    'h': 2, 'H': 2, 
+    'x': 1, 'c': 1, 'b': 1, 'B': 1,
+    'h': 2, 'H': 2,
     'i': 4, 'I': 4, 'l': 4, 'L': 4, 'f': 4,
     'q': 8, 'Q': 8, 'd': 8,
     's': 1 }
 
 class Structure:
     """Prepare structure object to extract members from data.
-    
+
     Format is a list containing definitions for the elements
     of the structure.
     """
-    
-    
+
+
     def __init__(self, format, name=None, file_offset=None):
         # Format is forced little endian, for big endian non Intel platforms
         self.__format__ = '<'
@@ -792,11 +789,11 @@
             self.name = name
         else:
             self.name = format[0]
-            
-    
+
+
     def __get_format__(self):
         return self.__format__
-        
+
     def get_field_absolute_offset(self, field_name):
         """Return the offset within the field for the requested field in the structure."""
         return self.__file_offset__ + self.__field_offsets__[field_name]
@@ -804,18 +801,18 @@
     def get_field_relative_offset(self, field_name):
         """Return the offset within the structure for the requested field."""
         return self.__field_offsets__[field_name]
-    
+
     def get_file_offset(self):
         return self.__file_offset__
-    
+
     def set_file_offset(self, offset):
         self.__file_offset__ = offset
-    
+
     def all_zeroes(self):
-        """Returns true is the unpacked data is all zeroes."""
-        
+        """Returns true if the unpacked data is all zeros."""
+
         return self.__all_zeroes__
-                
+
     def sizeof_type(self, t):
         count = 1
         _t = t
@@ -824,15 +821,15 @@
             count = int( ''.join([d for d in t if d in string.digits]) )
             _t = ''.join([d for d in t if d not in string.digits])
         return STRUCT_SIZEOF_TYPES[_t] * count
-    
+
     def __set_format__(self, format):
-        
+
         offset = 0
         for elm in format:
             if ',' in elm:
                 elm_type, elm_name = elm.split(',', 1)
                 self.__format__ += elm_type
-                
+
                 elm_names = elm_name.split(',')
                 names = []
                 for elm_name in elm_names:
@@ -849,21 +846,21 @@
                 # value might have different names, so each key has a list of
                 # all the possible members referring to the data.
                 self.__keys__.append(names)
-        
+
         self.__format_length__ = struct.calcsize(self.__format__)
-        
-    
+
+
     def sizeof(self):
         """Return size of the structure."""
-        
+
         return self.__format_length__
-        
-    
+
+
     def __unpack__(self, data):
-        
+
         if len(data) > self.__format_length__:
             data = data[:self.__format_length__]
-        
+
         # OC Patch:
         # Some malware have incorrect header lengths.
         # Fail gracefully if this occurs
@@ -871,57 +868,57 @@
         #
         elif len(data) < self.__format_length__:
             raise PEFormatError('Data length less than expected header length.')
-            
-        
+
+
         if data.count(chr(0)) == len(data):
             self.__all_zeroes__ = True
-        
+
         self.__unpacked_data_elms__ = struct.unpack(self.__format__, data)
         for i in xrange(len(self.__unpacked_data_elms__)):
             for key in self.__keys__[i]:
                 #self.values[key] = self.__unpacked_data_elms__[i]
                 setattr(self, key, self.__unpacked_data_elms__[i])
-    
-    
+
+
     def __pack__(self):
-        
+
         new_values = []
-        
+
         for i in xrange(len(self.__unpacked_data_elms__)):
-            
+
             for key in self.__keys__[i]:
                 new_val = getattr(self, key)
                 old_val = self.__unpacked_data_elms__[i]
-                
+
                 # In the case of Unions, when the first changed value
                 # is picked the loop is exited
                 if new_val != old_val:
                     break
-            
+
             new_values.append(new_val)
-        
+
         return struct.pack(self.__format__, *new_values)
-                
-    
+
+
     def __str__(self):
         return '\n'.join( self.dump() )
-    
+
     def __repr__(self):
         return '<Structure: %s>' % (' '.join( [' '.join(s.split()) for s in self.dump()] ))
-        
-    
+
+
     def dump(self, indentation=0):
         """Returns a string representation of the structure."""
-        
+
         dump = []
-        
+
         dump.append('[%s]' % self.name)
-        
+
         # Refer to the __set_format__ method for an explanation
         # of the following construct.
         for keys in self.__keys__:
             for key in keys:
-                
+
                 val = getattr(self, key)
                 if isinstance(val, int) or isinstance(val, long):
                     val_str = '0x%-8X' % (val)
@@ -932,110 +929,110 @@
                             val_str += ' [INVALID TIME]'
                 else:
                     val_str = ''.join(filter(lambda c:c != '\0', str(val)))
-                
+
                 dump.append('0x%-8X 0x%-3X %-30s %s' % (
-                    self.__field_offsets__[key] + self.__file_offset__, 
+                    self.__field_offsets__[key] + self.__file_offset__,
                     self.__field_offsets__[key], key+':', val_str))
-        
+
         return dump
 
 
 
 class SectionStructure(Structure):
     """Convenience section handling class."""
-    
+
     def __init__(self, *argl, **argd):
         if 'pe' in argd:
             self.pe = argd['pe']
             del argd['pe']
-            
+
         Structure.__init__(self, *argl, **argd)
-        
+
     def get_data(self, start=None, length=None):
         """Get data chunk from a section.
-        
+
         Allows to query data from the section by passing the
         addresses where the PE file would be loaded by default.
         It is then possible to retrieve code and data by its real
         addresses as it would be if loaded.
         """
-        
+
         PointerToRawData_adj = self.pe.adjust_FileAlignment( self.PointerToRawData,
             self.pe.OPTIONAL_HEADER.FileAlignment )
-        VirtualAddress_adj = self.pe.adjust_SectionAlignment( self.VirtualAddress, 
+        VirtualAddress_adj = self.pe.adjust_SectionAlignment( self.VirtualAddress,
             self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment )
 
         if start is None:
             offset = PointerToRawData_adj
         else:
             offset = ( start - VirtualAddress_adj ) + PointerToRawData_adj
-        
+
         if length is not None:
             end = offset + length
         else:
             end = offset + self.SizeOfRawData
-            
+
         # PointerToRawData is not adjusted here as we might want to read any possible extra bytes
         # that might get cut off by aligning the start (and hence cutting something off the end)
         #
         if end > self.PointerToRawData + self.SizeOfRawData:
             end = self.PointerToRawData + self.SizeOfRawData
-        
+
         return self.pe.__data__[offset:end]
-    
-    
+
+
     def __setattr__(self, name, val):
-    
+
         if name == 'Characteristics':
             section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
-            
+
             # Set the section's flags according the the Characteristics member
             set_flags(self, val, section_flags)
-            
+
         elif 'IMAGE_SCN_' in name and hasattr(self, name):
             if val:
                 self.__dict__['Characteristics'] |= SECTION_CHARACTERISTICS[name]
             else:
                 self.__dict__['Characteristics'] ^= SECTION_CHARACTERISTICS[name]
-                
+
         self.__dict__[name] = val
-    
-    
+
+
     def get_rva_from_offset(self, offset):
         return offset - self.pe.adjust_FileAlignment( self.PointerToRawData,
-            self.pe.OPTIONAL_HEADER.FileAlignment ) + self.pe.adjust_SectionAlignment( self.VirtualAddress, 
+            self.pe.OPTIONAL_HEADER.FileAlignment ) + self.pe.adjust_SectionAlignment( self.VirtualAddress,
             self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment )
-    
-    
+
+
     def get_offset_from_rva(self, rva):
-        return (rva - 
-            self.pe.adjust_SectionAlignment( 
-                self.VirtualAddress, 
-                self.pe.OPTIONAL_HEADER.SectionAlignment, 
+        return (rva -
+            self.pe.adjust_SectionAlignment(
+                self.VirtualAddress,
+                self.pe.OPTIONAL_HEADER.SectionAlignment,
                 self.pe.OPTIONAL_HEADER.FileAlignment )
-            ) + self.pe.adjust_FileAlignment( 
+            ) + self.pe.adjust_FileAlignment(
                 self.PointerToRawData,
                 self.pe.OPTIONAL_HEADER.FileAlignment )
-    
-    
+
+
     def contains_offset(self, offset):
         """Check whether the section contains the file offset provided."""
-        
+
         if self.PointerToRawData is None:
            # bss and other sections containing only uninitialized data must have 0
            # and do not take space in the file
            return False
-        return ( self.pe.adjust_FileAlignment( self.PointerToRawData, 
-                self.pe.OPTIONAL_HEADER.FileAlignment ) <= 
-                    offset < 
+        return ( self.pe.adjust_FileAlignment( self.PointerToRawData,
+                self.pe.OPTIONAL_HEADER.FileAlignment ) <=
+                    offset <
                         self.pe.adjust_FileAlignment( self.PointerToRawData,
-                            self.pe.OPTIONAL_HEADER.FileAlignment ) + 
+                            self.pe.OPTIONAL_HEADER.FileAlignment ) +
                                 self.SizeOfRawData )
-    
-    
+
+
     def contains_rva(self, rva):
         """Check whether the section contains the address provided."""
-        
+
         # Check if the SizeOfRawData is realistic. If it's bigger than the size of
         # the whole PE file minus the start address of the section it could be
         # either truncated or the SizeOfRawData contain a misleading value.
@@ -1053,88 +1050,88 @@
         else:
             size = max(self.SizeOfRawData, self.Misc_VirtualSize)
 
-        VirtualAddress_adj = self.pe.adjust_SectionAlignment( self.VirtualAddress, 
+        VirtualAddress_adj = self.pe.adjust_SectionAlignment( self.VirtualAddress,
             self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment )
-        
+
         # Check if there's any further section that will start before the
         # calculated end for the current one, if so cut the current's size
         # to fit in the range until the next one starts.
         for s in self.pe.sections:
             if s.VirtualAddress > self.VirtualAddress and VirtualAddress_adj + size > s.VirtualAddress:
                     size = s.VirtualAddress - VirtualAddress_adj
-        
+
         return VirtualAddress_adj <= rva < VirtualAddress_adj + size
-    
-    
+
+
     def contains(self, rva):
         #print "DEPRECATION WARNING: you should use contains_rva() instead of contains()"
         return self.contains_rva(rva)
-    
-    
+
+
     #def set_data(self, data):
     #    """Set the data belonging to the section."""
-    #    
+    #
     #    self.data = data
-        
-    
+
+
     def get_entropy(self):
         """Calculate and return the entropy for the section."""
-        
+
         return self.entropy_H( self.get_data() )
-    
-    
+
+
     def get_hash_sha1(self):
         """Get the SHA-1 hex-digest of the section's data."""
-        
+
         if sha1 is not None:
             return sha1( self.get_data() ).hexdigest()
-    
-    
+
+
     def get_hash_sha256(self):
         """Get the SHA-256 hex-digest of the section's data."""
-        
+
         if sha256 is not None:
             return sha256( self.get_data() ).hexdigest()
-    
-    
+
+
     def get_hash_sha512(self):
         """Get the SHA-512 hex-digest of the section's data."""
-        
+
         if sha512 is not None:
             return sha512( self.get_data() ).hexdigest()
-    
-    
+
+
     def get_hash_md5(self):
         """Get the MD5 hex-digest of the section's data."""
-        
+
         if md5 is not None:
             return md5( self.get_data() ).hexdigest()
-    
-    
+
+
     def entropy_H(self, data):
         """Calculate the entropy of a chunk of data."""
-        
+
         if len(data) == 0:
             return 0.0
-        
+
         occurences = array.array('L', [0]*256)
-        
+
         for x in data:
             occurences[ord(x)] += 1
-        
+
         entropy = 0
         for x in occurences:
             if x:
                 p_x = float(x) / len(data)
                 entropy -= p_x*math.log(p_x, 2)
-        
+
         return entropy
 
 
 
 class DataContainer:
     """Generic data container."""
-    
+
     def __init__(self, **args):
         for key, value in args.items():
             setattr(self, key, value)
@@ -1143,7 +1140,7 @@
 
 class ImportDescData(DataContainer):
     """Holds import descriptor information.
-    
+
     dll:        name of the imported DLL
     imports:    list of imported symbols (ImportData instances)
     struct:     IMAGE_IMPORT_DESCRIPTOR structure
@@ -1151,14 +1148,14 @@
 
 class ImportData(DataContainer):
     """Holds imported symbol's information.
-    
+
     ordinal:    Ordinal of the symbol
     name:       Name of the symbol
     bound:      If the symbol is bound, this contains
                 the address.
     """
 
-    
+
     def __setattr__(self, name, val):
 
         # If the instance doesn't yet have an ordinal attribute
@@ -1166,7 +1163,7 @@
         # following
         #
         if hasattr(self, 'ordinal') and hasattr(self, 'bound') and hasattr(self, 'name'):
-        
+
             if name == 'ordinal':
 
                 if self.pe.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
@@ -1174,7 +1171,7 @@
                 elif self.pe.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
                     ordinal_flag = IMAGE_ORDINAL_FLAG64
 
-                # Set the ordinal and flag the entry as imporing by ordinal
+                # Set the ordinal and flag the entry as importing by ordinal
                 self.struct_table.Ordinal = ordinal_flag | (val & 0xffff)
                 self.struct_table.AddressOfData = self.struct_table.Ordinal
                 self.struct_table.Function = self.struct_table.Ordinal
@@ -1196,7 +1193,7 @@
 
                     name_rva = self.pe.get_rva_from_offset( self.name_offset )
                     self.pe.set_dword_at_offset( self.ordinal_offset, (0<<31) | name_rva )
-                
+
                     # Complain if the length of the new name is longer than the existing one
                     if len(val) > len(self.name):
                         #raise Exception('The export name provided is longer than the existing one.')
@@ -1204,18 +1201,18 @@
                     self.pe.set_bytes_at_offset( self.name_offset, val )
 
         self.__dict__[name] = val
-        
-        
+
+
 class ExportDirData(DataContainer):
     """Holds export directory information.
-    
+
     struct:     IMAGE_EXPORT_DIRECTORY structure
     symbols:    list of exported symbols (ExportData instances)
 """
 
 class ExportData(DataContainer):
     """Holds exported symbols' information.
-    
+
     ordinal:    ordinal of the symbol
     address:    address of the symbol
     name:       name of the symbol (None if the symbol is
@@ -1232,7 +1229,7 @@
         # following
         #
         if hasattr(self, 'ordinal') and hasattr(self, 'address') and hasattr(self, 'forwarder') and hasattr(self, 'name'):
-        
+
             if name == 'ordinal':
                 self.pe.set_word_at_offset( self.ordinal_offset, val )
             elif name == 'address':
@@ -1255,14 +1252,14 @@
 
 class ResourceDirData(DataContainer):
     """Holds resource directory information.
-    
+
     struct:     IMAGE_RESOURCE_DIRECTORY structure
     entries:    list of entries (ResourceDirEntryData instances)
     """
 
 class ResourceDirEntryData(DataContainer):
     """Holds resource directory entry data.
-    
+
     struct:     IMAGE_RESOURCE_DIRECTORY_ENTRY structure
     name:       If the resource is identified by name this
                 attribute will contain the name string. None
@@ -1282,7 +1279,7 @@
 
 class ResourceDataEntryData(DataContainer):
     """Holds resource data entry information.
-    
+
     struct:     IMAGE_RESOURCE_DATA_ENTRY structure
     lang:       Primary language ID
     sublang:    Sublanguage ID
@@ -1290,20 +1287,20 @@
 
 class DebugData(DataContainer):
     """Holds debug information.
-    
+
     struct:     IMAGE_DEBUG_DIRECTORY structure
     """
 
 class BaseRelocationData(DataContainer):
     """Holds base relocation information.
-    
+
     struct:     IMAGE_BASE_RELOCATION structure
     entries:    list of relocation data (RelocationData instances)
     """
 
 class RelocationData(DataContainer):
     """Holds relocation information.
-    
+
     type:       Type of relocation
                 The type string is can be obtained by
                 RELOCATION_TYPE[type]
@@ -1319,7 +1316,7 @@
             # Get the word containing the type and data
             #
             word = self.struct.Data
-        
+
             if name == 'type':
                 word = (val << 12) | (word & 0xfff)
             elif name == 'rva':
@@ -1336,20 +1333,20 @@
 
 class TlsData(DataContainer):
     """Holds TLS information.
-    
+
     struct:     IMAGE_TLS_DIRECTORY structure
     """
 
 class BoundImportDescData(DataContainer):
     """Holds bound import descriptor data.
-    
+
     This directory entry will provide with information on the
     DLLs this PE files has been bound to (if bound at all).
     The structure will contain the name and timestamp of the
     DLL at the time of binding so that the loader can know
     whether it differs from the one currently present in the
     system and must, therefore, re-bind the PE's imports.
-    
+
     struct:     IMAGE_BOUND_IMPORT_DESCRIPTOR structure
     name:       DLL name
     entries:    list of entries (BoundImportRefData instances)
@@ -1360,17 +1357,17 @@
 
 class LoadConfigData(DataContainer):
     """Holds Load Config data.
-    
+
     struct:     IMAGE_LOAD_CONFIG_DIRECTORY structure
     name:       dll name
     """
 
 class BoundImportRefData(DataContainer):
     """Holds bound import forwarder reference data.
-    
+
     Contains the same information as the bound descriptor but
     for forwarded DLLs, if any.
-    
+
     struct:     IMAGE_BOUND_FORWARDER_REF structure
     name:       dll name
     """
@@ -1379,9 +1376,9 @@
 # Valid FAT32 8.3 short filename characters according to:
 #  http://en.wikipedia.org/wiki/8.3_filename
 # This will help decide whether DLL ASCII names are likely
-# to be valid of otherwise corruted data
+# to be valid or otherwise corrupt data
 #
-# The flename length is not checked because the DLLs filename
+# The filename length is not checked because the DLLs filename
 # can be longer that the 8.3
 allowed_filename = string.lowercase + string.uppercase + string.digits + "!#$%&'()-@^_`{}~+,.;=[]" + ''.join( [chr(i) for i in range(128, 256)] )
 def is_valid_dos_filename(s):
@@ -1410,48 +1407,48 @@
 
 class PE:
     """A Portable Executable representation.
-    
+
     This class provides access to most of the information in a PE file.
-    
+
     It expects to be supplied the name of the file to load or PE data
     to process and an optional argument 'fast_load' (False by default)
     which controls whether to load all the directories information,
     which can be quite time consuming.
-    
+
     pe = pefile.PE('module.dll')
     pe = pefile.PE(name='module.dll')
-    
+
     would load 'module.dll' and process it. If the data would be already
     available in a buffer the same could be achieved with:
-    
+
     pe = pefile.PE(data=module_dll_data)
-    
+
     The "fast_load" can be set to a default by setting its value in the
-    module itself by means,for instance, of a "pefile.fast_load = True".
+    module itself by means, for instance, of a "pefile.fast_load = True".
     That will make all the subsequent instances not to load the
     whole PE structure. The "full_load" method can be used to parse
     the missing data at a later stage.
-    
+
     Basic headers information will be available in the attributes:
-    
+
     DOS_HEADER
     NT_HEADERS
     FILE_HEADER
     OPTIONAL_HEADER
-    
+
     All of them will contain among their attributes the members of the
     corresponding structures as defined in WINNT.H
-    
+
     The raw data corresponding to the header (from the beginning of the
     file up to the start of the first section) will be available in the
     instance's attribute 'header' as a string.
-    
+
     The sections will be available as a list in the 'sections' attribute.
     Each entry will contain as attributes all the structure's members.
-    
+
     Directory entries will be available as attributes (if they exist):
     (no other entries are processed at this point)
-    
+
     DIRECTORY_ENTRY_IMPORT (list of ImportDescData instances)
     DIRECTORY_ENTRY_EXPORT (ExportDirData instance)
     DIRECTORY_ENTRY_RESOURCE (ResourceDirData instance)
@@ -1459,12 +1456,12 @@
     DIRECTORY_ENTRY_BASERELOC (list of BaseRelocationData instances)
     DIRECTORY_ENTRY_TLS
     DIRECTORY_ENTRY_BOUND_IMPORT (list of BoundImportData instances)
-    
+
     The following dictionary attributes provide ways of mapping different
     constants. They will accept the numeric value and return the string
     representation and the opposite, feed in the string and get the
     numeric constant:
-    
+
     DIRECTORY_ENTRY
     IMAGE_CHARACTERISTICS
     SECTION_CHARACTERISTICS
@@ -1476,11 +1473,11 @@
     LANG
     SUBLANG
     """
-    
+
     #
     # Format specifications for PE structures.
     #
-    
+
     __IMAGE_DOS_HEADER_format__ = ('IMAGE_DOS_HEADER',
         ('H,e_magic', 'H,e_cblp', 'H,e_cp',
         'H,e_crlc', 'H,e_cparhdr', 'H,e_minalloc',
@@ -1488,17 +1485,17 @@
         'H,e_ip', 'H,e_cs', 'H,e_lfarlc', 'H,e_ovno', '8s,e_res',
         'H,e_oemid', 'H,e_oeminfo', '20s,e_res2',
         'I,e_lfanew'))
-    
+
     __IMAGE_FILE_HEADER_format__ = ('IMAGE_FILE_HEADER',
         ('H,Machine', 'H,NumberOfSections',
         'I,TimeDateStamp', 'I,PointerToSymbolTable',
         'I,NumberOfSymbols', 'H,SizeOfOptionalHeader',
         'H,Characteristics'))
-    
+
     __IMAGE_DATA_DIRECTORY_format__ = ('IMAGE_DATA_DIRECTORY',
         ('I,VirtualAddress', 'I,Size'))
-    
-    
+
+
     __IMAGE_OPTIONAL_HEADER_format__ = ('IMAGE_OPTIONAL_HEADER',
         ('H,Magic', 'B,MajorLinkerVersion',
         'B,MinorLinkerVersion', 'I,SizeOfCode',
@@ -1513,8 +1510,8 @@
         'I,SizeOfStackReserve', 'I,SizeOfStackCommit',
         'I,SizeOfHeapReserve', 'I,SizeOfHeapCommit',
         'I,LoaderFlags', 'I,NumberOfRvaAndSizes' ))
-    
-    
+
+
     __IMAGE_OPTIONAL_HEADER64_format__ = ('IMAGE_OPTIONAL_HEADER64',
         ('H,Magic', 'B,MajorLinkerVersion',
         'B,MinorLinkerVersion', 'I,SizeOfCode',
@@ -1529,76 +1526,76 @@
         'Q,SizeOfStackReserve', 'Q,SizeOfStackCommit',
         'Q,SizeOfHeapReserve', 'Q,SizeOfHeapCommit',
         'I,LoaderFlags', 'I,NumberOfRvaAndSizes' ))
-        
-    
+
+
     __IMAGE_NT_HEADERS_format__ = ('IMAGE_NT_HEADERS', ('I,Signature',))
-    
+
     __IMAGE_SECTION_HEADER_format__ = ('IMAGE_SECTION_HEADER',
         ('8s,Name', 'I,Misc,Misc_PhysicalAddress,Misc_VirtualSize',
         'I,VirtualAddress', 'I,SizeOfRawData', 'I,PointerToRawData',
         'I,PointerToRelocations', 'I,PointerToLinenumbers',
         'H,NumberOfRelocations', 'H,NumberOfLinenumbers',
         'I,Characteristics'))
-    
+
     __IMAGE_DELAY_IMPORT_DESCRIPTOR_format__ = ('IMAGE_DELAY_IMPORT_DESCRIPTOR',
         ('I,grAttrs', 'I,szName', 'I,phmod', 'I,pIAT', 'I,pINT',
         'I,pBoundIAT', 'I,pUnloadIAT', 'I,dwTimeStamp'))
-    
+
     __IMAGE_IMPORT_DESCRIPTOR_format__ =  ('IMAGE_IMPORT_DESCRIPTOR',
         ('I,OriginalFirstThunk,Characteristics',
         'I,TimeDateStamp', 'I,ForwarderChain', 'I,Name', 'I,FirstThunk'))
-    
+
     __IMAGE_EXPORT_DIRECTORY_format__ =  ('IMAGE_EXPORT_DIRECTORY',
         ('I,Characteristics',
         'I,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion', 'I,Name',
         'I,Base', 'I,NumberOfFunctions', 'I,NumberOfNames',
         'I,AddressOfFunctions', 'I,AddressOfNames', 'I,AddressOfNameOrdinals'))
-    
+
     __IMAGE_RESOURCE_DIRECTORY_format__ = ('IMAGE_RESOURCE_DIRECTORY',
         ('I,Characteristics',
         'I,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion',
         'H,NumberOfNamedEntries', 'H,NumberOfIdEntries'))
-    
+
     __IMAGE_RESOURCE_DIRECTORY_ENTRY_format__ = ('IMAGE_RESOURCE_DIRECTORY_ENTRY',
         ('I,Name',
         'I,OffsetToData'))
-    
+
     __IMAGE_RESOURCE_DATA_ENTRY_format__ = ('IMAGE_RESOURCE_DATA_ENTRY',
         ('I,OffsetToData', 'I,Size', 'I,CodePage', 'I,Reserved'))
-    
+
     __VS_VERSIONINFO_format__ = ( 'VS_VERSIONINFO',
         ('H,Length', 'H,ValueLength', 'H,Type' ))
-    
+
     __VS_FIXEDFILEINFO_format__ = ( 'VS_FIXEDFILEINFO',
         ('I,Signature', 'I,StrucVersion', 'I,FileVersionMS', 'I,FileVersionLS',
          'I,ProductVersionMS', 'I,ProductVersionLS', 'I,FileFlagsMask', 'I,FileFlags',
          'I,FileOS', 'I,FileType', 'I,FileSubtype', 'I,FileDateMS', 'I,FileDateLS'))
-    
+
     __StringFileInfo_format__ = ( 'StringFileInfo',
         ('H,Length', 'H,ValueLength', 'H,Type' ))
-    
+
     __StringTable_format__ = ( 'StringTable',
         ('H,Length', 'H,ValueLength', 'H,Type' ))
-    
+
     __String_format__ = ( 'String',
         ('H,Length', 'H,ValueLength', 'H,Type' ))
-    
+
     __Var_format__ = ( 'Var', ('H,Length', 'H,ValueLength', 'H,Type' ))
-    
+
     __IMAGE_THUNK_DATA_format__ = ('IMAGE_THUNK_DATA',
         ('I,ForwarderString,Function,Ordinal,AddressOfData',))
-    
+
     __IMAGE_THUNK_DATA64_format__ = ('IMAGE_THUNK_DATA',
         ('Q,ForwarderString,Function,Ordinal,AddressOfData',))
-    
+
     __IMAGE_DEBUG_DIRECTORY_format__ = ('IMAGE_DEBUG_DIRECTORY',
         ('I,Characteristics', 'I,TimeDateStamp', 'H,MajorVersion',
         'H,MinorVersion', 'I,Type', 'I,SizeOfData', 'I,AddressOfRawData',
         'I,PointerToRawData'))
-    
+
     __IMAGE_BASE_RELOCATION_format__ = ('IMAGE_BASE_RELOCATION',
         ('I,VirtualAddress', 'I,SizeOfBlock') )
-    
+
     __IMAGE_BASE_RELOCATION_ENTRY_format__ = ('IMAGE_BASE_RELOCATION_ENTRY',
         ('H,Data',) )
 
@@ -1606,12 +1603,12 @@
         ('I,StartAddressOfRawData', 'I,EndAddressOfRawData',
         'I,AddressOfIndex', 'I,AddressOfCallBacks',
         'I,SizeOfZeroFill', 'I,Characteristics' ) )
-    
+
     __IMAGE_TLS_DIRECTORY64_format__ = ('IMAGE_TLS_DIRECTORY',
         ('Q,StartAddressOfRawData', 'Q,EndAddressOfRawData',
         'Q,AddressOfIndex', 'Q,AddressOfCallBacks',
         'I,SizeOfZeroFill', 'I,Characteristics' ) )
-    
+
     __IMAGE_LOAD_CONFIG_DIRECTORY_format__ = ('IMAGE_LOAD_CONFIG_DIRECTORY',
         ('I,Size', 'I,TimeDateStamp',
         'H,MajorVersion', 'H,MinorVersion',
@@ -1627,7 +1624,7 @@
         'H,CSDVersion', 'H,Reserved1',
         'I,EditList', 'I,SecurityCookie',
         'I,SEHandlerTable', 'I,SEHandlerCount' ) )
-        
+
     __IMAGE_LOAD_CONFIG_DIRECTORY64_format__ = ('IMAGE_LOAD_CONFIG_DIRECTORY',
         ('I,Size', 'I,TimeDateStamp',
       'H,MajorVersion', 'H,MinorVersion',
@@ -1643,31 +1640,31 @@
       'H,CSDVersion', 'H,Reserved1',
       'Q,EditList', 'Q,SecurityCookie',
       'Q,SEHandlerTable', 'Q,SEHandlerCount' ) )
-    
+
     __IMAGE_BOUND_IMPORT_DESCRIPTOR_format__ = ('IMAGE_BOUND_IMPORT_DESCRIPTOR',
         ('I,TimeDateStamp', 'H,OffsetModuleName', 'H,NumberOfModuleForwarderRefs'))
-    
+
     __IMAGE_BOUND_FORWARDER_REF_format__ = ('IMAGE_BOUND_FORWARDER_REF',
         ('I,TimeDateStamp', 'H,OffsetModuleName', 'H,Reserved') )
-    
-    
+
+
     def __init__(self, name=None, data=None, fast_load=None):
-        
+
         self.sections = []
-        
+
         self.__warnings = []
-        
+
         self.PE_TYPE = None
-        
+
         if  not name and not data:
             return
-        
+
         # This list will keep track of all the structures created.
         # That will allow for an easy iteration through the list
         # in order to save the modifications made
         self.__structures__ = []
         self.__from_file = None
-        
+
         if not fast_load:
             fast_load = globals()['fast_load']
         try:
@@ -1675,23 +1672,23 @@
         except:
             self.close()
             raise
-            
-            
+
+
     def close(self):
-        if ( self.__from_file is True and hasattr(self, '__data__') and 
+        if ( self.__from_file is True and hasattr(self, '__data__') and
             ((isinstance(mmap.mmap, type) and isinstance(self.__data__, mmap.mmap)) or
            'mmap.mmap' in repr(type(self.__data__))) ):
                 self.__data__.close()
-    
+
 
     def __unpack_data__(self, format, data, file_offset):
         """Apply structure format to raw data.
-        
+
         Returns and unpacked structure object if successful, None otherwise.
         """
-        
+
         structure = Structure(format, file_offset=file_offset)
-        
+
         try:
             structure.__unpack__(data)
         except PEFormatError, err:
@@ -1699,19 +1696,19 @@
                 'Corrupt header "%s" at file offset %d. Exception: %s' % (
                     format[0], file_offset, str(err))  )
             return None
-        
+
         self.__structures__.append(structure)
-        
+
         return structure
-        
-    
+
+
     def __parse__(self, fname, data, fast_load):
         """Parse a Portable Executable file.
-        
+
         Loads a PE file, parsing all its structures and making them available
         through the instance's attributes.
         """
-        
+
         if fname:
             stat = os.stat(fname)
             if stat.st_size == 0:
@@ -1726,33 +1723,33 @@
         elif data:
             self.__data__ = data
             self.__from_file = False
-        
+
         dos_header_data = self.__data__[:64]
         if len(dos_header_data) != 64:
             raise PEFormatError('Unable to read the DOS Header, possibly a truncated file.')
-            
+
         self.DOS_HEADER = self.__unpack_data__(
             self.__IMAGE_DOS_HEADER_format__,
             dos_header_data, file_offset=0)
-        
+
         if self.DOS_HEADER.e_magic == IMAGE_DOSZM_SIGNATURE:
             raise PEFormatError('Probably a ZM Executable (not a PE file).')
         if not self.DOS_HEADER or self.DOS_HEADER.e_magic != IMAGE_DOS_SIGNATURE:
             raise PEFormatError('DOS Header magic not found.')
-        
+
         # OC Patch:
         # Check for sane value in e_lfanew
         #
         if self.DOS_HEADER.e_lfanew > len(self.__data__):
             raise PEFormatError('Invalid e_lfanew value, probably not a PE file')
-        
+
         nt_headers_offset = self.DOS_HEADER.e_lfanew
-        
+
         self.NT_HEADERS = self.__unpack_data__(
             self.__IMAGE_NT_HEADERS_format__,
             self.__data__[nt_headers_offset:nt_headers_offset+8],
             file_offset = nt_headers_offset)
-        
+
         # We better check the signature right here, before the file screws
         # around with sections:
         # OC Patch:
@@ -1768,90 +1765,90 @@
             raise PEFormatError('Invalid NT Headers signature. Probably a LX file')
         if self.NT_HEADERS.Signature != IMAGE_NT_SIGNATURE:
             raise PEFormatError('Invalid NT Headers signature.')
-        
+
         self.FILE_HEADER = self.__unpack_data__(
             self.__IMAGE_FILE_HEADER_format__,
             self.__data__[nt_headers_offset+4:nt_headers_offset+4+32],
             file_offset = nt_headers_offset+4)
         image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
-        
+
         if not self.FILE_HEADER:
             raise PEFormatError('File Header missing')
-        
+
         # Set the image's flags according the the Characteristics member
         set_flags(self.FILE_HEADER, self.FILE_HEADER.Characteristics, image_flags)
-        
+
         optional_header_offset =    \
             nt_headers_offset+4+self.FILE_HEADER.sizeof()
-        
+
         # Note: location of sections can be controlled from PE header:
         sections_offset = optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader
-        
+
         self.OPTIONAL_HEADER = self.__unpack_data__(
             self.__IMAGE_OPTIONAL_HEADER_format__,
             # Read up to 256 bytes to allow creating a copy of too much data
             self.__data__[optional_header_offset:optional_header_offset+256],
             file_offset = optional_header_offset)
-        
+
         # According to solardesigner's findings for his
         # Tiny PE project, the optional header does not
         # need fields beyond "Subsystem" in order to be
-        # loadable by the Windows loader (given that zeroes
+        # loadable by the Windows loader (given that zeros
         # are acceptable values and the header is loaded
         # in a zeroed memory page)
         # If trying to parse a full Optional Header fails
         # we try to parse it again with some 0 padding
         #
         MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69
-        
+
         if ( self.OPTIONAL_HEADER is None and
             len(self.__data__[optional_header_offset:optional_header_offset+0x200])
                 >= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ):
-            
-            # Add enough zeroes to make up for the unused fields
+
+            # Add enough zeros to make up for the unused fields
             #
             padding_length = 128
-            
+
             # Create padding
             #
             padded_data = self.__data__[optional_header_offset:optional_header_offset+0x200] + (
                 '\0' * padding_length)
-            
+
             self.OPTIONAL_HEADER = self.__unpack_data__(
                 self.__IMAGE_OPTIONAL_HEADER_format__,
                 padded_data,
                 file_offset = optional_header_offset)
-            
-        
+
+
         # Check the Magic in the OPTIONAL_HEADER and set the PE file
         # type accordingly
         #
         if self.OPTIONAL_HEADER is not None:
-            
+
             if self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE:
-                
+
                 self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE
-            
+
             elif self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE_PLUS:
-                
+
                 self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE_PLUS
-                
+
                 self.OPTIONAL_HEADER = self.__unpack_data__(
                     self.__IMAGE_OPTIONAL_HEADER64_format__,
                     self.__data__[optional_header_offset:optional_header_offset+0x200],
                     file_offset = optional_header_offset)
-                
+
                 # Again, as explained above, we try to parse
                 # a reduced form of the Optional Header which
                 # is still valid despite not including all
                 # structure members
                 #
                 MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69+4
-                
+
                 if ( self.OPTIONAL_HEADER is None and
                     len(self.__data__[optional_header_offset:optional_header_offset+0x200])
                         >= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ):
-                    
+
                     padding_length = 128
                     padded_data = self.__data__[optional_header_offset:optional_header_offset+0x200] + (
                         '\0' * padding_length)
@@ -1859,36 +1856,36 @@
                         self.__IMAGE_OPTIONAL_HEADER64_format__,
                         padded_data,
                         file_offset = optional_header_offset)
-        
-        
+
+
         if not self.FILE_HEADER:
             raise PEFormatError('File Header missing')
-        
-        
+
+
         # OC Patch:
         # Die gracefully if there is no OPTIONAL_HEADER field
         # 975440f5ad5e2e4a92c4d9a5f22f75c1
         if self.PE_TYPE is None or self.OPTIONAL_HEADER is None:
             raise PEFormatError("No Optional Header found, invalid PE32 or PE32+ file")
-        
+
         dll_characteristics_flags = retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLL_CHARACTERISTICS_')
-        
+
         # Set the Dll Characteristics flags according the the DllCharacteristics member
         set_flags(
             self.OPTIONAL_HEADER,
             self.OPTIONAL_HEADER.DllCharacteristics,
             dll_characteristics_flags)
-        
-        
+
+
         self.OPTIONAL_HEADER.DATA_DIRECTORY = []
         #offset = (optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader)
         offset = (optional_header_offset + self.OPTIONAL_HEADER.sizeof())
-        
-        
+
+
         self.NT_HEADERS.FILE_HEADER = self.FILE_HEADER
         self.NT_HEADERS.OPTIONAL_HEADER = self.OPTIONAL_HEADER
-        
-        
+
+
         # The NumberOfRvaAndSizes is sanitized to stay within
         # reasonable limits so can be casted to an int
         #
@@ -1897,37 +1894,37 @@
                 'Suspicious NumberOfRvaAndSizes in the Optional Header. ' +
                 'Normal values are never larger than 0x10, the value is: 0x%x' %
                 self.OPTIONAL_HEADER.NumberOfRvaAndSizes )
-        
+
         MAX_ASSUMED_VALID_NUMBER_OF_RVA_AND_SIZES = 0x100
         for i in xrange(int(0x7fffffffL & self.OPTIONAL_HEADER.NumberOfRvaAndSizes)):
-            
+
             if len(self.__data__) - offset == 0:
                 break
-            
+
             if len(self.__data__) - offset < 8:
                 data = self.__data__[offset:] + '\0'*8
             else:
                 data = self.__data__[offset:offset+MAX_ASSUMED_VALID_NUMBER_OF_RVA_AND_SIZES]
-            
+
             dir_entry = self.__unpack_data__(
                 self.__IMAGE_DATA_DIRECTORY_format__,
                 data,
                 file_offset = offset)
-            
+
             if dir_entry is None:
                 break
-            
+
             # Would fail if missing an entry
             # 1d4937b2fa4d84ad1bce0309857e70ca offending sample
             try:
                 dir_entry.name = DIRECTORY_ENTRY[i]
             except (KeyError, AttributeError):
                 break
-            
+
             offset += dir_entry.sizeof()
-            
+
             self.OPTIONAL_HEADER.DATA_DIRECTORY.append(dir_entry)
-            
+
             # If the offset goes outside the optional header,
             # the loop is broken, regardless of how many directories
             # NumberOfRvaAndSizes says there are
@@ -1939,12 +1936,12 @@
             # is wrong and stop processing
             if offset >= (optional_header_offset +
                 self.OPTIONAL_HEADER.sizeof() + 8*16) :
-                
+
                 break
-                        
-        
+
+
         offset = self.parse_sections(sections_offset)
-        
+
         # OC Patch:
         # There could be a problem if there are no raw data sections
         # greater than 0
@@ -1954,45 +1951,45 @@
         #
         rawDataPointers = [
             self.adjust_FileAlignment( s.PointerToRawData,
-                self.OPTIONAL_HEADER.FileAlignment ) 
+                self.OPTIONAL_HEADER.FileAlignment )
             for s in self.sections if s.PointerToRawData>0 ]
-        
+
         if len(rawDataPointers) > 0:
             lowest_section_offset = min(rawDataPointers)
         else:
             lowest_section_offset = None
-        
+
         if not lowest_section_offset or lowest_section_offset < offset:
             self.header = self.__data__[:offset]
         else:
             self.header = self.__data__[:lowest_section_offset]
-        
-        
+
+
         # Check whether the entry point lies within a section
         #
         if self.get_section_by_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint) is not None:
-            
+
             # Check whether the entry point lies within the file
             #
             ep_offset = self.get_offset_from_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint)
             if ep_offset > len(self.__data__):
-                
+
                 self.__warnings.append(
                     'Possibly corrupt file. AddressOfEntryPoint lies outside the file. ' +
                     'AddressOfEntryPoint: 0x%x' %
                     self.OPTIONAL_HEADER.AddressOfEntryPoint )
-        
+
         else:
-            
+
             self.__warnings.append(
                 'AddressOfEntryPoint lies outside the sections\' boundaries. ' +
                 'AddressOfEntryPoint: 0x%x' %
                 self.OPTIONAL_HEADER.AddressOfEntryPoint )
-        
-        
+
+
         if not fast_load:
             self.parse_data_directories()
-            
+
             class RichHeader:
                 pass
             rich_header = self.parse_rich_header()
@@ -2007,19 +2004,19 @@
     def parse_rich_header(self):
         """Parses the rich header
         see http://www.ntcore.com/files/richsign.htm for more information
-        
+
         Structure:
         00 DanS ^ checksum, checksum, checksum, checksum
         10 Symbol RVA ^ checksum, Symbol size ^ checksum...
         ...
         XX Rich, checksum, 0, 0,...
         """
-        
+
         # Rich Header constants
         #
         DANS = 0x536E6144 # 'DanS' as dword
         RICH = 0x68636952 # 'Rich' as dword
-        
+
         # Read a block of data
         #
         try:
@@ -2041,101 +2038,101 @@
         headervalues = []
         result ["values"] = headervalues
 
-        data = data[4:] 
+        data = data[4:]
         for i in xrange(len(data) / 2):
-            
+
             # Stop until the Rich footer signature is found
             #
             if data[2 * i] == RICH:
-                
+
                 # it should be followed by the checksum
                 #
                 if data[2 * i + 1] != checksum:
                     self.__warnings.append('Rich Header corrupted')
                 break
-            
+
             # header values come by pairs
             #
             headervalues += [data[2 * i] ^ checksum, data[2 * i + 1] ^ checksum]
         return result
-        
-    
+
+
     def get_warnings(self):
         """Return the list of warnings.
-        
+
         Non-critical problems found when parsing the PE file are
         appended to a list of warnings. This method returns the
         full list.
         """
-        
+
         return self.__warnings
-        
-    
+
+
     def show_warnings(self):
         """Print the list of warnings.
-        
+
         Non-critical problems found when parsing the PE file are
         appended to a list of warnings. This method prints the
         full list to standard output.
         """
-        
+
         for warning in self.__warnings:
             print '>', warning
-    
-    
+
+
     def full_load(self):
         """Process the data directories.
-        
+
         This method will load the data directories which might not have
         been loaded if the "fast_load" option was used.
         """
-        
+
         self.parse_data_directories()
-        
-    
+
+
     def write(self, filename=None):
         """Write the PE file.
-        
+
         This function will process all headers and components
         of the PE file and include all changes made (by just
         assigning to attributes in the PE objects) and write
         the changes back to a file whose name is provided as
-        an argument. The filename is optional, if not 
+        an argument. The filename is optional, if not
         provided the data will be returned as a 'str' object.
         """
-        
+
         file_data = list(self.__data__)
         for structure in self.__structures__:
-            
+
             struct_data = list(structure.__pack__())
             offset = structure.get_file_offset()
-            
+
             file_data[offset:offset+len(struct_data)] = struct_data
-        
+
         if hasattr(self, 'VS_VERSIONINFO'):
             if hasattr(self, 'FileInfo'):
                 for entry in self.FileInfo:
                     if hasattr(entry, 'StringTable'):
                         for st_entry in entry.StringTable:
                             for key, entry in st_entry.entries.items():
-                                
+
                                 offsets = st_entry.entries_offsets[key]
                                 lengths = st_entry.entries_lengths[key]
-                                
+
                                 if len( entry ) > lengths[1]:
-                                    
+
                                     l = list()
                                     for idx, c in enumerate(entry):
                                         if ord(c) > 256:
                                             l.extend( [ chr(ord(c) & 0xff), chr( (ord(c) & 0xff00) >>8) ]  )
                                         else:
                                             l.extend( [chr( ord(c) ), '\0'] )
-                                    
+
                                     file_data[
                                         offsets[1] : offsets[1] + lengths[1]*2 ] = l
-                                
+
                                 else:
-                                    
+
                                     l = list()
                                     for idx, c in enumerate(entry):
                                         if ord(c) > 256:
@@ -2145,72 +2142,79 @@
 
                                     file_data[
                                         offsets[1] : offsets[1] + len(entry)*2 ] = l
-                                    
+
                                     remainder = lengths[1] - len(entry)
                                     file_data[
                                         offsets[1] + len(entry)*2 :
                                         offsets[1] + lengths[1]*2 ] = [
                                             u'\0' ] * remainder*2
-                
+
         new_file_data = ''.join( [ chr(ord(c)) for c in file_data] )
-        
+
         if filename:
             f = file(filename, 'wb+')
             f.write(new_file_data)
             f.close()
         else:
             return new_file_data
-    
-    
+
+
     def parse_sections(self, offset):
         """Fetch the PE file sections.
-        
+
         The sections will be readily available in the "sections" attribute.
         Its attributes will contain all the section information plus "data"
         a buffer containing the section's data.
-        
+
         The "Characteristics" member will be processed and attributes
         representing the section characteristics (with the 'IMAGE_SCN_'
         string trimmed from the constant's names) will be added to the
         section instance.
-        
+
         Refer to the SectionStructure class for additional info.
         """
-        
+
         self.sections = []
-        
+
         for i in xrange(self.FILE_HEADER.NumberOfSections):
             section = SectionStructure( self.__IMAGE_SECTION_HEADER_format__, pe=self )
             if not section:
                 break
             section_offset = offset + section.sizeof() * i
             section.set_file_offset(section_offset)
-            section.__unpack__(self.__data__[section_offset : section_offset + section.sizeof()])
+            section_data = self.__data__[section_offset : section_offset + section.sizeof()]
+            # Check if the section is all nulls and stop if so.
+            if section_data.count('\0') == section.sizeof():
+                self.__warnings.append(
+                    ('Invalid section %d. ' % i) +
+                    'Contents are null-bytes.')
+                break
+            section.__unpack__(section_data)
             self.__structures__.append(section)
-            
+
             if section.SizeOfRawData > len(self.__data__):
                 self.__warnings.append(
                     ('Error parsing section %d. ' % i) +
                     'SizeOfRawData is larger than file.')
-            
+
             if self.adjust_FileAlignment( section.PointerToRawData,
                 self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__):
-                
+
                 self.__warnings.append(
                     ('Error parsing section %d. ' % i) +
                     'PointerToRawData points beyond the end of the file.')
-            
+
             if section.Misc_VirtualSize > 0x10000000:
                 self.__warnings.append(
                     ('Suspicious value found parsing section %d. ' % i) +
                     'VirtualSize is extremely large > 256MiB.')
-            
-            if self.adjust_SectionAlignment( section.VirtualAddress, 
+
+            if self.adjust_SectionAlignment( section.VirtualAddress,
                 self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) > 0x10000000:
                 self.__warnings.append(
                     ('Suspicious value found parsing section %d. ' % i) +
                     'VirtualAddress is beyond 0x10000000.')
-            
+
             #
             # Some packer used a non-aligned PointerToRawData in the sections,
             # which causes several common tools not to load the section data
@@ -2219,10 +2223,10 @@
             # offset multiple of FileAlignment which is smaller than
             # PointerToRawData. The following code will do the same.
             #
-            
+
             #alignment = self.OPTIONAL_HEADER.FileAlignment
             #self.update_section_data(section)
-            
+
             if ( self.OPTIONAL_HEADER.FileAlignment != 0 and
                 ( section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0):
                 self.__warnings.append(
@@ -2230,16 +2234,16 @@
                     'PointerToRawData should normally be ' +
                     'a multiple of FileAlignment, this might imply the file ' +
                     'is trying to confuse tools which parse this incorrectly')
-            
-            
+
+
             section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
-            
+
             # Set the section's flags according the the Characteristics member
             set_flags(section, section.Characteristics, section_flags)
-            
+
             if ( section.__dict__.get('IMAGE_SCN_MEM_WRITE', False)  and
                 section.__dict__.get('IMAGE_SCN_MEM_EXECUTE', False) ):
-                
+
                 if section.Name == 'PAGE' and self.is_driver():
                     # Drivers can have a PAGE section with those flags set without
                     # implying that it is malicious
@@ -2249,38 +2253,38 @@
                         ('Suspicious flags set for section %d. ' % i) +
                         'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set. ' +
                         'This might indicate a packed executable.')
-            
+
             self.sections.append(section)
-        
+
         if self.FILE_HEADER.NumberOfSections > 0 and self.sections:
             return offset + self.sections[0].sizeof()*self.FILE_HEADER.NumberOfSections
         else:
             return offset
-        
-    
-    
+
+
+
     def parse_data_directories(self, directories=None):
         """Parse and process the PE file's data directories.
-        
+
         If the optional argument 'directories' is given, only
-        the directories at the specified indices will be parsed.
+        the directories at the specified indexes will be parsed.
         Such functionality allows parsing of areas of interest
         without the burden of having to parse all others.
         The directories can then be specified as:
-        
-        For export/import only:
-        
+
+        For export / import only:
+
           directories = [ 0, 1 ]
-          
+
         or (more verbosely):
-        
-          directories = [ DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'], 
+
+          directories = [ DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'],
             DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'] ]
-            
+
         If 'directories' is a list, the ones that are processed will be removed,
         leaving only the ones that are not present in the image.
         """
-        
+
         directory_parsing = (
             ('IMAGE_DIRECTORY_ENTRY_IMPORT', self.parse_import_directory),
             ('IMAGE_DIRECTORY_ENTRY_EXPORT', self.parse_export_directory),
@@ -2291,11 +2295,11 @@
             ('IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG', self.parse_directory_load_config),
             ('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', self.parse_delay_import_directory),
             ('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', self.parse_directory_bound_imports) )
-            
+
         if directories is not None:
             if not isinstance(directories, (tuple, list)):
                 directories = [directories]
-        
+
         for entry in directory_parsing:
             # OC Patch:
             #
@@ -2304,32 +2308,32 @@
                 dir_entry = self.OPTIONAL_HEADER.DATA_DIRECTORY[directory_index]
             except IndexError:
                 break
-                
+
             # Only process all the directories if no individual ones have
             # been chosen
             #
             if directories is None or directory_index in directories:
-                
+
                 if dir_entry.VirtualAddress:
                     value = entry[1](dir_entry.VirtualAddress, dir_entry.Size)
                     if value:
                         setattr(self, entry[0][6:], value)
-                        
+
             if (directories is not None) and isinstance(directories, list) and (entry[0] in directories):
                 directories.remove(directory_index)
-                
-        
-    
+
+
+
     def parse_directory_bound_imports(self, rva, size):
         """"""
-        
+
         bnd_descr = Structure(self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__)
         bnd_descr_size = bnd_descr.sizeof()
         start = rva
-        
+
         bound_imports = []
         while True:
-            
+
             bnd_descr = self.__unpack_data__(
                 self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__,
                    self.__data__[rva:rva+bnd_descr_size],
@@ -2338,17 +2342,17 @@
                 # If can't parse directory then silently return.
                 # This directory does not necessarily have to be valid to
                 # still have a valid PE file
-                
+
                 self.__warnings.append(
                     'The Bound Imports directory exists but can\'t be parsed.')
-                
+
                 return
-            
+
             if bnd_descr.all_zeroes():
                 break
-            
+
             rva += bnd_descr.sizeof()
-            
+
             section = self.get_section_by_offset(rva)
             file_offset = self.get_offset_from_rva(rva)
             if section is None:
@@ -2365,7 +2369,7 @@
                     'RVA of IMAGE_BOUND_IMPORT_DESCRIPTOR points to an invalid address: %x' %
                     rva)
                 return
-                
+
 
             forwarder_refs = []
             # 8 is the size of __IMAGE_BOUND_IMPORT_DESCRIPTOR_format__
@@ -2381,7 +2385,7 @@
                     raise PEFormatError(
                         "IMAGE_BOUND_FORWARDER_REF cannot be read")
                 rva += bnd_frwd_ref.sizeof()
-                
+
                 offset = start+bnd_frwd_ref.OffsetModuleName
                 name_str =  self.get_string_from_data(
                     0, self.__data__[offset : offset + MAX_STRING_LENGTH])
@@ -2399,11 +2403,11 @@
                 forwarder_refs.append(BoundImportRefData(
                     struct = bnd_frwd_ref,
                     name = name_str))
-                    
+
             offset = start+bnd_descr.OffsetModuleName
             name_str = self.get_string_from_data(
                 0, self.__data__[offset : offset + MAX_STRING_LENGTH])
-            
+
             if name_str:
                 invalid_chars = [c for c in name_str if c not in string.printable]
                 if len(name_str) > 256 or len(name_str) < 4 or invalid_chars:
@@ -2416,19 +2420,19 @@
                     struct = bnd_descr,
                     name = name_str,
                     entries = forwarder_refs))
-        
+
         return bound_imports
-        
-    
+
+
     def parse_directory_tls(self, rva, size):
         """"""
-        
+
         if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
             format = self.__IMAGE_TLS_DIRECTORY_format__
-        
+
         elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
             format = self.__IMAGE_TLS_DIRECTORY64_format__
-        
+
         try:
             tls_struct = self.__unpack_data__(
                 format,
@@ -2439,22 +2443,22 @@
                 'Invalid TLS information. Can\'t read ' +
                 'data at RVA: 0x%x' % rva)
             tls_struct = None
-        
+
         if not tls_struct:
             return None
-        
+
         return TlsData( struct = tls_struct )
-    
-    
+
+
     def parse_directory_load_config(self, rva, size):
         """"""
-        
+
         if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
             format = self.__IMAGE_LOAD_CONFIG_DIRECTORY_format__
-        
+
         elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
             format = self.__IMAGE_LOAD_CONFIG_DIRECTORY64_format__
-        
+
         try:
             load_config_struct = self.__unpack_data__(
                 format,
@@ -2468,19 +2472,19 @@
 
         if not load_config_struct:
             return None
-        
+
         return LoadConfigData( struct = load_config_struct )
 
 
     def parse_relocations_directory(self, rva, size):
         """"""
-        
+
         rlc_size = Structure(self.__IMAGE_BASE_RELOCATION_format__).sizeof()
         end = rva+size
-        
+
         relocations = []
         while rva < end:
-            
+
             # OC Patch:
             # Malware that has bad RVA entries will cause an error.
             # Just continue on after an exception
@@ -2495,17 +2499,17 @@
                     'Invalid relocation information. Can\'t read ' +
                     'data at RVA: 0x%x' % rva)
                 rlc = None
-            
+
             if not rlc:
                 break
-            
+
             # rlc.VirtualAddress must lie within the Image
             if rlc.VirtualAddress > self.OPTIONAL_HEADER.SizeOfImage:
                 self.__warnings.append(
                     'Invalid relocation information. VirtualAddress outside' +
                     ' of Image: 0x%x' % rlc.VirtualAddress)
                 break
-            
+
             # rlc.SizeOfBlock must be less or equal than the size of the image
             # (It's a rather loose sanity test)
             if rlc.SizeOfBlock > self.OPTIONAL_HEADER.SizeOfImage:
@@ -2513,40 +2517,40 @@
                     'Invalid relocation information. SizeOfBlock too large' +
                     ': %d' % rlc.SizeOfBlock)
                 break
-            
+
             reloc_entries = self.parse_relocations(
                 rva+rlc_size, rlc.VirtualAddress, rlc.SizeOfBlock-rlc_size )
-            
+
             relocations.append(
                 BaseRelocationData(
                     struct = rlc,
                     entries = reloc_entries))
-            
+
             if not rlc.SizeOfBlock:
                 break
             rva += rlc.SizeOfBlock
-        
+
         return relocations
-        
-    
+
+
     def parse_relocations(self, data_rva, rva, size):
         """"""
 
         data = self.get_data(data_rva, size)
         file_offset = self.get_offset_from_rva(data_rva)
-        
+
         entries = []
         for idx in xrange( len(data) / 2 ):
-            
+
             entry = self.__unpack_data__(
                 self.__IMAGE_BASE_RELOCATION_ENTRY_format__,
                 data[idx*2:(idx+1)*2],
                 file_offset = file_offset )
-                
+
             if not entry:
                 break
             word = entry.Data
-            
+
             reloc_type = (word>>12)
             reloc_offset = (word & 0x0fff)
             entries.append(
@@ -2556,15 +2560,15 @@
                     base_rva = rva,
                     rva = reloc_offset+rva))
             file_offset += entry.sizeof()
-        
+
         return entries
-        
-    
+
+
     def parse_debug_directory(self, rva, size):
         """"""
-        
+
         dbg_size = Structure(self.__IMAGE_DEBUG_DIRECTORY_format__).sizeof()
-        
+
         debug = []
         for idx in xrange(size/dbg_size):
             try:
@@ -2574,31 +2578,31 @@
                     'Invalid debug information. Can\'t read ' +
                     'data at RVA: 0x%x' % rva)
                 return None
-            
+
             dbg = self.__unpack_data__(
                 self.__IMAGE_DEBUG_DIRECTORY_format__,
                 data, file_offset = self.get_offset_from_rva(rva+dbg_size*idx))
-            
+
             if not dbg:
                 return None
-            
+
             debug.append(
                 DebugData(
                     struct = dbg))
-        
+
         return debug
-                        
-    
+
+
     def parse_resources_directory(self, rva, size=0, base_rva = None, level = 0, dirs=None):
         """Parse the resources directory.
-        
+
         Given the RVA of the resources directory, it will process all
         its entries.
-        
+
         The root will have the corresponding member of its structure,
         IMAGE_RESOURCE_DIRECTORY plus 'entries', a list of all the
         entries in the directory.
-        
+
         Those entries will have, correspondingly, all the structure's
         members (IMAGE_RESOURCE_DIRECTORY_ENTRY) and an additional one,
         "directory", pointing to the IMAGE_RESOURCE_DIRECTORY structure
@@ -2610,16 +2614,16 @@
         All the members of this structure, IMAGE_RESOURCE_DATA_ENTRY,
         are available as its attributes.
         """
-        
+
         # OC Patch:
         if dirs is None:
             dirs = [rva]
-        
+
         if base_rva is None:
             base_rva = rva
-        
+
         resources_section = self.get_section_by_rva(rva)
-        
+
         try:
             # If the RVA is invalid all would blow up. Some EXEs seem to be
             # specially nasty and have an invalid RVA.
@@ -2629,7 +2633,7 @@
                 'Invalid resources directory. Can\'t read ' +
                 'directory data at RVA: 0x%x' % rva)
             return None
-        
+
         # Get the resource directory structure, that is, the header
         # of the table preceding the actual entries
         #
@@ -2644,19 +2648,19 @@
                 'Invalid resources directory. Can\'t parse ' +
                 'directory data at RVA: 0x%x' % rva)
             return None
-        
+
         dir_entries = []
-        
-        # Advance the RVA to the positon immediately following the directory
+
+        # Advance the RVA to the position immediately following the directory
         # table header and pointing to the first entry in the table
         #
         rva += resource_dir.sizeof()
-        
+
         number_of_entries = (
             resource_dir.NumberOfNamedEntries +
             resource_dir.NumberOfIdEntries )
-        
-        # Set a hard limit on the maximum resonable number of entries
+
+        # Set a hard limit on the maximum reasonable number of entries
         MAX_ALLOWED_ENTRIES = 4096
         if number_of_entries > MAX_ALLOWED_ENTRIES:
             self.__warnings.append(
@@ -2664,12 +2668,15 @@
                 'The directory contains %d entries (>%s)' %
                 (number_of_entries, MAX_ALLOWED_ENTRIES) )
             return None
-        
+
         strings_to_postprocess = list()
-        
+
+        # Keep track of the last name's start and end offsets in order
+        # to be able to detect overlapping entries that might suggest
+        # an invalid or corrupt directory.
+        last_name_begin_end = None
         for idx in xrange(number_of_entries):
-            
-            
+
             res = self.parse_resource_entry(rva)
             if res is None:
                 self.__warnings.append(
@@ -2677,30 +2684,43 @@
                     'Entry %d is invalid, RVA = 0x%x. ' %
                     (idx, rva) )
                 break
-            
+
             entry_name = None
             entry_id = None
-            
-            # If all named entries have been processed, only Id ones
-            # remain
-            
-            if idx >= resource_dir.NumberOfNamedEntries:
+
+            name_is_string = (res.Name & 0x80000000L) >> 31
+            if not name_is_string:
                 entry_id = res.Name
             else:
                 ustr_offset = base_rva+res.NameOffset
                 try:
-                    #entry_name = self.get_string_u_at_rva(ustr_offset, max_length=16)
                     entry_name = UnicodeStringWrapperPostProcessor(self, ustr_offset)
+                    # If the last entry's offset points before the current's but its end
+                    # is past the current's beginning, assume the overlap indicates a
+                    # corrupt name.
+                    if last_name_begin_end and (last_name_begin_end[0] < ustr_offset and
+                        last_name_begin_end[1] >= ustr_offset):
+                        # Remove the previous overlapping entry as it's likely to be already corrupt data.
+                        strings_to_postprocess.pop()
+                        self.__warnings.append(
+                            'Error parsing the resources directory, '
+                            'attempting to read entry name. '
+                            'Entry names overlap 0x%x' %
+                            (ustr_offset) )
+                        break
+
+                    last_name_begin_end = (ustr_offset, ustr_offset+entry_name.get_pascal_16_length())
+
                     strings_to_postprocess.append(entry_name)
-                
+
                 except PEFormatError, excp:
                     self.__warnings.append(
                         'Error parsing the resources directory, '
                         'attempting to read entry name. '
                         'Can\'t read unicode string at offset 0x%x' %
                         (ustr_offset) )
-                
-            
+
+
             if res.DataIsDirectory:
                 # OC Patch:
                 #
@@ -2714,19 +2734,18 @@
                 #
                 # 9ee4d0a0caf095314fd7041a3e4404dc is the offending sample
                 if (base_rva + res.OffsetToDirectory) in dirs:
-                    
                     break
-                
+
                 else:
                     entry_directory = self.parse_resources_directory(
                         base_rva+res.OffsetToDirectory,
                         size-(rva-base_rva), # size
                         base_rva=base_rva, level = level+1,
                         dirs=dirs + [base_rva + res.OffsetToDirectory])
-                
+
                 if not entry_directory:
                     break
-                    
+
                 # Ange Albertini's code to process resources' strings
                 #
                 strings = None
@@ -2734,61 +2753,61 @@
                     strings = dict()
                     for resource_id in entry_directory.entries:
                         if hasattr(resource_id, 'directory'):
-                            
+
                             resource_strings = dict()
-                            
+
                             for resource_lang in resource_id.directory.entries:
-                                
-                                
-                                if (resource_lang is None or not hasattr(resource_lang, 'data') or 
+
+
+                                if (resource_lang is None or not hasattr(resource_lang, 'data') or
                                     resource_lang.data.struct.Size is None or resource_id.id is None):
                                     continue
-                                    
+
                                 string_entry_rva = resource_lang.data.struct.OffsetToData
                                 string_entry_size = resource_lang.data.struct.Size
                                 string_entry_id = resource_id.id
-                                
+
                                 string_entry_data = self.get_data(string_entry_rva, string_entry_size)
                                 parse_strings( string_entry_data, (int(string_entry_id) - 1) * 16, resource_strings )
                                 strings.update(resource_strings)
-                                
+
                             resource_id.directory.strings = resource_strings
-                                
+
                 dir_entries.append(
                     ResourceDirEntryData(
                         struct = res,
                         name = entry_name,
                         id = entry_id,
                         directory = entry_directory))
-            
+
             else:
                 struct = self.parse_resource_data_entry(
                     base_rva + res.OffsetToDirectory)
-                
+
                 if struct:
                     entry_data = ResourceDataEntryData(
                         struct = struct,
                         lang = res.Name & 0x3ff,
                         sublang = res.Name >> 10 )
-                    
+
                     dir_entries.append(
                         ResourceDirEntryData(
                             struct = res,
                             name = entry_name,
                             id = entry_id,
                             data = entry_data))
-                
+
                 else:
                     break
-                
-            
-            
+
+
+
             # Check if this entry contains version information
             #
             if level == 0 and res.Id == RESOURCE_TYPE['RT_VERSION']:
                 if len(dir_entries)>0:
                     last_entry = dir_entries[-1]
-                
+
                 rt_version_struct = None
                 try:
                     rt_version_struct = last_entry.directory.entries[0].directory.entries[0].data.struct
@@ -2796,30 +2815,29 @@
                     # Maybe a malformed directory structure...?
                     # Lets ignore it
                     pass
-                
+
                 if rt_version_struct is not None:
                     self.parse_version_information(rt_version_struct)
-            
+
             rva += res.sizeof()
-                    
-        
+
+
         string_rvas = [s.get_rva() for s in strings_to_postprocess]
         string_rvas.sort()
-        
+
         for idx, s in enumerate(strings_to_postprocess):
             s.render_pascal_16()
-            
-        
+
         resource_directory_data = ResourceDirData(
             struct = resource_dir,
             entries = dir_entries)
-        
+
         return resource_directory_data
-        
-    
+
+
     def parse_resource_data_entry(self, rva):
         """Parse a data entry from the resources directory."""
-        
+
         try:
             # If the RVA is invalid all would blow up. Some EXEs seem to be
             # specially nasty and have an invalid RVA.
@@ -2829,14 +2847,14 @@
                 'Error parsing a resource directory data entry, ' +
                 'the RVA is invalid: 0x%x' % ( rva ) )
             return None
-        
+
         data_entry = self.__unpack_data__(
             self.__IMAGE_RESOURCE_DATA_ENTRY_format__, data,
             file_offset = self.get_offset_from_rva(rva) )
-        
+
         return data_entry
-        
-    
+
+
     def parse_resource_entry(self, rva):
         """Parse a directory entry from the resources directory."""
 
@@ -2849,62 +2867,62 @@
         resource = self.__unpack_data__(
             self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__, data,
             file_offset = self.get_offset_from_rva(rva) )
-        
+
         if resource is None:
             return None
-        
+
         #resource.NameIsString = (resource.Name & 0x80000000L) >> 31
         resource.NameOffset = resource.Name & 0x7FFFFFFFL
-        
+
         resource.__pad = resource.Name & 0xFFFF0000L
         resource.Id = resource.Name & 0x0000FFFFL
-        
+
         resource.DataIsDirectory = (resource.OffsetToData & 0x80000000L) >> 31
         resource.OffsetToDirectory = resource.OffsetToData & 0x7FFFFFFFL
-        
+
         return resource
-        
-    
+
+
     def parse_version_information(self, version_struct):
         """Parse version information structure.
-        
+
         The date will be made available in three attributes of the PE object.
-        
+
         VS_VERSIONINFO     will contain the first three fields of the main structure:
             'Length', 'ValueLength', and 'Type'
-        
+
         VS_FIXEDFILEINFO    will hold the rest of the fields, accessible as sub-attributes:
             'Signature', 'StrucVersion', 'FileVersionMS', 'FileVersionLS',
             'ProductVersionMS', 'ProductVersionLS', 'FileFlagsMask', 'FileFlags',
             'FileOS', 'FileType', 'FileSubtype', 'FileDateMS', 'FileDateLS'
-        
+
         FileInfo    is a list of all StringFileInfo and VarFileInfo structures.
-        
+
         StringFileInfo structures will have a list as an attribute named 'StringTable'
         containing all the StringTable structures. Each of those structures contains a
-        dictionary 'entries' with all the key/value version information string pairs.
-        
+        dictionary 'entries' with all the key / value version information string pairs.
+
         VarFileInfo structures will have a list as an attribute named 'Var' containing
         all Var structures. Each Var structure will have a dictionary as an attribute
         named 'entry' which will contain the name and value of the Var.
         """
-        
-        
+
+
         # Retrieve the data for the version info resource
         #
         start_offset = self.get_offset_from_rva( version_struct.OffsetToData )
         raw_data = self.__data__[ start_offset : start_offset+version_struct.Size ]
-        
-        
+
+
         # Map the main structure and the subsequent string
         #
         versioninfo_struct = self.__unpack_data__(
             self.__VS_VERSIONINFO_format__, raw_data,
             file_offset = start_offset )
-        
+
         if versioninfo_struct is None:
             return
-        
+
         ustr_offset = version_struct.OffsetToData + versioninfo_struct.sizeof()
         try:
             versioninfo_string = self.get_string_u_at_rva( ustr_offset )
@@ -2914,26 +2932,26 @@
                 'attempting to read VS_VERSION_INFO string. Can\'t ' +
                 'read unicode string at offset 0x%x' % (
                 ustr_offset ) )
-            
+
             versioninfo_string = None
-        
+
         # If the structure does not contain the expected name, it's assumed to be invalid
         #
         if versioninfo_string != u'VS_VERSION_INFO':
-            
+
             self.__warnings.append('Invalid VS_VERSION_INFO block')
             return
-        
-        
+
+
         # Set the PE object's VS_VERSIONINFO to this one
         #
         self.VS_VERSIONINFO = versioninfo_struct
-        
+
         # The the Key attribute to point to the unicode string identifying the structure
         #
         self.VS_VERSIONINFO.Key = versioninfo_string
-        
-        
+
+
         # Process the fixed version information, get the offset and structure
         #
         fixedfileinfo_offset = self.dword_align(
@@ -2943,34 +2961,34 @@
             self.__VS_FIXEDFILEINFO_format__,
             raw_data[fixedfileinfo_offset:],
             file_offset = start_offset+fixedfileinfo_offset )
-        
+
         if not fixedfileinfo_struct:
             return
-        
+
         # Set the PE object's VS_FIXEDFILEINFO to this one
         #
         self.VS_FIXEDFILEINFO = fixedfileinfo_struct
-        
-        
+
+
         # Start parsing all the StringFileInfo and VarFileInfo structures
         #
-        
+
         # Get the first one
         #
         stringfileinfo_offset = self.dword_align(
             fixedfileinfo_offset + fixedfileinfo_struct.sizeof(),
             version_struct.OffsetToData)
         original_stringfileinfo_offset = stringfileinfo_offset
-        
-        
+
+
         # Set the PE object's attribute that will contain them all.
         #
         self.FileInfo = list()
-        
-        
+
+
         while True:
-            
-            # Process the StringFileInfo/VarFileInfo struct
+
+            # Process the StringFileInfo/VarFileInfo structure
             #
             stringfileinfo_struct = self.__unpack_data__(
                 self.__StringFileInfo_format__,
@@ -2981,7 +2999,7 @@
                 self.__warnings.append(
                     'Error parsing StringFileInfo/VarFileInfo struct' )
                 return None
-            
+
             # Get the subsequent string defining the structure.
             #
             ustr_offset = ( version_struct.OffsetToData +
@@ -2994,42 +3012,42 @@
                     'attempting to read StringFileInfo string. Can\'t ' +
                     'read unicode string at offset 0x%x' %  ( ustr_offset ) )
                 break
-            
+
             # Set such string as the Key attribute
             #
             stringfileinfo_struct.Key = stringfileinfo_string
-            
-            
+
+
             # Append the structure to the PE object's list
             #
             self.FileInfo.append(stringfileinfo_struct)
-            
-            
+
+
             # Parse a StringFileInfo entry
             #
             if stringfileinfo_string and stringfileinfo_string.startswith(u'StringFileInfo'):
-                
+
                 if stringfileinfo_struct.Type in (0,1) and stringfileinfo_struct.ValueLength == 0:
-                    
+
                     stringtable_offset = self.dword_align(
                         stringfileinfo_offset + stringfileinfo_struct.sizeof() +
                             2*(len(stringfileinfo_string)+1),
                         version_struct.OffsetToData)
-                    
+
                     stringfileinfo_struct.StringTable = list()
-                    
+
                     # Process the String Table entries
                     #
                     while True:
-                        
+
                         stringtable_struct = self.__unpack_data__(
                             self.__StringTable_format__,
                             raw_data[stringtable_offset:],
                             file_offset = start_offset+stringtable_offset )
-                        
+
                         if not stringtable_struct:
                             break
-                        
+
                         ustr_offset = ( version_struct.OffsetToData + stringtable_offset +
                             stringtable_struct.sizeof() )
                         try:
@@ -3040,30 +3058,30 @@
                                 'attempting to read StringTable string. Can\'t ' +
                                 'read unicode string at offset 0x%x' % ( ustr_offset ) )
                             break
-                        
+
                         stringtable_struct.LangID = stringtable_string
                         stringtable_struct.entries = dict()
                         stringtable_struct.entries_offsets = dict()
                         stringtable_struct.entries_lengths = dict()
                         stringfileinfo_struct.StringTable.append(stringtable_struct)
-                        
+
                         entry_offset = self.dword_align(
                             stringtable_offset + stringtable_struct.sizeof() +
                                 2*(len(stringtable_string)+1),
                             version_struct.OffsetToData)
-                        
+
                         # Process all entries in the string table
                         #
-                        
+
                         while entry_offset < stringtable_offset + stringtable_struct.Length:
-                            
+
                             string_struct = self.__unpack_data__(
                                 self.__String_format__, raw_data[entry_offset:],
                                 file_offset = start_offset+entry_offset )
-                            
+
                             if not string_struct:
                                 break
-                            
+
                             ustr_offset = ( version_struct.OffsetToData + entry_offset +
                                 string_struct.sizeof() )
                             try:
@@ -3075,11 +3093,11 @@
                                     'attempting to read StringTable Key string. Can\'t ' +
                                     'read unicode string at offset 0x%x' % ( ustr_offset ) )
                                 break
-                            
+
                             value_offset = self.dword_align(
                                 2*(len(key)+1) + entry_offset + string_struct.sizeof(),
                                 version_struct.OffsetToData)
-                            
+
                             ustr_offset = version_struct.OffsetToData + value_offset
                             try:
                                 value = self.get_string_u_at_rva( ustr_offset,
@@ -3092,70 +3110,70 @@
                                     'Can\'t read unicode string at offset 0x%x' % (
                                     ustr_offset ) )
                                 break
-                            
+
                             if string_struct.Length == 0:
                                 entry_offset = stringtable_offset + stringtable_struct.Length
                             else:
                                 entry_offset = self.dword_align(
                                     string_struct.Length+entry_offset, version_struct.OffsetToData)
-                            
+
                             key_as_char = []
                             for c in key:
                                 if ord(c) >= 0x80:
                                     key_as_char.append('\\x%02x' % ord(c))
                                 else:
                                     key_as_char.append(c)
-                            
+
                             key_as_char = ''.join(key_as_char)
 
                             setattr(stringtable_struct, key_as_char, value)
                             stringtable_struct.entries[key] = value
                             stringtable_struct.entries_offsets[key] = (key_offset, value_offset)
                             stringtable_struct.entries_lengths[key] = (len(key), len(value))
-                        
-                        
+
+
                         new_stringtable_offset = self.dword_align(
                             stringtable_struct.Length + stringtable_offset,
                             version_struct.OffsetToData)
-                            
+
                         # check if the entry is crafted in a way that would lead to an infinite
                         # loop and break if so
                         #
                         if new_stringtable_offset == stringtable_offset:
                             break
                         stringtable_offset = new_stringtable_offset
-                            
+
                         if stringtable_offset >= stringfileinfo_struct.Length:
                             break
-            
+
             # Parse a VarFileInfo entry
             #
             elif stringfileinfo_string and stringfileinfo_string.startswith( u'VarFileInfo' ):
-                
+
                 varfileinfo_struct = stringfileinfo_struct
                 varfileinfo_struct.name = 'VarFileInfo'
-                
+
                 if varfileinfo_struct.Type in (0, 1) and varfileinfo_struct.ValueLength == 0:
-                    
+
                     var_offset = self.dword_align(
                         stringfileinfo_offset + varfileinfo_struct.sizeof() +
                             2*(len(stringfileinfo_string)+1),
                         version_struct.OffsetToData)
-                    
+
                     varfileinfo_struct.Var = list()
-                    
+
                     # Process all entries
                     #
-                    
+
                     while True:
                         var_struct = self.__unpack_data__(
                             self.__Var_format__,
                             raw_data[var_offset:],
                             file_offset = start_offset+var_offset )
-                        
+
                         if not var_struct:
                             break
-                        
+
                         ustr_offset = ( version_struct.OffsetToData + var_offset +
                             var_struct.sizeof() )
                         try:
@@ -3166,64 +3184,64 @@
                                 'attempting to read VarFileInfo Var string. ' +
                                 'Can\'t read unicode string at offset 0x%x' % (ustr_offset))
                             break
-                        
-                        
+
+
                         varfileinfo_struct.Var.append(var_struct)
-                        
+
                         varword_offset = self.dword_align(
                             2*(len(var_string)+1) + var_offset + var_struct.sizeof(),
                             version_struct.OffsetToData)
                         orig_varword_offset = varword_offset
-                        
+
                         while varword_offset < orig_varword_offset + var_struct.ValueLength:
                             word1 = self.get_word_from_data(
                                 raw_data[varword_offset:varword_offset+2], 0)
                             word2 = self.get_word_from_data(
                                 raw_data[varword_offset+2:varword_offset+4], 0)
                             varword_offset += 4
-                            
+
                             if isinstance(word1, (int, long)) and isinstance(word2, (int, long)):
                                 var_struct.entry = {var_string: '0x%04x 0x%04x' % (word1, word2)}
-                        
+
                         var_offset = self.dword_align(
                             var_offset+var_struct.Length, version_struct.OffsetToData)
-                        
+
                         if var_offset <= var_offset+var_struct.Length:
                             break
-                
-            
+
+
             # Increment and align the offset
             #
             stringfileinfo_offset = self.dword_align(
                 stringfileinfo_struct.Length+stringfileinfo_offset,
                 version_struct.OffsetToData)
-            
+
             # Check if all the StringFileInfo and VarFileInfo items have been processed
             #
             if stringfileinfo_struct.Length == 0 or stringfileinfo_offset >= versioninfo_struct.Length:
                 break
-            
-        
-    
+
+
+
     def parse_export_directory(self, rva, size):
         """Parse the export directory.
-        
+
         Given the RVA of the export directory, it will process all
         its entries.
-        
+
         The exports will be made available through a list "exports"
         containing a tuple with the following elements:
-            
+
             (ordinal, symbol_address, symbol_name)
-        
+
         And also through a dictionary "exports_by_ordinal" whose keys
         will be the ordinals and the values tuples of the from:
-            
+
             (symbol_address, symbol_name)
-        
+
         The symbol addresses are relative, not absolute.
         """
-        
+
         try:
             export_dir =  self.__unpack_data__(
                 self.__IMAGE_EXPORT_DIRECTORY_format__,
@@ -3233,17 +3251,17 @@
             self.__warnings.append(
                 'Error parsing export directory at RVA: 0x%x' % ( rva ) )
             return
-        
+
         if not export_dir:
             return
-        
+
         # We keep track of the bytes left in the file and use it to set a upper
         # bound in the number of items that can be read from the different
         # arrays
         #
         def length_until_eof(rva):
             return len(self.__data__) - self.get_offset_from_rva(rva)
-        
+
         try:
             address_of_names = self.get_data(
                 export_dir.AddressOfNames, min( length_until_eof(export_dir.AddressOfNames), export_dir.NumberOfNames*4))
@@ -3255,11 +3273,11 @@
             self.__warnings.append(
                 'Error parsing export directory at RVA: 0x%x' % ( rva ) )
             return
-        
+
         exports = []
-        
+
         max_failed_entries_before_giving_up = 10
-        
+
         section = self.get_section_by_rva(export_dir.AddressOfNames)
         if not section:
             self.__warnings.append(
@@ -3268,7 +3286,7 @@
             return
         else:
             safety_boundary = section.VirtualAddress + len(section.get_data()) - export_dir.AddressOfNames
-            
+
         for i in xrange( min( export_dir.NumberOfNames, safety_boundary/4) ):
             symbol_name_address = self.get_dword_from_data(address_of_names, i)
 
@@ -3276,7 +3294,7 @@
                 max_failed_entries_before_giving_up -= 1
                 if max_failed_entries_before_giving_up <= 0:
                     break
-                
+
             symbol_name = self.get_string_at_rva( symbol_name_address )
             try:
                 symbol_name_offset = self.get_offset_from_rva( symbol_name_address )
@@ -3285,11 +3303,11 @@
                 if max_failed_entries_before_giving_up <= 0:
                     break
                 continue
-            
+
             symbol_ordinal = self.get_word_from_data(
                 address_of_name_ordinals, i)
 
-            
+
             if symbol_ordinal is not None and symbol_ordinal*4 < len(address_of_functions):
                 symbol_address = self.get_dword_from_data(
                     address_of_functions, symbol_ordinal)
@@ -3300,11 +3318,11 @@
 
             if symbol_address is None or symbol_address == 0:
                 continue
-            
-            # If the funcion's RVA points within the export directory
+
+            # If the function's RVA points within the export directory
             # it will point to a string with the forwarded symbol's string
             # instead of pointing the the function start address.
-            
+
             if symbol_address >= rva and symbol_address < rva+size:
                 forwarder_str = self.get_string_at_rva(symbol_address)
                 try:
@@ -3314,7 +3332,7 @@
             else:
                 forwarder_str = None
                 forwarder_offset = None
-            
+
             exports.append(
                 ExportData(
                     pe = self,
@@ -3326,9 +3344,9 @@
                     name_offset = symbol_name_offset,
                     forwarder = forwarder_str,
                     forwarder_offset = forwarder_offset ))
-        
+
         ordinals = [exp.ordinal for exp in exports]
-        
+
         max_failed_entries_before_giving_up = 10
 
         section = self.get_section_by_rva(export_dir.AddressOfFunctions)
@@ -3339,23 +3357,23 @@
             return
         else:
             safety_boundary = section.VirtualAddress + len(section.get_data()) - export_dir.AddressOfFunctions
-            
+
         safety_boundary = section.VirtualAddress + len(section.get_data()) - export_dir.AddressOfFunctions
-        
+
         for idx in xrange( min(export_dir.NumberOfFunctions, safety_boundary/4) ):
-            
+
             if not idx+export_dir.Base in ordinals:
                 try:
                     symbol_address = self.get_dword_from_data(
                         address_of_functions, idx)
                 except PEFormatError:
                     symbol_address = None
-                
+
                 if symbol_address is None:
                     max_failed_entries_before_giving_up -= 1
                     if max_failed_entries_before_giving_up <= 0:
                         break
-                
+
                 if symbol_address == 0:
                     continue
                 #
@@ -3372,19 +3390,19 @@
                         address = symbol_address,
                         name = None,
                         forwarder = forwarder_str))
-        
+
         return ExportDirData(
                 struct = export_dir,
                 symbols = exports)
-        
-    
+
+
     def dword_align(self, offset, base):
         return ((offset+base+3) & 0xfffffffcL) - (base & 0xfffffffcL)
-        
-    
+
+
     def parse_delay_import_directory(self, rva, size):
         """Walk and parse the delay import directory."""
-        
+
         import_descs =  []
         while True:
             try:
@@ -3395,19 +3413,19 @@
                 self.__warnings.append(
                     'Error parsing the Delay import directory at RVA: 0x%x' % ( rva ) )
                 break
-            
+
             import_desc =  self.__unpack_data__(
                 self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__,
                 data, file_offset = self.get_offset_from_rva(rva) )
-            
-            
-            # If the structure is all zeroes, we reached the end of the list
+
+
+            # If the structure is all zeros, we reached the end of the list
             if not import_desc or import_desc.all_zeroes():
                 break
-            
-            
+
+
             rva += import_desc.sizeof()
-            
+
             try:
                 import_data =  self.parse_imports(
                     import_desc.pINT,
@@ -3418,11 +3436,11 @@
                     'Error parsing the Delay import directory. ' +
                     'Invalid import data at RVA: 0x%x' % ( rva ) )
                 break
-            
+
             if not import_data:
                 continue
-            
-            
+
+
             dll = self.get_string_at_rva(import_desc.szName)
             if not is_valid_dos_filename(dll):
                 dll = '*invalid*'
@@ -3433,14 +3451,14 @@
                         struct = import_desc,
                         imports = import_data,
                         dll = dll))
-        
-        return import_descs
-                    
 
-    
+        return import_descs
+
+
+
     def parse_import_directory(self, rva, size):
         """Walk and parse the import directory."""
-        
+
         import_descs =  []
         while True:
             try:
@@ -3451,17 +3469,17 @@
                 self.__warnings.append(
                     'Error parsing the import directory at RVA: 0x%x' % ( rva ) )
                 break
-            
+
             import_desc =  self.__unpack_data__(
                 self.__IMAGE_IMPORT_DESCRIPTOR_format__,
                 data, file_offset = self.get_offset_from_rva(rva) )
-            
-            # If the structure is all zeroes, we reached the end of the list
+
+            # If the structure is all zeros, we reached the end of the list
             if not import_desc or import_desc.all_zeroes():
                 break
-            
+
             rva += import_desc.sizeof()
-            
+
             try:
                 import_data =  self.parse_imports(
                     import_desc.OriginalFirstThunk,
@@ -3472,22 +3490,21 @@
                     'Error parsing the import directory. ' +
                     'Invalid Import data at RVA: 0x%x (%s)' % ( rva, str(excp) ) )
                 break
-                #raise excp
-            
+
             if not import_data:
                 continue
-            
+
             dll = self.get_string_at_rva(import_desc.Name)
             if not is_valid_dos_filename(dll):
                 dll = '*invalid*'
-                
+
             if dll:
                 import_descs.append(
                     ImportDescData(
                         struct = import_desc,
                         imports = import_data,
                         dll = dll))
-        
+
         suspicious_imports = set([ 'LoadLibrary', 'GetProcAddress' ])
         suspicious_imports_count = 0
         total_symbols = 0
@@ -3501,32 +3518,32 @@
         if suspicious_imports_count == len(suspicious_imports) and total_symbols < 20:
             self.__warnings.append(
                 'Imported symbols contain entries typical of packed executables.' )
-            
-            
-        
+
+
+
         return import_descs
 
-    
-    
+
+
     def parse_imports(self, original_first_thunk, first_thunk, forwarder_chain):
         """Parse the imported symbols.
-        
+
         It will fill a list, which will be available as the dictionary
         attribute "imports". Its keys will be the DLL names and the values
         all the symbols imported from that object.
         """
-        
+
         imported_symbols = []
-        
+
         # The following has been commented as a PE does not
-        # need to have the import data necessarily witin
+        # need to have the import data necessarily within
         # a section, it can keep it in gaps between sections
         # or overlapping other data.
         #
         #imports_section = self.get_section_by_rva(first_thunk)
         #if not imports_section:
         #    raise PEFormatError, 'Invalid/corrupt imports.'
-        
+
         # Import Lookup Table. Contains ordinals or pointers to strings.
         ilt = self.get_import_table(original_first_thunk)
         # Import Address Table. May have identical content to ILT if
@@ -3541,7 +3558,7 @@
             raise PEFormatError(
                 'Invalid Import Table information. ' +
                 'Both ILT and IAT appear to be broken.')
-        
+
         table = None
         if ilt:
             table = ilt
@@ -3549,7 +3566,7 @@
             table = iat
         else:
             return None
-        
+
         imp_offset = 4
         address_mask = 0x7fffffff
         if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
@@ -3558,17 +3575,17 @@
             ordinal_flag = IMAGE_ORDINAL_FLAG64
             imp_offset = 8
             address_mask = 0x7fffffffffffffffL
-        
+
         for idx in xrange(len(table)):
-            
+
             imp_ord = None
             imp_hint = None
             imp_name = None
             name_offset = None
             hint_name_table_rva = None
-            
+
             if table[idx].AddressOfData:
-                
+
                 # If imported by ordinal, we will append the ordinal number
                 #
                 if table[idx].AddressOfData & ordinal_flag:
@@ -3586,7 +3603,7 @@
                         imp_name = self.get_string_at_rva(table[idx].AddressOfData+2)
                         if not is_valid_function_name(imp_name):
                             imp_name = '*invalid*'
-                        
+
                         name_offset = self.get_offset_from_rva(table[idx].AddressOfData+2)
                     except PEFormatError, e:
                         pass
@@ -3594,12 +3611,12 @@
                 # by nriva: we want the ThunkRVA and ThunkOffset
                 thunk_offset = table[idx].get_file_offset()
                 thunk_rva = self.get_rva_from_offset(thunk_offset)
-                
+
             imp_address = first_thunk + self.OPTIONAL_HEADER.ImageBase + idx * imp_offset
-            
+
             struct_iat = None
             try:
-                
+
                 if iat and ilt and ilt[idx].AddressOfData != iat[idx].AddressOfData:
                     imp_bound = iat[idx].AddressOfData
                     struct_iat = iat[idx]
@@ -3607,7 +3624,7 @@
                     imp_bound = None
             except IndexError:
                 imp_bound = None
-            
+
             # The file with hashes:
             #
             # MD5: bfe97192e8107d52dd7b4010d12b2924
@@ -3621,7 +3638,7 @@
             #
             if imp_ord == None and imp_name == None:
                 raise PEFormatError( 'Invalid entries in the Import Table. Aborting parsing.' )
-            
+
             if imp_name != '' and (imp_ord or imp_name):
                 imported_symbols.append(
                     ImportData(
@@ -3639,13 +3656,13 @@
                         hint_name_table_rva = hint_name_table_rva,
                         thunk_offset = thunk_offset,
                         thunk_rva = thunk_rva ))
-        
+
         return imported_symbols
 
 
-    
+
     def get_import_table(self, rva):
-        
+
         table = []
 
         # We need the ordinal flag for a simple heuristic
@@ -3664,22 +3681,22 @@
         addresses_of_data_set_64 = set()
         addresses_of_data_set_32 = set()
         while True and rva:
-            
+
             # if we see too many times the same entry we assume it could be
-            # a table containing bogus data (with malicious intenet or otherwise)
+            # a table containing bogus data (with malicious intent or otherwise)
             if repeated_address >= MAX_REPEATED_ADDRESSES:
                 return []
-                
-            # if the addresses point somwhere but the difference between the highest
+
+            # if the addresses point somewhere but the difference between the highest
             # and lowest address is larger than MAX_ADDRESS_SPREAD we assume a bogus
             # table as the addresses should be contained within a module
-            if (addresses_of_data_set_32 and 
+            if (addresses_of_data_set_32 and
                 max(addresses_of_data_set_32) - min(addresses_of_data_set_32) > MAX_ADDRESS_SPREAD ):
                 return []
-            if (addresses_of_data_set_64 and 
+            if (addresses_of_data_set_64 and
                 max(addresses_of_data_set_64) - min(addresses_of_data_set_64) > MAX_ADDRESS_SPREAD ):
                 return []
-                
+
             try:
                 data = self.get_data( rva, Structure(format).sizeof() )
             except PEFormatError, e:
@@ -3687,7 +3704,7 @@
                     'Error parsing the import table. ' +
                     'Invalid data at RVA: 0x%x' % ( rva ) )
                 return None
-            
+
             thunk_data = self.__unpack_data__(
                 format, data, file_offset=self.get_offset_from_rva(rva) )
 
@@ -3703,7 +3720,7 @@
                     # keep track of the RVAs seen and store them to study their
                     # properties. When certain non-standard features are detected
                     # the parsing will be aborted
-                    if (thunk_data.AddressOfData in addresses_of_data_set_32 or 
+                    if (thunk_data.AddressOfData in addresses_of_data_set_32 or
                         thunk_data.AddressOfData in addresses_of_data_set_64):
                         repeated_address += 1
                     if thunk_data.AddressOfData >= 2**32:
@@ -3713,100 +3730,100 @@
 
             if not thunk_data or thunk_data.all_zeroes():
                 break
-            
+
             rva += thunk_data.sizeof()
-            
+
             table.append(thunk_data)
-        
+
         return table
-    
-    
+
+
     def get_memory_mapped_image(self, max_virtual_address=0x10000000, ImageBase=None):
         """Returns the data corresponding to the memory layout of the PE file.
-        
+
         The data includes the PE header and the sections loaded at offsets
         corresponding to their relative virtual addresses. (the VirtualAddress
         section header member).
         Any offset in this data corresponds to the absolute memory address
         ImageBase+offset.
-        
+
         The optional argument 'max_virtual_address' provides with means of limiting
         which section are processed.
         Any section with their VirtualAddress beyond this value will be skipped.
         Normally, sections with values beyond this range are just there to confuse
         tools. It's a common trick to see in packed executables.
-        
+
         If the 'ImageBase' optional argument is supplied, the file's relocations
         will be applied to the image by calling the 'relocate_image()' method. Beware
         that the relocation information is applied permanently.
         """
-        
+
         # Rebase if requested
         #
         if ImageBase is not None:
-            
+
             # Keep a copy of the image's data before modifying it by rebasing it
             #
             original_data = self.__data__
-            
+
             self.relocate_image(ImageBase)
-            
+
         # Collect all sections in one code block
         #mapped_data = self.header
         mapped_data = '' + self.__data__[:]
         for section in self.sections:
-            
+
             # Miscellaneous integrity tests.
             # Some packer will set these to bogus values to
             # make tools go nuts.
             #
             if section.Misc_VirtualSize == 0 or section.SizeOfRawData == 0:
                 continue
-            
+
             if section.SizeOfRawData > len(self.__data__):
                 continue
-            
+
             if self.adjust_FileAlignment( section.PointerToRawData,
                 self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__):
-                
+
                 continue
-            
-            VirtualAddress_adj = self.adjust_SectionAlignment( section.VirtualAddress, 
+
+            VirtualAddress_adj = self.adjust_SectionAlignment( section.VirtualAddress,
                 self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment )
-            
+
             if VirtualAddress_adj >= max_virtual_address:
                 continue
-            
+
             padding_length = VirtualAddress_adj - len(mapped_data)
-            
+
             if padding_length>0:
                 mapped_data += '\0'*padding_length
             elif padding_length<0:
                 mapped_data = mapped_data[:padding_length]
-            
+
             mapped_data += section.get_data()
-            
+
         # If the image was rebased, restore it to its original form
         #
         if ImageBase is not None:
             self.__data__ = original_data
-            
+
         return mapped_data
 
 
     def get_resources_strings(self):
         """Returns a list of all the strings found withing the resources (if any).
-        
+
         This method will scan all entries in the resources directory of the PE, if
         there is one, and will return a list() with the strings.
-        
+
         An empty list will be returned otherwise.
         """
-        
+
         resources_strings = list()
-        
+
         if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):
-            
+
             for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
                 if hasattr(resource_type, 'directory'):
                     for resource_id in resource_type.directory.entries:
@@ -3814,19 +3831,19 @@
                             if hasattr(resource_id.directory, 'strings') and resource_id.directory.strings:
                                 for res_string in resource_id.directory.strings.values():
                                     resources_strings.append( res_string )
-                                    
+
         return resources_strings
 
 
     def get_data(self, rva=0, length=None):
         """Get data regardless of the section where it lies on.
-        
+
         Given a RVA and the size of the chunk to retrieve, this method
         will find the section where the data lies and return the data.
         """
-        
+
         s = self.get_section_by_rva(rva)
-        
+
         if length:
             end = rva + length
         else:
@@ -3835,7 +3852,7 @@
         if not s:
             if rva < len(self.header):
                 return self.header[rva:end]
-            
+
             # Before we give up we check whether the file might
             # contain the data anyway. There are cases of PE files
             # without sections that rely on windows loading the first
@@ -3847,19 +3864,19 @@
             #
             if rva < len(self.__data__):
                 return self.__data__[rva:end]
-            
+
             raise PEFormatError, 'data at RVA can\'t be fetched. Corrupt header?'
-        
+
         return s.get_data(rva, length)
 
-    
+
     def get_rva_from_offset(self, offset):
         """Get the RVA corresponding to this file offset. """
-        
+
         s = self.get_section_by_offset(offset)
         if not s:
             if self.sections:
-                lowest_rva = min( [ self.adjust_SectionAlignment( s.VirtualAddress, 
+                lowest_rva = min( [ self.adjust_SectionAlignment( s.VirtualAddress,
                     self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) for s in self.sections] )
                 if offset < lowest_rva:
                     # We will assume that the offset lies within the headers, or
@@ -3875,14 +3892,14 @@
                 return offset
             #raise PEFormatError("specified offset (0x%x) doesn't belong to any section." % offset)
         return s.get_rva_from_offset(offset)
-    
+
     def get_offset_from_rva(self, rva):
         """Get the file offset corresponding to this RVA.
-        
+
         Given a RVA , this method will find the section where the
         data lies and return the offset within the file.
         """
-        
+
         s = self.get_section_by_rva(rva)
         if not s:
 
@@ -3892,12 +3909,12 @@
             # cases the RVA should equal the offset
             if rva<len(self.__data__):
                 return rva
-                
+
             raise PEFormatError, 'data at RVA can\'t be fetched. Corrupt header?'
-        
+
         return s.get_offset_from_rva(rva)
-            
-    
+
+
     def get_string_at_rva(self, rva):
         """Get an ASCII string located at the given address."""
 
@@ -3907,21 +3924,21 @@
         s = self.get_section_by_rva(rva)
         if not s:
             return self.get_string_from_data(0, self.__data__[rva:rva+MAX_STRING_LENGTH])
-        
+
         return self.get_string_from_data( 0, s.get_data(rva, length=MAX_STRING_LENGTH) )
-        
-    
+
+
     def get_string_from_data(self, offset, data):
         """Get an ASCII string from within the data."""
-        
+
         # OC Patch
         b = None
-        
+
         try:
             b = data[offset]
         except IndexError:
             return ''
-        
+
         s = ''
         while ord(b):
             s += b
@@ -3930,94 +3947,92 @@
                 b = data[offset]
             except IndexError:
                 break
-        
+
         return s
-                
-    
+
+
     def get_string_u_at_rva(self, rva, max_length = 2**16):
         """Get an Unicode string located at the given address."""
-        
+
         try:
             # If the RVA is invalid all would blow up. Some EXEs seem to be
             # specially nasty and have an invalid RVA.
             data = self.get_data(rva, 2)
         except PEFormatError, e:
             return None
-        
-        #length = struct.unpack('<H', data)[0]
-        
+
         s = u''
         for idx in xrange(max_length):
             try:
                 uchr = struct.unpack('<H', self.get_data(rva+2*idx, 2))[0]
             except struct.error:
                 break
-            
+
             if unichr(uchr) == u'\0':
                 break
             s += unichr(uchr)
-        
+
         return s
 
-    
+
     def get_section_by_offset(self, offset):
         """Get the section containing the given file offset."""
-        
+
         sections = [s for s in self.sections if s.contains_offset(offset)]
-        
+
         if sections:
             return sections[0]
-        
+
         return None
 
-    
+
     def get_section_by_rva(self, rva):
         """Get the section containing the given address."""
-        
+
         sections = [s for s in self.sections if s.contains_rva(rva)]
-        
+
         if sections:
             return sections[0]
-        
+
         return None
-    
+
     def __str__(self):
         return self.dump_info()
-        
-    
+
+
     def print_info(self):
         """Print all the PE header information in a human readable from."""
         print self.dump_info()
-        
-    
+
+
     def dump_info(self, dump=None):
         """Dump all the PE header information into human readable string."""
-        
-        
+
+
         if dump is None:
             dump = Dump()
-        
+
         warnings = self.get_warnings()
         if warnings:
             dump.add_header('Parsing Warnings')
             for warning in warnings:
                 dump.add_line(warning)
                 dump.add_newline()
-        
-        
+
+
         dump.add_header('DOS_HEADER')
         dump.add_lines(self.DOS_HEADER.dump())
         dump.add_newline()
-        
+
         dump.add_header('NT_HEADERS')
         dump.add_lines(self.NT_HEADERS.dump())
         dump.add_newline()
-        
+
         dump.add_header('FILE_HEADER')
         dump.add_lines(self.FILE_HEADER.dump())
-        
+
         image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
-        
+
         dump.add('Flags: ')
         flags = []
         for flag in image_flags:
@@ -4025,13 +4040,13 @@
                 flags.append(flag[0])
         dump.add_line(', '.join(flags))
         dump.add_newline()
-        
+
         if hasattr(self, 'OPTIONAL_HEADER') and self.OPTIONAL_HEADER is not None:
             dump.add_header('OPTIONAL_HEADER')
             dump.add_lines(self.OPTIONAL_HEADER.dump())
-        
+
         dll_characteristics_flags = retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLL_CHARACTERISTICS_')
-        
+
         dump.add('DllCharacteristics: ')
         flags = []
         for flag in dll_characteristics_flags:
@@ -4039,12 +4054,12 @@
                 flags.append(flag[0])
         dump.add_line(', '.join(flags))
         dump.add_newline()
-        
-        
+
+
         dump.add_header('PE Sections')
-        
+
         section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
-        
+
         for section in self.sections:
             dump.add_lines(section.dump())
             dump.add('Flags: ')
@@ -4063,42 +4078,42 @@
             if sha512 is not None:
                 dump.add_line('SHA-512 hash: %s' % section.get_hash_sha512() )
             dump.add_newline()
-            
-        
-        
+
+
+
         if (hasattr(self, 'OPTIONAL_HEADER') and
             hasattr(self.OPTIONAL_HEADER, 'DATA_DIRECTORY') ):
-            
+
             dump.add_header('Directories')
             for idx in xrange(len(self.OPTIONAL_HEADER.DATA_DIRECTORY)):
                 directory = self.OPTIONAL_HEADER.DATA_DIRECTORY[idx]
                 dump.add_lines(directory.dump())
             dump.add_newline()
-            
-        
+
+
         def convert_char(char):
             if char in string.ascii_letters or char in string.digits or char in string.punctuation or char in string.whitespace:
                 return char
             else:
                 return r'\x%02x' % ord(char)
-                
+
         def convert_to_printable(s):
             return ''.join([convert_char(c) for c in s])
-        
+
         if hasattr(self, 'VS_VERSIONINFO'):
             dump.add_header('Version Information')
             dump.add_lines(self.VS_VERSIONINFO.dump())
             dump.add_newline()
-            
+
             if hasattr(self, 'VS_FIXEDFILEINFO'):
                 dump.add_lines(self.VS_FIXEDFILEINFO.dump())
                 dump.add_newline()
-            
+
             if hasattr(self, 'FileInfo'):
                 for entry in self.FileInfo:
                     dump.add_lines(entry.dump())
                     dump.add_newline()
-                    
+
                     if hasattr(entry, 'StringTable'):
                         for st_entry in entry.StringTable:
                             [dump.add_line('  '+line) for line in st_entry.dump()]
@@ -4109,20 +4124,20 @@
                                     convert_to_printable(str_entry[0]) + ': ' +
                                     convert_to_printable(str_entry[1]) )
                         dump.add_newline()
-                    
+
                     elif hasattr(entry, 'Var'):
                         for var_entry in entry.Var:
                             if hasattr(var_entry, 'entry'):
                                 [dump.add_line('  '+line) for line in var_entry.dump()]
                                 dump.add_line(
-                                    '    ' + 
+                                    '    ' +
                                     convert_to_printable(var_entry.entry.keys()[0]) +
                                     ': ' + var_entry.entry.values()[0])
-                        
+
                         dump.add_newline()
 
-            
-        
+
+
         if hasattr(self, 'DIRECTORY_ENTRY_EXPORT'):
             dump.add_header('Exported symbols')
             dump.add_lines(self.DIRECTORY_ENTRY_EXPORT.struct.dump())
@@ -4136,51 +4151,51 @@
                         dump.add_line(' forwarder: %s' % export.forwarder)
                     else:
                         dump.add_newline()
-            
+
             dump.add_newline()
-        
+
         if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
             dump.add_header('Imported symbols')
             for module in self.DIRECTORY_ENTRY_IMPORT:
                 dump.add_lines(module.struct.dump())
                 dump.add_newline()
                 for symbol in module.imports:
-                    
+
                     if symbol.import_by_ordinal is True:
                         dump.add('%s Ordinal[%s] (Imported by Ordinal)' % (
                             module.dll, str(symbol.ordinal)))
                     else:
                         dump.add('%s.%s Hint[%s]' % (
                             module.dll, symbol.name, str(symbol.hint)))
-                    
+
                     if symbol.bound:
                         dump.add_line(' Bound: 0x%08X' % (symbol.bound))
                     else:
                         dump.add_newline()
                 dump.add_newline()
-        
-        
+
+
         if hasattr(self, 'DIRECTORY_ENTRY_BOUND_IMPORT'):
             dump.add_header('Bound imports')
             for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT:
-                
+
                 dump.add_lines(bound_imp_desc.struct.dump())
                 dump.add_line('DLL: %s' % bound_imp_desc.name)
                 dump.add_newline()
-                
+
                 for bound_imp_ref in bound_imp_desc.entries:
                     dump.add_lines(bound_imp_ref.struct.dump(), 4)
                     dump.add_line('DLL: %s' % bound_imp_ref.name, 4)
                     dump.add_newline()
 
-        
+
         if hasattr(self, 'DIRECTORY_ENTRY_DELAY_IMPORT'):
             dump.add_header('Delay Imported symbols')
             for module in self.DIRECTORY_ENTRY_DELAY_IMPORT:
-                
+
                 dump.add_lines(module.struct.dump())
                 dump.add_newline()
-                
+
                 for symbol in module.imports:
                     if symbol.import_by_ordinal is True:
                         dump.add('%s Ordinal[%s] (Imported by Ordinal)' % (
@@ -4188,21 +4203,21 @@
                     else:
                         dump.add('%s.%s Hint[%s]' % (
                             module.dll, symbol.name, str(symbol.hint)))
-                    
+
                     if symbol.bound:
                         dump.add_line(' Bound: 0x%08X' % (symbol.bound))
                     else:
                         dump.add_newline()
                 dump.add_newline()
-        
-        
+
+
         if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):
             dump.add_header('Resource directory')
-            
+
             dump.add_lines(self.DIRECTORY_ENTRY_RESOURCE.struct.dump())
-            
+
             for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
-                
+
                 if resource_type.name is not None:
                     dump.add_line('Name: [%s]' % resource_type.name, 2)
                 else:
@@ -4210,31 +4225,31 @@
                         resource_type.struct.Id, RESOURCE_TYPE.get(
                             resource_type.struct.Id, '-')),
                         2)
-                
+
                 dump.add_lines(resource_type.struct.dump(), 2)
-                
+
                 if hasattr(resource_type, 'directory'):
-                    
+
                     dump.add_lines(resource_type.directory.struct.dump(), 4)
-                    
+
                     for resource_id in resource_type.directory.entries:
-                        
+
                         if resource_id.name is not None:
                             dump.add_line('Name: [%s]' % resource_id.name, 6)
                         else:
                             dump.add_line('Id: [0x%X]' % resource_id.struct.Id, 6)
-                        
+
                         dump.add_lines(resource_id.struct.dump(), 6)
-                        
+
                         if hasattr(resource_id, 'directory'):
                             dump.add_lines(resource_id.directory.struct.dump(), 8)
-                            
+
                             for resource_lang in resource_id.directory.entries:
                                 if hasattr(resource_lang, 'data'):
                                     dump.add_line('\\--- LANG [%d,%d][%s,%s]' % (
                                         resource_lang.data.lang,
                                         resource_lang.data.sublang,
-                                        LANG.get(resource_lang.data.lang, '*unknown*'), 
+                                        LANG.get(resource_lang.data.lang, '*unknown*'),
                                         get_sublang_name_for_lang( resource_lang.data.lang, resource_lang.data.sublang ) ), 8)
                                     dump.add_lines(resource_lang.struct.dump(), 10)
                                     dump.add_lines(resource_lang.data.struct.dump(), 12)
@@ -4242,30 +4257,30 @@
                                 dump.add_line( '[STRINGS]' , 10 )
                                 for idx, res_string in resource_id.directory.strings.items():
                                     dump.add_line( '%6d: %s' % (idx, convert_to_printable(res_string) ), 12 )
-                                        
+
                 dump.add_newline()
-            
+
             dump.add_newline()
-        
-        
+
+
         if ( hasattr(self, 'DIRECTORY_ENTRY_TLS') and
              self.DIRECTORY_ENTRY_TLS and
              self.DIRECTORY_ENTRY_TLS.struct ):
-            
+
             dump.add_header('TLS')
             dump.add_lines(self.DIRECTORY_ENTRY_TLS.struct.dump())
             dump.add_newline()
-        
-        
+
+
         if ( hasattr(self, 'DIRECTORY_ENTRY_LOAD_CONFIG') and
              self.DIRECTORY_ENTRY_LOAD_CONFIG and
              self.DIRECTORY_ENTRY_LOAD_CONFIG.struct ):
-             
+
             dump.add_header('LOAD_CONFIG')
             dump.add_lines(self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.dump())
             dump.add_newline()
-        
-        
+
+
         if hasattr(self, 'DIRECTORY_ENTRY_DEBUG'):
             dump.add_header('Debug information')
             for dbg in self.DIRECTORY_ENTRY_DEBUG:
@@ -4275,8 +4290,8 @@
                 except KeyError:
                     dump.add_line('Type: 0x%x(Unknown)' % dbg.struct.Type)
                 dump.add_newline()
-        
-        
+
+
         if hasattr(self, 'DIRECTORY_ENTRY_BASERELOC'):
             dump.add_header('Base relocations')
             for base_reloc in self.DIRECTORY_ENTRY_BASERELOC:
@@ -4290,9 +4305,9 @@
                             reloc.rva, reloc.type), 4)
                 dump.add_newline()
 
-        
+
         return dump.get_text()
-    
+
     # OC Patch
     def get_physical_by_rva(self, rva):
         """Gets the physical address in the PE file from an RVA value."""
@@ -4301,401 +4316,401 @@
         except Exception:
             return None
 
-    
+
     ##
-    # Double-Word get/set
+    # Double-Word get / set
     ##
-    
+
     def get_data_from_dword(self, dword):
         """Return a four byte string representing the double word value. (little endian)."""
         return struct.pack('<L', dword & 0xffffffff)
 
-    
+
     def get_dword_from_data(self, data, offset):
         """Convert four bytes of data to a double word (little endian)
-        
+
         'offset' is assumed to index into a dword array. So setting it to
         N will return a dword out of the data starting at offset N*4.
-        
+
         Returns None if the data can't be turned into a double word.
         """
-        
+
         if (offset+1)*4 > len(data):
             return None
-        
+
         return struct.unpack('<I', data[offset*4:(offset+1)*4])[0]
-        
-    
+
+
     def get_dword_at_rva(self, rva):
         """Return the double word value at the given RVA.
-        
+
         Returns None if the value can't be read, i.e. the RVA can't be mapped
         to a file offset.
         """
-        
+
         try:
             return self.get_dword_from_data(self.get_data(rva)[:4], 0)
         except PEFormatError:
             return None
 
-    
+
     def get_dword_from_offset(self, offset):
         """Return the double word value at the given file offset. (little endian)"""
-        
+
         if offset+4 > len(self.__data__):
             return None
-        
+
         return self.get_dword_from_data(self.__data__[offset:offset+4], 0)
 
-    
+
     def set_dword_at_rva(self, rva, dword):
         """Set the double word value at the file offset corresponding to the given RVA."""
         return self.set_bytes_at_rva(rva, self.get_data_from_dword(dword))
 
-    
+
     def set_dword_at_offset(self, offset, dword):
         """Set the double word value at the given file offset."""
         return self.set_bytes_at_offset(offset, self.get_data_from_dword(dword))
 
 
-    
+
     ##
-    # Word get/set
+    # Word get / set
     ##
-    
+
     def get_data_from_word(self, word):
         """Return a two byte string representing the word value. (little endian)."""
         return struct.pack('<H', word)
 
-    
+
     def get_word_from_data(self, data, offset):
         """Convert two bytes of data to a word (little endian)
-        
+
         'offset' is assumed to index into a word array. So setting it to
         N will return a word out of the data starting at offset N*2.
-        
+
         Returns None if the data can't be turned into a word.
         """
-        
+
         if (offset+1)*2 > len(data):
             return None
-        
+
         return struct.unpack('<H', data[offset*2:(offset+1)*2])[0]
 
-    
+
     def get_word_at_rva(self, rva):
         """Return the word value at the given RVA.
-        
+
         Returns None if the value can't be read, i.e. the RVA can't be mapped
         to a file offset.
         """
-        
+
         try:
             return self.get_word_from_data(self.get_data(rva)[:2], 0)
         except PEFormatError:
             return None
 
-    
+
     def get_word_from_offset(self, offset):
         """Return the word value at the given file offset. (little endian)"""
-        
+
         if offset+2 > len(self.__data__):
             return None
-        
+
         return self.get_word_from_data(self.__data__[offset:offset+2], 0)
 
-    
+
     def set_word_at_rva(self, rva, word):
         """Set the word value at the file offset corresponding to the given RVA."""
         return self.set_bytes_at_rva(rva, self.get_data_from_word(word))
 
-    
+
     def set_word_at_offset(self, offset, word):
         """Set the word value at the given file offset."""
         return self.set_bytes_at_offset(offset, self.get_data_from_word(word))
 
-    
+
     ##
-    # Quad-Word get/set
+    # Quad-Word get / set
     ##
-    
+
     def get_data_from_qword(self, word):
         """Return a eight byte string representing the quad-word value. (little endian)."""
         return struct.pack('<Q', word)
 
-    
+
     def get_qword_from_data(self, data, offset):
         """Convert eight bytes of data to a word (little endian)
-        
+
         'offset' is assumed to index into a qword array. So setting it to
         N will return a qword out of the data starting at offset N*8.
-        
+
         Returns None if the data can't be turned into a quad word.
         """
-        
+
         if (offset+1)*8 > len(data):
             return None
-        
+
         return struct.unpack('<Q', data[offset*8:(offset+1)*8])[0]
 
-    
+
     def get_qword_at_rva(self, rva):
         """Return the quad-word value at the given RVA.
-        
+
         Returns None if the value can't be read, i.e. the RVA can't be mapped
         to a file offset.
         """
-        
+
         try:
             return self.get_qword_from_data(self.get_data(rva)[:8], 0)
         except PEFormatError:
             return None
 
-    
+
     def get_qword_from_offset(self, offset):
         """Return the quad-word value at the given file offset. (little endian)"""
-        
+
         if offset+8 > len(self.__data__):
             return None
-        
+
         return self.get_qword_from_data(self.__data__[offset:offset+8], 0)
 
-    
+
     def set_qword_at_rva(self, rva, qword):
         """Set the quad-word value at the file offset corresponding to the given RVA."""
         return self.set_bytes_at_rva(rva, self.get_data_from_qword(qword))
 
-    
+
     def set_qword_at_offset(self, offset, qword):
         """Set the quad-word value at the given file offset."""
         return self.set_bytes_at_offset(offset, self.get_data_from_qword(qword))
 
 
-    
+
     ##
     # Set bytes
     ##
-    
-    
+
+
     def set_bytes_at_rva(self, rva, data):
         """Overwrite, with the given string, the bytes at the file offset corresponding to the given RVA.
-        
+
         Return True if successful, False otherwise. It can fail if the
         offset is outside the file's boundaries.
         """
-        
+
         if not isinstance(data, str):
             raise TypeError('data should be of type: str')
 
         offset = self.get_physical_by_rva(rva)
         if not offset:
             return False
-        
+
         return self.set_bytes_at_offset(offset, data)
-        
-    
+
+
     def set_bytes_at_offset(self, offset, data):
         """Overwrite the bytes at the given file offset with the given string.
-        
+
         Return True if successful, False otherwise. It can fail if the
         offset is outside the file's boundaries.
         """
-        
+
         if not isinstance(data, str):
             raise TypeError('data should be of type: str')
-        
+
         if offset >= 0 and offset < len(self.__data__):
             self.__data__ = ( self.__data__[:offset] + data + self.__data__[offset+len(data):] )
         else:
             return False
-        
+
         return True
-    
+
 
     def merge_modified_section_data(self):
         """Update the PE image content with any individual section data that has been modified."""
-        
+
         for section in self.sections:
             section_data_start = self.adjust_FileAlignment( section.PointerToRawData,
                 self.OPTIONAL_HEADER.FileAlignment )
             section_data_end = section_data_start+section.SizeOfRawData
             if section_data_start < len(self.__data__) and section_data_end < len(self.__data__):
                 self.__data__ = self.__data__[:section_data_start] + section.get_data() + self.__data__[section_data_end:]
-        
-    
+
+
     def relocate_image(self, new_ImageBase):
         """Apply the relocation information to the image using the provided new image base.
-        
+
         This method will apply the relocation information to the image. Given the new base,
         all the relocations will be processed and both the raw data and the section's data
         will be fixed accordingly.
         The resulting image can be retrieved as well through the method:
-            
+
             get_memory_mapped_image()
-        
+
         In order to get something that would more closely match what could be found in memory
         once the Windows loader finished its work.
         """
-        
+
         relocation_difference = new_ImageBase - self.OPTIONAL_HEADER.ImageBase
-        
-        
+
+
         for reloc in self.DIRECTORY_ENTRY_BASERELOC:
-            
+
             virtual_address = reloc.struct.VirtualAddress
             size_of_block = reloc.struct.SizeOfBlock
-            
+
             # We iterate with an index because if the relocation is of type
             # IMAGE_REL_BASED_HIGHADJ we need to also process the next entry
             # at once and skip it for the next iteration
             #
             entry_idx = 0
             while entry_idx<len(reloc.entries):
-                
+
                 entry = reloc.entries[entry_idx]
                 entry_idx += 1
-                
+
                 if entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_ABSOLUTE']:
                     # Nothing to do for this type of relocation
                     pass
-                
+
                 elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGH']:
-                    # Fix the high 16bits of a relocation
+                    # Fix the high 16-bits of a relocation
                     #
-                    # Add high 16bits of relocation_difference to the
-                    # 16bit value at RVA=entry.rva
-                    
+                    # Add high 16-bits of relocation_difference to the
+                    # 16-bit value at RVA=entry.rva
+
                     self.set_word_at_rva(
                         entry.rva,
                         ( self.get_word_at_rva(entry.rva) + relocation_difference>>16)&0xffff )
-                
+
                 elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_LOW']:
-                    # Fix the low 16bits of a relocation
+                    # Fix the low 16-bits of a relocation
                     #
-                    # Add low 16 bits of relocation_difference to the 16bit value
+                    # Add low 16 bits of relocation_difference to the 16-bit value
                     # at RVA=entry.rva
-                    
+
                     self.set_word_at_rva(
                         entry.rva,
                         ( self.get_word_at_rva(entry.rva) + relocation_difference)&0xffff)
-                
+
                 elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHLOW']:
-                    # Handle all high and low parts of a 32bit relocation
+                    # Handle all high and low parts of a 32-bit relocation
                     #
                     # Add relocation_difference to the value at RVA=entry.rva
-                    
+
                     self.set_dword_at_rva(
                         entry.rva,
                         self.get_dword_at_rva(entry.rva)+relocation_difference)
-                
+
                 elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHADJ']:
-                    # Fix the high 16bits of a relocation and adjust
+                    # Fix the high 16-bits of a relocation and adjust
                     #
-                    # Add high 16bits of relocation_difference to the 32bit value
-                    # composed from the (16bit value at RVA=entry.rva)<<16 plus
-                    # the 16bit value at the next relocation entry.
+                    # Add high 16-bits of relocation_difference to the 32-bit value
+                    # composed from the (16-bit value at RVA=entry.rva)<<16 plus
+                    # the 16-bit value at the next relocation entry.
                     #
-                    
+
                     # If the next entry is beyond the array's limits,
                     # abort... the table is corrupt
                     #
                     if entry_idx == len(reloc.entries):
                         break
-                    
+
                     next_entry = reloc.entries[entry_idx]
                     entry_idx += 1
                     self.set_word_at_rva( entry.rva,
                         ((self.get_word_at_rva(entry.rva)<<16) + next_entry.rva +
                         relocation_difference & 0xffff0000) >> 16 )
-                
+
                 elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_DIR64']:
-                    # Apply the difference to the 64bit value at the offset
+                    # Apply the difference to the 64-bit value at the offset
                     # RVA=entry.rva
-                    
+
                     self.set_qword_at_rva(
                         entry.rva,
                         self.get_qword_at_rva(entry.rva) + relocation_difference)
-                
-    
+
+
     def verify_checksum(self):
-        
+
         return self.OPTIONAL_HEADER.CheckSum == self.generate_checksum()
-        
-    
+
+
     def generate_checksum(self):
-        
+
         # This will make sure that the data representing the PE image
         # is updated with any changes that might have been made by
         # assigning values to header fields as those are not automatically
         # updated upon assignment.
         #
         self.__data__ = self.write()
-        
+
         # Get the offset to the CheckSum field in the OptionalHeader
         #
         checksum_offset = self.OPTIONAL_HEADER.__file_offset__ + 0x40 # 64
-        
+
         checksum = 0
-        
+
         # Verify the data is dword-aligned. Add padding if needed
         #
         remainder = len(self.__data__) % 4
         data = self.__data__ + ( '\0' * ((4-remainder) * ( remainder != 0 )) )
-        
+
         for i in range( len( data ) / 4 ):
-            
+
             # Skip the checksum field
             #
             if i == checksum_offset / 4:
                 continue
-            
+
             dword = struct.unpack('I', data[ i*4 : i*4+4 ])[0]
             checksum = (checksum & 0xffffffff) + dword + (checksum>>32)
             if checksum > 2**32:
                 checksum = (checksum & 0xffffffff) + (checksum >> 32)
-        
+
         checksum = (checksum & 0xffff) + (checksum >> 16)
         checksum = (checksum) + (checksum >> 16)
         checksum = checksum & 0xffff
-        
+
         # The length is the one of the original data, not the padded one
         #
         return checksum + len(self.__data__)
-    
-    
+
+
     def is_exe(self):
         """Check whether the file is a standard executable.
-        
+
         This will return true only if the file has the IMAGE_FILE_EXECUTABLE_IMAGE flag set
         and the IMAGE_FILE_DLL not set and the file does not appear to be a driver either.
         """
-        
+
         EXE_flag = IMAGE_CHARACTERISTICS['IMAGE_FILE_EXECUTABLE_IMAGE']
-        
-        if (not self.is_dll()) and (not self.is_driver()) and ( 
+
+        if (not self.is_dll()) and (not self.is_driver()) and (
                 EXE_flag & self.FILE_HEADER.Characteristics) == EXE_flag:
             return True
-        
+
         return False
-    
-    
+
+
     def is_dll(self):
         """Check whether the file is a standard DLL.
-        
+
         This will return true only if the image has the IMAGE_FILE_DLL flag set.
         """
-        
+
         DLL_flag = IMAGE_CHARACTERISTICS['IMAGE_FILE_DLL']
-        
+
         if ( DLL_flag & self.FILE_HEADER.Characteristics) == DLL_flag:
             return True
-            
+
         return False
-    
-    
+
+
     def is_driver(self):
         """Check whether the file is a Windows driver.
-        
+
         This will return true only if there are reliable indicators of the image
         being a driver.
         """
@@ -4706,21 +4721,21 @@
         # reliable enough indicator.  For instance, PEs that play the invalid
         # ImageBase trick to get relocated could be incorrectly assumed to be
         # drivers.
-         
+
         # This is not reliable either...
-        #   
+        #
         # if any( (section.Characteristics & SECTION_CHARACTERISTICS['IMAGE_SCN_MEM_NOT_PAGED']) for section in self.sections ):
         #    return True
-            
+
         if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
-            
+
             # If it imports from "ntoskrnl.exe" or other kernel components it should be a driver
             #
             if set( ('ntoskrnl.exe', 'hal.dll', 'ndis.sys', 'bootvid.dll', 'kdcom.dll' ) ).intersection( [ imp.dll.lower() for imp in self.DIRECTORY_ENTRY_IMPORT ] ):
                 return True
-                
+
         return False
-    
+
 
     def get_overlay_data_start_offset(self):
         """Get the offset of data appended to the file and not contained within the area described in the headers."""
@@ -4728,58 +4743,58 @@
         highest_PointerToRawData = 0
         highest_SizeOfRawData = 0
         for section in self.sections:
-            
+
             # If a section seems to fall outside the boundaries of the file we assume it's either
             # because of intentionally misleading values or because the file is truncated
             # In either case we skip it
             if section.PointerToRawData + section.SizeOfRawData > len(self.__data__):
                 continue
-                
+
             if section.PointerToRawData + section.SizeOfRawData > highest_PointerToRawData + highest_SizeOfRawData:
                 highest_PointerToRawData = section.PointerToRawData
                 highest_SizeOfRawData = section.SizeOfRawData
-                
+
         if len(self.__data__) > highest_PointerToRawData + highest_SizeOfRawData:
             return highest_PointerToRawData + highest_SizeOfRawData
-            
+
         return None
-    
-    
+
+
     def get_overlay(self):
         """Get the data appended to the file and not contained within the area described in the headers."""
 
         overlay_data_offset = self.get_overlay_data_start_offset()
-                
+
         if overlay_data_offset is not None:
             return self.__data__[ overlay_data_offset : ]
-            
+
         return None
-        
-    
+
+
     def trim(self):
         """Return the just data defined by the PE headers, removing any overlayed data."""
-    
+
         overlay_data_offset = self.get_overlay_data_start_offset()
-                
+
         if overlay_data_offset is not None:
             return self.__data__[ : overlay_data_offset ]
-            
+
         return self.__data__[:]
-        
-        
+
+
     # According to http://corkami.blogspot.com/2010/01/parce-que-la-planche-aura-brule.html
     # if PointerToRawData is less that 0x200 it's rounded to zero. Loading the test file
     # in a debugger it's easy to verify that the PointerToRawData value of 1 is rounded
-    # to zero. Hence we reproduce the behabior
+    # to zero. Hence we reproduce the behavior
     #
-    # According to the document: 
+    # According to the document:
     # [ Microsoft Portable Executable and Common Object File Format Specification ]
     # "The alignment factor (in bytes) that is used to align the raw data of sections in
     #  the image file. The value should be a power of 2 between 512 and 64 K, inclusive.
     #  The default is 512. If the SectionAlignment is less than the architecture’s page
     #  size, then FileAlignment must match SectionAlignment."
     #
-    # The following is a hardcoded constant if the Windows loader
+    # The following is a hard-coded constant if the Windows loader
     def adjust_FileAlignment( self, val, file_alignment ):
         global FileAlignment_Warning
         if file_alignment > FILE_ALIGNEMNT_HARDCODED_VALUE:
@@ -4795,7 +4810,7 @@
         return (val / 0x200) * 0x200
 
 
-    # According to the document: 
+    # According to the document:
     # [ Microsoft Portable Executable and Common Object File Format Specification ]
     # "The alignment (in bytes) of sections when they are loaded into memory. It must be
     #  greater than or equal to FileAlignment. The default is the page size for the
@@ -4809,16 +4824,16 @@
                     'If FileAlignment(%x) < 0x200 it should equal SectionAlignment(%x)' % (
                         file_alignment, section_alignment)  )
                 SectionAlignment_Warning = True
-            
+
         if section_alignment < 0x1000: # page size
             section_alignment = file_alignment
-            
+
         # 0x200 is the minimum valid FileAlignment according to the documentation
         # although ntoskrnl.exe has an alignment of 0x80 in some Windows versions
         #
-        #elif section_alignment < 0x80: 
+        #elif section_alignment < 0x80:
         #    section_alignment = 0x80
-            
+
         if section_alignment and val % section_alignment:
             return section_alignment * ( val / section_alignment )
         return val