Commit f32ee920478742d311978f0e0d1d4874e2e0d639

Authored by Christian Herdtweck
1 parent 9f03687a

msodde: Fix indent and line lengths

(making pylint happier, part 1)
Showing 1 changed file with 23 additions and 21 deletions
oletools/msodde.py
@@ -420,7 +420,7 @@ def process_doc_stream(stream):
420 pass 420 pass
421 elif len(field_contents) > OLE_FIELD_MAX_SIZE: 421 elif len(field_contents) > OLE_FIELD_MAX_SIZE:
422 logger.debug('field exceeds max size of {0}. Ignore rest' 422 logger.debug('field exceeds max size of {0}. Ignore rest'
423 - .format(OLE_FIELD_MAX_SIZE)) 423 + .format(OLE_FIELD_MAX_SIZE))
424 max_size_exceeded = True 424 max_size_exceeded = True
425 425
426 # appending a raw byte to a unicode string here. Not clean but 426 # appending a raw byte to a unicode string here. Not clean but
@@ -440,7 +440,7 @@ def process_doc_stream(stream):
440 logger.debug('big field was not a field after all') 440 logger.debug('big field was not a field after all')
441 441
442 logger.debug('Checked {0} characters, found {1} fields' 442 logger.debug('Checked {0} characters, found {1} fields'
443 - .format(idx, len(result_parts))) 443 + .format(idx, len(result_parts)))
444 444
445 return result_parts 445 return result_parts
446 446
@@ -465,11 +465,10 @@ def process_doc(ole):
465 direntry = ole._load_direntry(sid) 465 direntry = ole._load_direntry(sid)
466 is_stream = direntry.entry_type == olefile.STGTY_STREAM 466 is_stream = direntry.entry_type == olefile.STGTY_STREAM
467 logger.debug('direntry {:2d} {}: {}' 467 logger.debug('direntry {:2d} {}: {}'
468 - .format(sid, '[orphan]' if is_orphan else direntry.name,  
469 - 'is stream of size {}'.format(direntry.size)  
470 - if is_stream else  
471 - 'no stream ({})'  
472 - .format(direntry.entry_type))) 468 + .format(sid, '[orphan]' if is_orphan else direntry.name,
  469 + 'is stream of size {}'.format(direntry.size)
  470 + if is_stream else
  471 + 'no stream ({})'.format(direntry.entry_type)))
473 if is_stream: 472 if is_stream:
474 new_parts = process_doc_stream( 473 new_parts = process_doc_stream(
475 ole._open(direntry.isectStart, direntry.size)) 474 ole._open(direntry.isectStart, direntry.size))
@@ -609,7 +608,7 @@ def field_is_blacklisted(contents):
609 except ValueError: # first word is no blacklisted command 608 except ValueError: # first word is no blacklisted command
610 return False 609 return False
611 logger.debug('trying to match "{0}" to blacklist command {1}' 610 logger.debug('trying to match "{0}" to blacklist command {1}'
612 - .format(contents, FIELD_BLACKLIST[index])) 611 + .format(contents, FIELD_BLACKLIST[index]))
613 _, nargs_required, nargs_optional, sw_with_arg, sw_solo, sw_format \ 612 _, nargs_required, nargs_optional, sw_with_arg, sw_solo, sw_format \
614 = FIELD_BLACKLIST[index] 613 = FIELD_BLACKLIST[index]
615 614
@@ -621,11 +620,12 @@ def field_is_blacklisted(contents):
621 nargs += 1 620 nargs += 1
622 if nargs < nargs_required: 621 if nargs < nargs_required:
623 logger.debug('too few args: found {0}, but need at least {1} in "{2}"' 622 logger.debug('too few args: found {0}, but need at least {1} in "{2}"'
624 - .format(nargs, nargs_required, contents)) 623 + .format(nargs, nargs_required, contents))
625 return False 624 return False
626 elif nargs > nargs_required + nargs_optional: 625 elif nargs > nargs_required + nargs_optional:
627 - logger.debug('too many args: found {0}, but need at most {1}+{2} in "{3}"'  
628 - .format(nargs, nargs_required, nargs_optional, contents)) 626 + logger.debug('too many args: found {0}, but need at most {1}+{2} in '
  627 + '"{3}"'
  628 + .format(nargs, nargs_required, nargs_optional, contents))
629 return False 629 return False
630 630
631 # check switches 631 # check switches
@@ -635,14 +635,14 @@ def field_is_blacklisted(contents):
635 if expect_arg: # this is an argument for the last switch 635 if expect_arg: # this is an argument for the last switch
636 if arg_choices and (word not in arg_choices): 636 if arg_choices and (word not in arg_choices):
637 logger.debug('Found invalid switch argument "{0}" in "{1}"' 637 logger.debug('Found invalid switch argument "{0}" in "{1}"'
638 - .format(word, contents)) 638 + .format(word, contents))
639 return False 639 return False
640 expect_arg = False 640 expect_arg = False
641 arg_choices = [] # in general, do not enforce choices 641 arg_choices = [] # in general, do not enforce choices
642 continue # "no further questions, your honor" 642 continue # "no further questions, your honor"
643 elif not FIELD_SWITCH_REGEX.match(word): 643 elif not FIELD_SWITCH_REGEX.match(word):
644 logger.debug('expected switch, found "{0}" in "{1}"' 644 logger.debug('expected switch, found "{0}" in "{1}"'
645 - .format(word, contents)) 645 + .format(word, contents))
646 return False 646 return False
647 # we want a switch and we got a valid one 647 # we want a switch and we got a valid one
648 switch = word[1] 648 switch = word[1]
@@ -664,7 +664,7 @@ def field_is_blacklisted(contents):
664 arg_choices = [] # too many choices to list them here 664 arg_choices = [] # too many choices to list them here
665 else: 665 else:
666 logger.debug('unexpected switch {0} in "{1}"' 666 logger.debug('unexpected switch {0} in "{1}"'
667 - .format(switch, contents)) 667 + .format(switch, contents))
668 return False 668 return False
669 669
670 # if nothing went wrong sofar, the contents seems to match the blacklist 670 # if nothing went wrong sofar, the contents seems to match the blacklist
@@ -690,7 +690,7 @@ def process_xlsx(filepath):
690 for subfile, content_type, handle in parser.iter_non_xml(): 690 for subfile, content_type, handle in parser.iter_non_xml():
691 try: 691 try:
692 logger.info('Parsing non-xml subfile {0} with content type {1}' 692 logger.info('Parsing non-xml subfile {0} with content type {1}'
693 - .format(subfile, content_type)) 693 + .format(subfile, content_type))
694 for record in xls_parser.parse_xlsb_part(handle, content_type, 694 for record in xls_parser.parse_xlsb_part(handle, content_type,
695 subfile): 695 subfile):
696 logger.debug('{0}: {1}'.format(subfile, record)) 696 logger.debug('{0}: {1}'.format(subfile, record))
@@ -729,7 +729,8 @@ class RtfFieldParser(rtfobj.RtfParser):
729 729
730 def open_destination(self, destination): 730 def open_destination(self, destination):
731 if destination.cword == b'fldinst': 731 if destination.cword == b'fldinst':
732 - logger.debug('*** Start field data at index %Xh' % destination.start) 732 + logger.debug('*** Start field data at index %Xh'
  733 + % destination.start)
733 734
734 def close_destination(self, destination): 735 def close_destination(self, destination):
735 if destination.cword == b'fldinst': 736 if destination.cword == b'fldinst':
@@ -760,7 +761,7 @@ def process_rtf(file_handle, field_filter_mode=None):
760 all_fields = [field.decode('ascii') for field in rtfparser.fields] 761 all_fields = [field.decode('ascii') for field in rtfparser.fields]
761 # apply field command filter 762 # apply field command filter
762 logger.debug('found {1} fields, filtering with mode "{0}"' 763 logger.debug('found {1} fields, filtering with mode "{0}"'
763 - .format(field_filter_mode, len(all_fields))) 764 + .format(field_filter_mode, len(all_fields)))
764 if field_filter_mode in (FIELD_FILTER_ALL, None): 765 if field_filter_mode in (FIELD_FILTER_ALL, None):
765 clean_fields = all_fields 766 clean_fields = all_fields
766 elif field_filter_mode == FIELD_FILTER_DDE: 767 elif field_filter_mode == FIELD_FILTER_DDE:
@@ -817,11 +818,12 @@ def process_csv(filepath):
817 results, _ = process_csv_dialect(file_handle, delim) 818 results, _ = process_csv_dialect(file_handle, delim)
818 except csv.Error: # e.g. sniffing fails 819 except csv.Error: # e.g. sniffing fails
819 logger.debug('failed to csv-parse with delimiter {0!r}' 820 logger.debug('failed to csv-parse with delimiter {0!r}'
820 - .format(delim)) 821 + .format(delim))
821 822
822 if is_small and not results: 823 if is_small and not results:
823 # try whole file as single cell, since sniffing fails in this case 824 # try whole file as single cell, since sniffing fails in this case
824 - logger.debug('last attempt: take whole file as single unquoted cell') 825 + logger.debug('last attempt: take whole file as single unquoted '
  826 + 'cell')
825 file_handle.seek(0) 827 file_handle.seek(0)
826 match = CSV_DDE_FORMAT.match(file_handle.read(CSV_SMALL_THRESH)) 828 match = CSV_DDE_FORMAT.match(file_handle.read(CSV_SMALL_THRESH))
827 if match: 829 if match:
@@ -838,8 +840,8 @@ def process_csv_dialect(file_handle, delimiters):
838 delimiters=delimiters) 840 delimiters=delimiters)
839 dialect.strict = False # microsoft is never strict 841 dialect.strict = False # microsoft is never strict
840 logger.debug('sniffed csv dialect with delimiter {0!r} ' 842 logger.debug('sniffed csv dialect with delimiter {0!r} '
841 - 'and quote char {1!r}'  
842 - .format(dialect.delimiter, dialect.quotechar)) 843 + 'and quote char {1!r}'
  844 + .format(dialect.delimiter, dialect.quotechar))
843 845
844 # rewind file handle to start 846 # rewind file handle to start
845 file_handle.seek(0) 847 file_handle.seek(0)