Determines if the file entry is a metadata file. Args: file_entry (dfvfs.FileEntry): a file entry object. Returns: bool: True if the file entry is a metadata file.
def _IsMetadataFile(self, file_entry):
  if (file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK and
      file_entry.path_spec.location in self._METADATA_FILE_LOCATIONS_TSK):
    return True

  return False
288,825
Processes a data stream containing archive types, such as TAR or ZIP. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. path_spec (dfvfs.PathSpec): path specification. type_indicators (list[str]): dfVFS archive type indicators found in the data stream.
def _ProcessArchiveTypes(self, mediator, path_spec, type_indicators): number_of_type_indicators = len(type_indicators) if number_of_type_indicators == 0: return self.processing_status = definitions.STATUS_INDICATOR_COLLECTING if number_of_type_indicators > 1: display_name = mediator.GetDisplayName() logger.debug(( 'Found multiple format type indicators: {0:s} for ' 'archive file: {1:s}').format(type_indicators, display_name)) for type_indicator in type_indicators: if type_indicator == dfvfs_definitions.TYPE_INDICATOR_TAR: archive_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_TAR, location='/', parent=path_spec) elif type_indicator == dfvfs_definitions.TYPE_INDICATOR_ZIP: archive_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_ZIP, location='/', parent=path_spec) else: archive_path_spec = None warning_message = ( 'unsupported archive format type indicator: {0:s}').format( type_indicator) mediator.ProduceExtractionWarning( warning_message, path_spec=path_spec) if archive_path_spec: try: path_spec_generator = self._path_spec_extractor.ExtractPathSpecs( [archive_path_spec], resolver_context=mediator.resolver_context) for generated_path_spec in path_spec_generator: if self._abort: break event_source = event_sources.FileEntryEventSource( path_spec=generated_path_spec) event_source.file_entry_type = ( dfvfs_definitions.FILE_ENTRY_TYPE_FILE) mediator.ProduceEventSource(event_source) self.last_activity_timestamp = time.time() except (IOError, errors.MaximumRecursionDepth) as exception: warning_message = ( 'unable to process archive file with error: {0!s}').format( exception) mediator.ProduceExtractionWarning( warning_message, path_spec=generated_path_spec)
288,826
Processes a data stream containing compressed stream types, such as bzip2. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. path_spec (dfvfs.PathSpec): path specification. type_indicators (list[str]): dfVFS compressed stream type indicators found in the data stream.
def _ProcessCompressedStreamTypes(self, mediator, path_spec, type_indicators): number_of_type_indicators = len(type_indicators) if number_of_type_indicators == 0: return self.processing_status = definitions.STATUS_INDICATOR_COLLECTING if number_of_type_indicators > 1: display_name = mediator.GetDisplayName() logger.debug(( 'Found multiple format type indicators: {0:s} for ' 'compressed stream file: {1:s}').format( type_indicators, display_name)) for type_indicator in type_indicators: if type_indicator == dfvfs_definitions.TYPE_INDICATOR_BZIP2: compressed_stream_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_COMPRESSED_STREAM, compression_method=dfvfs_definitions.COMPRESSION_METHOD_BZIP2, parent=path_spec) elif type_indicator == dfvfs_definitions.TYPE_INDICATOR_GZIP: compressed_stream_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_GZIP, parent=path_spec) else: compressed_stream_path_spec = None warning_message = ( 'unsupported compressed stream format type indicators: ' '{0:s}').format(type_indicator) mediator.ProduceExtractionWarning( warning_message, path_spec=path_spec) if compressed_stream_path_spec: event_source = event_sources.FileEntryEventSource( path_spec=compressed_stream_path_spec) event_source.file_entry_type = dfvfs_definitions.FILE_ENTRY_TYPE_FILE mediator.ProduceEventSource(event_source) self.last_activity_timestamp = time.time()
288,827
Processes a directory file entry. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. file_entry (dfvfs.FileEntry): file entry of the directory.
def _ProcessDirectory(self, mediator, file_entry): self.processing_status = definitions.STATUS_INDICATOR_COLLECTING if self._processing_profiler: self._processing_profiler.StartTiming('collecting') for sub_file_entry in file_entry.sub_file_entries: if self._abort: break try: if not sub_file_entry.IsAllocated(): continue except dfvfs_errors.BackEndError as exception: warning_message = ( 'unable to process directory entry: {0:s} with error: ' '{1!s}').format(sub_file_entry.name, exception) mediator.ProduceExtractionWarning( warning_message, path_spec=file_entry.path_spec) continue # For TSK-based file entries only, ignore the virtual /$OrphanFiles # directory. if sub_file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK: if file_entry.IsRoot() and sub_file_entry.name == '$OrphanFiles': continue event_source = event_sources.FileEntryEventSource( path_spec=sub_file_entry.path_spec) # TODO: move this into a dfVFS file entry property. stat_object = sub_file_entry.GetStat() if stat_object: event_source.file_entry_type = stat_object.type mediator.ProduceEventSource(event_source) self.last_activity_timestamp = time.time() if self._processing_profiler: self._processing_profiler.StopTiming('collecting') self.processing_status = definitions.STATUS_INDICATOR_RUNNING
288,828
Processes a file entry. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. file_entry (dfvfs.FileEntry): file entry.
def _ProcessFileEntry(self, mediator, file_entry): display_name = mediator.GetDisplayName() logger.debug( '[ProcessFileEntry] processing file entry: {0:s}'.format(display_name)) reference_count = mediator.resolver_context.GetFileObjectReferenceCount( file_entry.path_spec) try: if self._IsMetadataFile(file_entry): self._ProcessMetadataFile(mediator, file_entry) else: file_entry_processed = False for data_stream in file_entry.data_streams: if self._abort: break if self._CanSkipDataStream(file_entry, data_stream): logger.debug(( '[ProcessFileEntry] Skipping datastream {0:s} for {1:s}: ' '{2:s}').format( data_stream.name, file_entry.type_indicator, display_name)) continue self._ProcessFileEntryDataStream(mediator, file_entry, data_stream) file_entry_processed = True if not file_entry_processed: # For when the file entry does not contain a data stream. self._ProcessFileEntryDataStream(mediator, file_entry, None) finally: new_reference_count = ( mediator.resolver_context.GetFileObjectReferenceCount( file_entry.path_spec)) if reference_count != new_reference_count: # Clean up after parsers that do not call close explicitly. if mediator.resolver_context.ForceRemoveFileObject( file_entry.path_spec): logger.warning( 'File-object not explicitly closed for file: {0:s}'.format( display_name)) logger.debug( '[ProcessFileEntry] done processing file entry: {0:s}'.format( display_name))
288,829
Processes a specific data stream of a file entry. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. file_entry (dfvfs.FileEntry): file entry containing the data stream. data_stream (dfvfs.DataStream): data stream or None if the file entry has no data stream.
def _ProcessFileEntryDataStream(self, mediator, file_entry, data_stream): display_name = mediator.GetDisplayName() data_stream_name = getattr(data_stream, 'name', '') or '' logger.debug(( '[ProcessFileEntryDataStream] processing data stream: "{0:s}" of ' 'file entry: {1:s}').format(data_stream_name, display_name)) mediator.ClearEventAttributes() if data_stream and self._analyzers: # Since AnalyzeDataStream generates event attributes it needs to be # called before producing events. self._AnalyzeDataStream(mediator, file_entry, data_stream.name) self._ExtractMetadataFromFileEntry(mediator, file_entry, data_stream) # Not every file entry has a data stream. In such cases we want to # extract the metadata only. if not data_stream: return # Determine if the content of the file entry should not be extracted. skip_content_extraction = self._CanSkipContentExtraction(file_entry) if skip_content_extraction: display_name = mediator.GetDisplayName() logger.debug( 'Skipping content extraction of: {0:s}'.format(display_name)) self.processing_status = definitions.STATUS_INDICATOR_IDLE return path_spec = copy.deepcopy(file_entry.path_spec) if data_stream and not data_stream.IsDefault(): path_spec.data_stream = data_stream.name archive_types = [] compressed_stream_types = [] if self._process_compressed_streams: compressed_stream_types = self._GetCompressedStreamTypes( mediator, path_spec) if not compressed_stream_types: archive_types = self._GetArchiveTypes(mediator, path_spec) if archive_types: if self._process_archives: self._ProcessArchiveTypes(mediator, path_spec, archive_types) if dfvfs_definitions.TYPE_INDICATOR_ZIP in archive_types: # ZIP files are the base of certain file formats like docx. self._ExtractContentFromDataStream( mediator, file_entry, data_stream.name) elif compressed_stream_types: self._ProcessCompressedStreamTypes( mediator, path_spec, compressed_stream_types) else: self._ExtractContentFromDataStream( mediator, file_entry, data_stream.name)
288,830
Processes a metadata file. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. file_entry (dfvfs.FileEntry): file entry of the metadata file.
def _ProcessMetadataFile(self, mediator, file_entry):
  self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING

  self._event_extractor.ParseFileEntryMetadata(mediator, file_entry)
  for data_stream in file_entry.data_streams:
    if self._abort:
      break

    self.last_activity_timestamp = time.time()
    self._event_extractor.ParseMetadataFile(
        mediator, file_entry, data_stream.name)
288,831
Sets the hasher names. Args: hasher_names_string (str): comma separated names of the hashers to enable, where 'none' disables the hashing analyzer.
def _SetHashers(self, hasher_names_string):
  if not hasher_names_string or hasher_names_string == 'none':
    return

  analyzer_object = analyzers_manager.AnalyzersManager.GetAnalyzerInstance(
      'hashing')
  analyzer_object.SetHasherNames(hasher_names_string)
  self._analyzers.append(analyzer_object)
288,832
Sets the Yara rules. Args: yara_rules_string (str): unparsed Yara rule definitions.
def _SetYaraRules(self, yara_rules_string):
  if not yara_rules_string:
    return

  analyzer_object = analyzers_manager.AnalyzersManager.GetAnalyzerInstance(
      'yara')
  analyzer_object.SetRules(yara_rules_string)
  self._analyzers.append(analyzer_object)
288,833
Processes a path specification. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. path_spec (dfvfs.PathSpec): path specification.
def ProcessPathSpec(self, mediator, path_spec):
  self.last_activity_timestamp = time.time()
  self.processing_status = definitions.STATUS_INDICATOR_RUNNING

  file_entry = path_spec_resolver.Resolver.OpenFileEntry(
      path_spec, resolver_context=mediator.resolver_context)

  if file_entry is None:
    display_name = mediator.GetDisplayNameForPathSpec(path_spec)
    logger.warning(
        'Unable to open file entry with path spec: {0:s}'.format(
            display_name))
    self.processing_status = definitions.STATUS_INDICATOR_IDLE
    return

  mediator.SetFileEntry(file_entry)

  try:
    if file_entry.IsDirectory():
      self._ProcessDirectory(mediator, file_entry)
    self._ProcessFileEntry(mediator, file_entry)

  finally:
    mediator.ResetFileEntry()

    self.last_activity_timestamp = time.time()
    self.processing_status = definitions.STATUS_INDICATOR_IDLE
288,834
Sets the extraction configuration settings. Args: configuration (ExtractionConfiguration): extraction configuration.
def SetExtractionConfiguration(self, configuration):
  self._hasher_file_size_limit = configuration.hasher_file_size_limit
  self._SetHashers(configuration.hasher_names_string)
  self._process_archives = configuration.process_archives
  self._process_compressed_streams = configuration.process_compressed_streams
  self._SetYaraRules(configuration.yara_rules_string)
288,835
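To illustrate how the extraction settings above could be wired together, here is a minimal, hypothetical sketch: the SimpleExtractionConfiguration class and its attribute values are assumptions for illustration and not part of plaso; only the attribute names mirror the ones read by SetExtractionConfiguration.

# Hypothetical configuration object consumed by SetExtractionConfiguration();
# the class below is an assumption, only the attribute names mirror those
# read in the method above.
class SimpleExtractionConfiguration(object):
  """Minimal stand-in for an extraction configuration."""

  def __init__(self):
    self.hasher_file_size_limit = 32 * 1024 * 1024  # skip hashing larger files
    self.hasher_names_string = 'md5,sha256'  # 'none' disables hashing
    self.process_archives = False
    self.process_compressed_streams = True
    self.yara_rules_string = None  # no Yara scanning

# Usage sketch, assuming worker is an instance of the class that defines
# SetExtractionConfiguration() above:
# worker.SetExtractionConfiguration(SimpleExtractionConfiguration())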
Parses a single log line and produces an event object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file. key (str): identifier of the structure of tokens.
def _ParseLogLine(self, parser_mediator, structure, key): time_elements_tuple = self._GetTimeElementsTuple(structure) try: date_time = dfdatetime_time_elements.TimeElements( time_elements_tuple=time_elements_tuple) except ValueError: parser_mediator.ProduceExtractionWarning( 'invalid date time value: {0!s}'.format(structure.date_time)) return self._last_month = time_elements_tuple[1] # If the actual entry is a repeated entry, we take the basic information # from the previous entry, but use the timestamp from the actual entry. if key == 'logline': self._previous_structure = structure else: structure = self._previous_structure event_data = MacAppFirewallLogEventData() event_data.action = structure.action event_data.agent = structure.agent event_data.computer_name = structure.computer_name # Due to the use of CharsNotIn pyparsing structure contains whitespaces # that need to be removed. event_data.process_name = structure.process_name.strip() event_data.status = structure.status event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_ADDED) parser_mediator.ProduceEventWithEventData(event, event_data)
288,838
Verify that this file is a Mac AppFirewall log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. line (str): line from a text file. Returns: bool: True if the line is in the expected format, False if not.
def VerifyStructure(self, parser_mediator, line): self._last_month = 0 self._year_use = parser_mediator.GetEstimatedYear() try: structure = self.FIREWALL_LINE.parseString(line) except pyparsing.ParseException as exception: logger.debug(( 'Unable to parse file as a Mac AppFirewall log file with error: ' '{0!s}').format(exception)) return False if structure.action != 'creating /var/log/appfirewall.log': logger.debug( 'Not a Mac AppFirewall log file, invalid action: {0!s}'.format( structure.action)) return False if structure.status != 'Error': logger.debug( 'Not a Mac AppFirewall log file, invalid status: {0!s}'.format( structure.status)) return False time_elements_tuple = self._GetTimeElementsTuple(structure) try: dfdatetime_time_elements.TimeElements( time_elements_tuple=time_elements_tuple) except ValueError: logger.debug(( 'Not a Mac AppFirewall log file, invalid date and time: ' '{0!s}').format(structure.date_time)) return False self._last_month = time_elements_tuple[1] return True
288,839
Determines if a parser can process a file entry. Args: file_entry (dfvfs.FileEntry): file entry. parser (BaseParser): parser. Returns: bool: True if the file entry can be processed by the parser object.
def _CheckParserCanProcessFileEntry(self, parser, file_entry):
  for filter_object in parser.FILTERS:
    if filter_object.Match(file_entry):
      return True

  return False
288,841
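The filter objects in parser.FILTERS only need to expose a Match(file_entry) method. The sketch below shows a hypothetical extension-based filter that satisfies that contract; ExtensionFilter is an assumption for illustration and not a plaso class.

# Hypothetical filter object illustrating the Match() contract used by
# _CheckParserCanProcessFileEntry(); not part of plaso.
class ExtensionFilter(object):
  """Matches file entries by filename extension."""

  def __init__(self, extensions):
    self._extensions = {extension.lower() for extension in extensions}

  def Match(self, file_entry):
    """Returns True if the file entry name has one of the extensions."""
    _, _, extension = file_entry.name.rpartition('.')
    return extension.lower() in self._extensions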
Determines if a file-like object matches one of the known signatures. Args: file_object (file): file-like object whose contents will be checked for known signatures. Returns: list[str]: parser names for which the contents of the file-like object matches their known signatures.
def _GetSignatureMatchParserNames(self, file_object):
  parser_names = []
  scan_state = pysigscan.scan_state()
  self._file_scanner.scan_file_object(scan_state, file_object)

  for scan_result in iter(scan_state.scan_results):
    format_specification = (
        self._formats_with_signatures.GetSpecificationBySignature(
            scan_result.identifier))

    if format_specification.identifier not in parser_names:
      parser_names.append(format_specification.identifier)

  return parser_names
288,842
Parses a data stream of a file entry with a specific parser. Args: parser_mediator (ParserMediator): parser mediator. parser (BaseParser): parser. file_entry (dfvfs.FileEntry): file entry. data_stream_name (str): data stream name. Raises: RuntimeError: if the file-like object is missing.
def _ParseDataStreamWithParser(
    self, parser_mediator, parser, file_entry, data_stream_name):
  file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
  if not file_object:
    raise RuntimeError(
        'Unable to retrieve file-like object from file entry.')

  try:
    self._ParseFileEntryWithParser(
        parser_mediator, parser, file_entry, file_object=file_object)

  finally:
    file_object.close()
288,844
Parses a data stream of a file entry with the enabled parsers. Args: parser_mediator (ParserMediator): parser mediator. file_entry (dfvfs.FileEntry): file entry. data_stream_name (str): data stream name. Raises: RuntimeError: if the file-like object or the parser object is missing.
def ParseDataStream(self, parser_mediator, file_entry, data_stream_name): file_object = file_entry.GetFileObject(data_stream_name=data_stream_name) if not file_object: raise RuntimeError( 'Unable to retrieve file-like object from file entry.') try: parser_names = self._GetSignatureMatchParserNames(file_object) parse_with_non_sigscan_parsers = True if parser_names: parse_result = self._ParseFileEntryWithParsers( parser_mediator, parser_names, file_entry, file_object=file_object) if parse_result in ( self._PARSE_RESULT_FAILURE, self._PARSE_RESULT_SUCCESS): parse_with_non_sigscan_parsers = False if parse_with_non_sigscan_parsers: self._ParseFileEntryWithParsers( parser_mediator, self._non_sigscan_parser_names, file_entry, file_object=file_object) finally: file_object.close()
288,847
Parses the file entry metadata, for example file system data. Args: parser_mediator (ParserMediator): parser mediator. file_entry (dfvfs.FileEntry): file entry.
def ParseFileEntryMetadata(self, parser_mediator, file_entry):
  if self._filestat_parser:
    self._ParseFileEntryWithParser(
        parser_mediator, self._filestat_parser, file_entry)
288,848
Parses a metadata file. Args: parser_mediator (ParserMediator): parser mediator. file_entry (dfvfs.FileEntry): file entry. data_stream_name (str): data stream name.
def ParseMetadataFile( self, parser_mediator, file_entry, data_stream_name): parent_path_spec = getattr(file_entry.path_spec, 'parent', None) filename_upper = file_entry.name.upper() if (self._mft_parser and parent_path_spec and filename_upper in ('$MFT', '$MFTMIRR') and not data_stream_name): self._ParseDataStreamWithParser( parser_mediator, self._mft_parser, file_entry, '') elif (self._usnjrnl_parser and parent_path_spec and filename_upper == '$USNJRNL' and data_stream_name == '$J'): # To be able to ignore the sparse data ranges the UsnJrnl parser # needs to read directly from the volume. volume_file_object = path_spec_resolver.Resolver.OpenFileObject( parent_path_spec, resolver_context=parser_mediator.resolver_context) try: self._ParseFileEntryWithParser( parser_mediator, self._usnjrnl_parser, file_entry, file_object=volume_file_object) finally: volume_file_object.close()
288,849
Initializes a path specification extractor. The source collector discovers all the file entries in the source. The source can be a single file, directory or a volume within a storage media image or device. Args: duplicate_file_check (Optional[bool]): True if duplicate files should be ignored.
def __init__(self, duplicate_file_check=False):
  super(PathSpecExtractor, self).__init__()
  self._duplicate_file_check = duplicate_file_check
  self._hashlist = {}
288,850
Calculates an MD5 hash from the date and time values of an NTFS file entry. Args: file_entry (dfvfs.FileEntry): file entry. Returns: str: hexadecimal representation of the MD5 hash value of the date and time values of the file entry.
def _CalculateNTFSTimeHash(self, file_entry): date_time_values = [] access_time = getattr(file_entry, 'access_time', None) if access_time: date_time_string = access_time.CopyToDateTimeString() date_time_values.append('atime:{0:s}'.format(date_time_string)) creation_time = getattr(file_entry, 'creation_time', None) if creation_time: date_time_string = creation_time.CopyToDateTimeString() date_time_values.append('crtime:{0:s}'.format(date_time_string)) modification_time = getattr(file_entry, 'modification_time', None) if modification_time: date_time_string = modification_time.CopyToDateTimeString() date_time_values.append('mtime:{0:s}'.format(date_time_string)) # file_entry.change_time is an alias of file_entry.entry_modification_time. change_time = getattr(file_entry, 'change_time', None) if change_time: date_time_string = change_time.CopyToDateTimeString() date_time_values.append('ctime:{0:s}'.format(date_time_string)) date_time_values = ''.join(date_time_values) date_time_values = date_time_values.encode('ascii') hash_value = hashlib.md5() hash_value.update(date_time_values) return hash_value.hexdigest()
288,851
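The deduplication key computed above is simply an MD5 of the concatenated, labeled timestamp strings. Here is a standalone sketch of the same idea using only hashlib; the function name and the hard-coded date and time strings are hypothetical examples, not plaso code.

# Standalone sketch of the deduplication key used by _CalculateNTFSTimeHash();
# the sample timestamps are hypothetical.
import hashlib

def calculate_time_hash(atime=None, crtime=None, mtime=None, ctime=None):
  """Returns an MD5 hex digest of the labeled date and time strings."""
  date_time_values = []
  for label, value in (
      ('atime', atime), ('crtime', crtime), ('mtime', mtime),
      ('ctime', ctime)):
    if value:
      date_time_values.append('{0:s}:{1:s}'.format(label, value))

  data = ''.join(date_time_values).encode('ascii')
  return hashlib.md5(data).hexdigest()

# Two file entries with identical timestamps yield the same key.
print(calculate_time_hash(
    atime='2019-01-01 10:00:00', mtime='2019-01-01 10:00:00'))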
Extracts path specifications from a directory. Args: file_entry (dfvfs.FileEntry): file entry that refers to the directory. depth (Optional[int]): current depth where 0 represents the file system root. Yields: dfvfs.PathSpec: path specification of a file entry found in the directory.
def _ExtractPathSpecsFromDirectory(self, file_entry, depth=0): if depth >= self._MAXIMUM_DEPTH: raise errors.MaximumRecursionDepth('Maximum recursion depth reached.') # Need to do a breadth-first search otherwise we'll hit the Python # maximum recursion depth. sub_directories = [] for sub_file_entry in file_entry.sub_file_entries: try: if not sub_file_entry.IsAllocated() or sub_file_entry.IsLink(): continue except dfvfs_errors.BackEndError as exception: logger.warning( 'Unable to process file: {0:s} with error: {1!s}'.format( sub_file_entry.path_spec.comparable.replace( '\n', ';'), exception)) continue # For TSK-based file entries only, ignore the virtual /$OrphanFiles # directory. if sub_file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK: if file_entry.IsRoot() and sub_file_entry.name == '$OrphanFiles': continue if sub_file_entry.IsDirectory(): sub_directories.append(sub_file_entry) elif sub_file_entry.IsFile(): # If we are dealing with a VSS we want to calculate a hash # value based on available timestamps and compare that to previously # calculated hash values, and only include the file into the queue if # the hash does not match. if self._duplicate_file_check: hash_value = self._CalculateNTFSTimeHash(sub_file_entry) inode = getattr(sub_file_entry.path_spec, 'inode', 0) if inode in self._hashlist: if hash_value in self._hashlist[inode]: continue self._hashlist.setdefault(inode, []).append(hash_value) for path_spec in self._ExtractPathSpecsFromFile(sub_file_entry): yield path_spec for sub_file_entry in sub_directories: try: for path_spec in self._ExtractPathSpecsFromDirectory( sub_file_entry, depth=(depth + 1)): yield path_spec except ( IOError, dfvfs_errors.AccessError, dfvfs_errors.BackEndError, dfvfs_errors.PathSpecError) as exception: logger.warning('{0!s}'.format(exception))
288,853
Extracts path specifications from a file. Args: file_entry (dfvfs.FileEntry): file entry that refers to the file. Yields: dfvfs.PathSpec: path specification of a file entry found in the file.
def _ExtractPathSpecsFromFile(self, file_entry):
  produced_main_path_spec = False
  for data_stream in file_entry.data_streams:
    # Make a copy so we don't make the changes on a path specification
    # directly. Otherwise already produced path specifications can be
    # altered in the process.
    path_spec = copy.deepcopy(file_entry.path_spec)
    if data_stream.name:
      setattr(path_spec, 'data_stream', data_stream.name)
    yield path_spec

    if not data_stream.name:
      produced_main_path_spec = True

  if not produced_main_path_spec:
    yield file_entry.path_spec
288,854
Extracts path specifications from a specific source. Args: path_specs (Optional[list[dfvfs.PathSpec]]): path specifications. find_specs (Optional[list[dfvfs.FindSpec]]): find specifications. recurse_file_system (Optional[bool]): True if extraction should recurse into a file system. resolver_context (Optional[dfvfs.Context]): resolver context. Yields: dfvfs.PathSpec: path specification of a file entry found in the source.
def ExtractPathSpecs(
    self, path_specs, find_specs=None, recurse_file_system=True,
    resolver_context=None):
  for path_spec in path_specs:
    for extracted_path_spec in self._ExtractPathSpecs(
        path_spec, find_specs=find_specs,
        recurse_file_system=recurse_file_system,
        resolver_context=resolver_context):
      yield extracted_path_spec
288,856
Initializes an OLECF property set stream. Args: olecf_item (pyolecf.property_set_stream): OLECF item.
def __init__(self, olecf_item):
  super(OLECFPropertySetStream, self).__init__()
  self._properties = {}
  self.date_time_properties = {}

  self._ReadPropertySet(olecf_item.set)
288,857
Retrieves the property value as a Python object. Args: property_value (pyolecf.property_value): OLECF property value. Returns: object: property value as a Python object.
def _GetValueAsObject(self, property_value):
  if property_value.type == pyolecf.value_types.BOOLEAN:
    return property_value.data_as_boolean

  if property_value.type in self._INTEGER_TYPES:
    return property_value.data_as_integer

  if property_value.type in self._STRING_TYPES:
    return property_value.data_as_string

  try:
    data = property_value.data
  except IOError:
    data = None

  return data
288,858
Reads properties from a property set. Args: property_set (pyolecf.property_set): OLECF property set.
def _ReadPropertySet(self, property_set): # Combine the values of multiple property sections # but do not override properties that are already set. for property_section in property_set.sections: if property_section.class_identifier != self._CLASS_IDENTIFIER: continue for property_value in property_section.properties: property_name = self._PROPERTY_NAMES.get( property_value.identifier, None) if not property_name: property_name = '0x{0:04}'.format(property_value.identifier) value = self._GetValueAsObject(property_value) if self._PROPERTY_VALUE_MAPPINGS: value_callback_name = self._PROPERTY_VALUE_MAPPINGS.get( property_name, None) if value_callback_name: value_callback_method = getattr(self, value_callback_name, None) if value_callback_method: value = value_callback_method(value) if property_name in self._DATE_TIME_PROPERTIES: properties_dict = self.date_time_properties value = dfdatetime_filetime.Filetime(timestamp=value) else: properties_dict = self._properties if property_name not in properties_dict: properties_dict[property_name] = value
288,859
Retrieves the properties as event data. Args: data_type (str): event data type. Returns: EventData: event data.
def GetEventData(self, data_type):
  event_data = events.EventData(data_type=data_type)

  for property_name, property_value in iter(self._properties.items()):
    if isinstance(property_value, py2to3.BYTES_TYPE):
      property_value = repr(property_value)
    setattr(event_data, property_name, property_value)

  return event_data
288,860
Initializes an event. Args: date_time (dfdatetime.DateTimeValues): date and time values. date_time_description (str): description of the meaning of the date and time values.
def __init__(self, date_time, date_time_description):
  super(OLECFDocumentSummaryInformationEvent, self).__init__(
      date_time, date_time_description)
  self.name = 'Document Summary Information'
288,861
Initializes an event. Args: date_time (dfdatetime.DateTimeValues): date and time values. date_time_description (str): description of the meaning of the date and time values.
def __init__(self, date_time, date_time_description):
  super(OLECFSummaryInformationEvent, self).__init__(
      date_time, date_time_description)
  self.name = 'Summary Information'
288,862
Parses a document summary information OLECF item. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. root_item (Optional[pyolecf.item]): root item of the OLECF file. Raises: ValueError: If the root item is not set.
def Process(self, parser_mediator, root_item=None, **kwargs): # This will raise if unhandled keyword arguments are passed. super(DocumentSummaryInformationOLECFPlugin, self).Process( parser_mediator, **kwargs) if not root_item: raise ValueError('Root item not set.') root_creation_time, root_modification_time = self._GetTimestamps(root_item) for item_name in self.REQUIRED_ITEMS: item = root_item.get_sub_item_by_name(item_name) if not item: continue summary_information = OLECFDocumentSummaryInformation(item) event_data = summary_information.GetEventData( data_type='olecf:document_summary_info') event_data.name = 'Document Summary Information' if root_creation_time: date_time = dfdatetime_filetime.Filetime( timestamp=root_creation_time) event = OLECFDocumentSummaryInformationEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) if root_modification_time: date_time = dfdatetime_filetime.Filetime( timestamp=root_modification_time) event = OLECFDocumentSummaryInformationEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data)
288,863
Parses a summary information OLECF item. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. root_item (Optional[pyolecf.item]): root item of the OLECF file. Raises: ValueError: If the root item is not set.
def Process(self, parser_mediator, root_item=None, **kwargs): # This will raise if unhandled keyword arguments are passed. super(SummaryInformationOLECFPlugin, self).Process( parser_mediator, **kwargs) if not root_item: raise ValueError('Root item not set.') root_creation_time, root_modification_time = self._GetTimestamps(root_item) for item_name in self.REQUIRED_ITEMS: item = root_item.get_sub_item_by_name(item_name) if not item: continue summary_information = OLECFSummaryInformation(item) event_data = summary_information.GetEventData( data_type='olecf:summary_info') event_data.name = 'Summary Information' for property_name, date_time in iter( summary_information.date_time_properties.items()): date_time_description = self._DATE_TIME_DESCRIPTIONS.get( property_name, definitions.TIME_DESCRIPTION_UNKNOWN) event = OLECFSummaryInformationEvent(date_time, date_time_description) parser_mediator.ProduceEventWithEventData(event, event_data) if root_creation_time: date_time = dfdatetime_filetime.Filetime( timestamp=root_creation_time) event = OLECFSummaryInformationEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) if root_modification_time: date_time = dfdatetime_filetime.Filetime( timestamp=root_modification_time) event = OLECFSummaryInformationEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data)
288,864
Initializes an artifact definitions filter helper. Args: artifacts_registry (artifacts.ArtifactDefinitionsRegistry): artifact definitions registry. knowledge_base (KnowledgeBase): contains information from the source data needed for filtering.
def __init__(self, artifacts_registry, knowledge_base):
  super(ArtifactDefinitionsFilterHelper, self).__init__()
  self._artifacts_registry = artifacts_registry
  self._knowledge_base = knowledge_base
  self.file_system_artifact_names = set()
  self.file_system_find_specs = []
  self.registry_artifact_names = set()
  self.registry_find_specs = []
288,867
Checks if a Windows Registry key path is supported by dfWinReg. Args: key_path (str): path of the Windows Registry key. Returns: bool: True if key is compatible or False if not.
def CheckKeyCompatibility(cls, key_path):
  key_path_upper = key_path.upper()
  for key_path_prefix in cls._COMPATIBLE_REGISTRY_KEY_PATH_PREFIXES:
    if key_path_upper.startswith(key_path_prefix):
      return True

  logger.warning('Key path: "{0:s}" is currently not supported'.format(
      key_path))
  return False
288,868
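The compatibility test above is a case-insensitive prefix match against a class-level allow list. Below is a self-contained sketch of that check; the prefix set and function name are hypothetical examples, not the actual _COMPATIBLE_REGISTRY_KEY_PATH_PREFIXES value.

# Self-contained sketch of the prefix check performed by CheckKeyCompatibility();
# the prefixes below are illustrative assumptions.
_EXAMPLE_PREFIXES = frozenset([
    'HKEY_CURRENT_USER\\',
    'HKEY_LOCAL_MACHINE\\SOFTWARE\\',
    'HKEY_LOCAL_MACHINE\\SYSTEM\\'])

def check_key_compatibility(key_path):
  """Returns True if the key path starts with a supported prefix."""
  key_path_upper = key_path.upper()
  return any(
      key_path_upper.startswith(prefix) for prefix in _EXAMPLE_PREFIXES)

print(check_key_compatibility(
    'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion'))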
Builds find specifications from artifact definitions. Args: artifact_filter_names (list[str]): names of artifact definitions that are used for filtering file system and Windows Registry key paths. environment_variables (Optional[list[EnvironmentVariableArtifact]]): environment variables.
def BuildFindSpecs(self, artifact_filter_names, environment_variables=None): find_specs = [] for name in artifact_filter_names: definition = self._artifacts_registry.GetDefinitionByName(name) if not definition: logger.debug('undefined artifact definition: {0:s}'.format(name)) continue logger.debug('building find spec from artifact definition: {0:s}'.format( name)) artifact_find_specs = self._BuildFindSpecsFromArtifact( definition, environment_variables) find_specs.extend(artifact_find_specs) for find_spec in find_specs: if isinstance(find_spec, file_system_searcher.FindSpec): self.file_system_find_specs.append(find_spec) elif isinstance(find_spec, registry_searcher.FindSpec): self.registry_find_specs.append(find_spec) else: logger.warning('Unsupported find specification type: {0:s}'.format( type(find_spec)))
288,869
Builds find specifications from an artifact definition. Args: definition (artifacts.ArtifactDefinition): artifact definition. environment_variables (list[EnvironmentVariableArtifact]): environment variables. Returns: list[dfvfs.FindSpec|dfwinreg.FindSpec]: dfVFS or dfWinReg find specifications.
def _BuildFindSpecsFromArtifact(self, definition, environment_variables): find_specs = [] for source in definition.sources: if source.type_indicator == artifact_types.TYPE_INDICATOR_FILE: for path_entry in set(source.paths): specifications = self._BuildFindSpecsFromFileSourcePath( path_entry, source.separator, environment_variables, self._knowledge_base.user_accounts) find_specs.extend(specifications) self.file_system_artifact_names.add(definition.name) elif (source.type_indicator == artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY): for key_path in set(source.keys): if ArtifactDefinitionsFilterHelper.CheckKeyCompatibility(key_path): specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path) find_specs.extend(specifications) self.registry_artifact_names.add(definition.name) elif (source.type_indicator == artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE): # TODO: Handle Registry Values Once Supported in dfwinreg. # https://github.com/log2timeline/dfwinreg/issues/98 # Use set-comprehension to create a set of the source key paths. key_paths = { key_value['key'] for key_value in source.key_value_pairs} key_paths_string = ', '.join(key_paths) logger.warning(( 'Windows Registry values are not supported, extracting keys: ' '"{0!s}"').format(key_paths_string)) for key_path in key_paths: if ArtifactDefinitionsFilterHelper.CheckKeyCompatibility(key_path): specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path) find_specs.extend(specifications) self.registry_artifact_names.add(definition.name) elif (source.type_indicator == artifact_types.TYPE_INDICATOR_ARTIFACT_GROUP): for name in source.names: specifications = self._BuildFindSpecsFromGroupName( name, environment_variables) find_specs.extend(specifications) else: logger.warning( 'Unsupported artifact definition source type: "{0:s}"'.format( source.type_indicator)) return find_specs
288,870
Builds find specifications from an artifact group name. Args: group_name (str): artifact group name. environment_variables (list[str]): environment variable attributes used to dynamically populate environment variables in file and registry artifacts. Returns: list[dfwinreg.FindSpec|dfvfs.FindSpec]: find specifications or None if no artifact with the given name can be retrieved.
def _BuildFindSpecsFromGroupName(self, group_name, environment_variables):
  definition = self._artifacts_registry.GetDefinitionByName(group_name)
  if not definition:
    return None

  return self._BuildFindSpecsFromArtifact(definition, environment_variables)
288,871
Builds find specifications from a Windows Registry source type. Args: key_path (str): Windows Registry key path defined by the source. Returns: list[dfwinreg.FindSpec]: find specifications for the Windows Registry source type.
def _BuildFindSpecsFromRegistrySourceKey(self, key_path): find_specs = [] for key_path_glob in path_helper.PathHelper.ExpandRecursiveGlobs( key_path, '\\'): logger.debug('building find spec from key path glob: {0:s}'.format( key_path_glob)) key_path_glob_upper = key_path_glob.upper() if key_path_glob_upper.startswith('HKEY_USERS\\%%USERS.SID%%'): key_path_glob = 'HKEY_CURRENT_USER{0:s}'.format(key_path_glob[26:]) find_spec = registry_searcher.FindSpec(key_path_glob=key_path_glob) find_specs.append(find_spec) return find_specs
288,873
Retrieves the page for the extension from the Chrome store website. Args: extension_identifier (str): Chrome extension identifier. Returns: str: page content or None.
def _GetChromeWebStorePage(self, extension_identifier):
  web_store_url = self._WEB_STORE_URL.format(xid=extension_identifier)
  try:
    response = requests.get(web_store_url)

  except (requests.ConnectionError, requests.HTTPError) as exception:
    logger.warning((
        '[{0:s}] unable to retrieve URL: {1:s} with error: {2!s}').format(
            self.NAME, web_store_url, exception))
    return None

  return response.text
288,875
Determines the path segment separator of a path as a best guess. Args: path (str): path. Returns: str: path segment separator.
def _GetPathSegmentSeparator(self, path):
  if path.startswith('\\') or path[1:].startswith(':\\'):
    return '\\'

  if path.startswith('/'):
    return '/'

  if '/' in path and '\\' in path:
    # Let's count slashes and guess which one is the right one.
    forward_count = len(path.split('/'))
    backward_count = len(path.split('\\'))

    if forward_count > backward_count:
      return '/'

    return '\\'

  # Now we are sure there is only one type of separators yet
  # the path does not start with one.
  if '/' in path:
    return '/'

  return '\\'
288,876
Retrieves the name of the extension from the Chrome store website. Args: extension_identifier (str): Chrome extension identifier. Returns: str: name of the extension or None.
def _GetTitleFromChromeWebStore(self, extension_identifier): # Check if we have already looked this extension up. if extension_identifier in self._extensions: return self._extensions.get(extension_identifier) page_content = self._GetChromeWebStorePage(extension_identifier) if not page_content: logger.warning( '[{0:s}] no data returned for extension identifier: {1:s}'.format( self.NAME, extension_identifier)) return None first_line, _, _ = page_content.partition('\n') match = self._TITLE_RE.search(first_line) name = None if match: title = match.group(1) if title.startswith('Chrome Web Store - '): name = title[19:] elif title.endswith('- Chrome Web Store'): name = title[:-19] if not name: self._extensions[extension_identifier] = 'UNKNOWN' return None self._extensions[extension_identifier] = name return name
288,877
Compiles an analysis report. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. Returns: AnalysisReport: analysis report.
def CompileReport(self, mediator): lines_of_text = [] for user, extensions in sorted(self._results.items()): lines_of_text.append(' == USER: {0:s} =='.format(user)) for extension, extension_identifier in sorted(extensions): lines_of_text.append(' {0:s} [{1:s}]'.format( extension, extension_identifier)) lines_of_text.append('') lines_of_text.append('') report_text = '\n'.join(lines_of_text) analysis_report = reports.AnalysisReport( plugin_name=self.NAME, text=report_text) analysis_report.report_dict = self._results return analysis_report
288,878
Analyzes an event. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event to examine.
def ExamineEvent(self, mediator, event): # Only interested in filesystem events. if event.data_type != 'fs:stat': return filename = getattr(event, 'filename', None) if not filename: return # Determine if we have a Chrome extension ID. if 'chrome' not in filename.lower(): return if not self._sep: self._sep = self._GetPathSegmentSeparator(filename) if '{0:s}Extensions{0:s}'.format(self._sep) not in filename: return # Now we have extension IDs, let's check if we've got the # folder, nothing else. paths = filename.split(self._sep) if paths[-2] != 'Extensions': return extension_identifier = paths[-1] if extension_identifier == 'Temp': return # Get the user and ID. user = mediator.GetUsernameForPath(filename) # We still want this information in here, so that we can # manually deduce the username. if not user: if len(filename) > 25: user = 'Not found ({0:s}...)'.format(filename[0:25]) else: user = 'Not found ({0:s})'.format(filename) extension_string = self._GetTitleFromChromeWebStore(extension_identifier) if not extension_string: extension_string = extension_identifier self._results.setdefault(user, []) if (extension_string, extension_identifier) not in self._results[user]: self._results[user].append((extension_string, extension_identifier))
288,879
Returns a properly formatted message string. Args: event_object: the event object (instance of EventObject). Returns: A formatted message string.
def _GetMessage(self, event_object):
  # TODO: move this somewhere where the mediator can be instantiated once.
  formatter_mediator = formatters_mediator.FormatterMediator()

  result = ''
  try:
    result, _ = formatters_manager.FormattersManager.GetMessageStrings(
        formatter_mediator, event_object)
  except KeyError as exception:
    logging.warning(
        'Unable to correctly assemble event with error: {0!s}'.format(
            exception))

  return result
288,882
Returns properly formatted source strings. Args: event_object: the event object (instance of EventObject). Returns: A tuple of the short and long source strings.
def _GetSources(self, event_object):
  # Default to empty strings so a lookup failure does not leave the source
  # strings undefined.
  source_short = source_long = ''
  try:
    source_short, source_long = (
        formatters_manager.FormattersManager.GetSourceStrings(event_object))
  except KeyError as exception:
    logging.warning(
        'Unable to correctly assemble event with error: {0!s}'.format(
            exception))

  return source_short, source_long
288,883
Compiles the filter implementation. Args: filter_implementation: a filter object (instance of objectfilter.TODO). Returns: A filter operator (instance of TODO). Raises: ParserError: if an unknown operator is provided.
def Compile(self, filter_implementation): self.attribute = self.swap_source.get(self.attribute, self.attribute) arguments = [self.attribute] op_str = self.operator.lower() operator = filter_implementation.OPS.get(op_str, None) if not operator: raise errors.ParseError('Unknown operator {0:s} provided.'.format( self.operator)) # Plaso specific implementation - if we are comparing a timestamp # to a value, we use our specific implementation that compares # timestamps in a "human readable" format. if self.attribute == 'timestamp': args = [] for argument in self.args: args.append(DateCompareObject(argument)) self.args = args for argument in self.args: if isinstance(argument, DateCompareObject): if 'Less' in str(operator): TimeRangeCache.SetUpperTimestamp(argument.data) else: TimeRangeCache.SetLowerTimestamp(argument.data) arguments.extend(self.args) expander = filter_implementation.FILTERS['ValueExpander'] ops = operator(arguments=arguments, value_expander=expander) if not self.bool_value: if hasattr(ops, 'FlipBool'): ops.FlipBool() return ops
288,885
Takes a date object and uses it for comparison. Args: data: a string, datetime object or an integer containing the number of microseconds since January 1, 1970, 00:00:00 UTC. Raises: ValueError: if the date string is invalid.
def __init__(self, data): if isinstance(data, py2to3.INTEGER_TYPES): self.data = data self.text = '{0:d}'.format(data) elif isinstance(data, float): self.data = py2to3.LONG_TYPE(data) self.text = '{0:f}'.format(data) elif isinstance(data, py2to3.STRING_TYPES): if isinstance(data, py2to3.BYTES_TYPE): self.text = data.decode('utf-8', errors='ignore') else: self.text = data try: self.data = timelib.Timestamp.FromTimeString(self.text) except (ValueError, errors.TimestampError): raise ValueError('Wrongly formatted date string: {0:s}'.format( self.text)) elif isinstance(data, datetime.datetime): posix_time = int(calendar.timegm(data.utctimetuple())) self.data = ( posix_time * definitions.MICROSECONDS_PER_SECOND) + data.microsecond self.text = '{0!s}'.format(data) elif isinstance(data, DateCompareObject): self.data = data.data self.text = '{0!s}'.format(data) else: raise ValueError('Unsupported type: {0:s}.'.format(type(data)))
288,886
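The datetime branch above converts a naive UTC datetime to microseconds since the POSIX epoch via calendar.timegm. Here is a standalone sketch of that conversion; the function name is hypothetical and MICROSECONDS_PER_SECOND from plaso's definitions module is written out literally.

# Standalone sketch of the datetime-to-timestamp conversion used in
# DateCompareObject.__init__(); names here are illustrative.
import calendar
import datetime

def datetime_to_micro_posix(value):
  """Converts a naive UTC datetime to microseconds since 1970-01-01."""
  posix_time = int(calendar.timegm(value.utctimetuple()))
  return posix_time * 1000000 + value.microsecond

print(datetime_to_micro_posix(datetime.datetime(2019, 1, 1, 12, 0, 0, 500)))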
Parses a log line. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file.
def _ParseLogLine(self, parser_mediator, structure): try: date_time = dfdatetime_time_elements.TimeElements( time_elements_tuple=structure.date_time) # TODO: check if date and time values are local time or in UTC. date_time.is_local_time = True except ValueError: parser_mediator.ProduceExtractionWarning( 'invalid date time value: {0!s}'.format(structure.date_time)) return event_data = SophosAVLogEventData() event_data.text = structure.text event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_ADDED, time_zone=parser_mediator.timezone) parser_mediator.ProduceEventWithEventData(event, event_data)
288,892
Verify that this file is a Sophos Anti-Virus log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfVFS. line (str): line from a text file. Returns: bool: True if the line is in the expected format, False if not.
def VerifyStructure(self, parser_mediator, line): try: structure = self._LOG_LINE.parseString(line) except pyparsing.ParseException: logger.debug('Not a Sophos Anti-Virus log file') return False # Expect spaces at position 9 and 16. if ' ' not in (line[8], line[15]): logger.debug('Not a Sophos Anti-Virus log file') return False try: dfdatetime_time_elements.TimeElements( time_elements_tuple=structure.date_time) except ValueError: logger.debug(( 'Not a Sophos Anti-Virus log file, invalid date and time: ' '{0!s}').format(structure.date_time)) return False return True
288,893
Retrieves a property value. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. properties (dict[str, object]): properties. property_name (str): name of the property. Returns: str: property value.
def _GetPropertyValue(self, parser_mediator, properties, property_name):
  property_value = properties.get(property_name, None)
  if isinstance(property_value, py2to3.BYTES_TYPE):
    try:
      # TODO: get encoding from XML metadata.
      property_value = property_value.decode('utf-8')
    except UnicodeDecodeError:
      parser_mediator.ProduceExtractionWarning(
          'unable to decode property: {0:s}'.format(property_name))

  return property_value
288,895
Formats a camel case property name as snake case. Args: property_name (str): property name in camel case. Returns: str: property name in snake case.
def _FormatPropertyName(self, property_name):
  # TODO: Add Unicode support.
  fix_key = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', property_name)
  return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', fix_key).lower()
288,896
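The two substitutions above turn CamelCase property names into snake_case. A standalone sketch follows; the sample property names are hypothetical inputs.

# Standalone sketch of the CamelCase to snake_case conversion performed by
# _FormatPropertyName(); sample inputs are illustrative.
import re

def format_property_name(property_name):
  """Formats a camel case property name as snake case."""
  fix_key = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', property_name)
  return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', fix_key).lower()

print(format_property_name('TotalTime'))          # total_time
print(format_property_name('HyperlinksChanged'))  # hyperlinks_changed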
Parses a properties XML file. Args: xml_data (bytes): data of a properties XML file. Returns: dict[str, object]: properties. Raises: zipfile.BadZipfile: if the properties XML file cannot be read.
def _ParsePropertiesXMLFile(self, xml_data): xml_root = ElementTree.fromstring(xml_data) properties = {} for xml_element in xml_root.iter(): if not xml_element.text: continue # The property name is formatted as: {URL}name # For example: {http://purl.org/dc/terms/}modified _, _, name = xml_element.tag.partition('}') # Do not including the 'lpstr' attribute because it is very verbose. if name == 'lpstr': continue property_name = self._PROPERTY_NAMES.get(name, None) if not property_name: property_name = self._FormatPropertyName(name) properties[property_name] = xml_element.text return properties
288,897
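The property names parsed above carry the XML namespace in braces, which is stripped with partition. The following sketch shows that handling on a tiny, hypothetical core.xml-style document; the snippet and variable names are illustrative assumptions.

# Standalone sketch of the {URL}name namespace handling used by
# _ParsePropertiesXMLFile(); the XML snippet is a trimmed, hypothetical example.
from xml.etree import ElementTree

XML_DATA = (
    b'<cp:coreProperties'
    b' xmlns:cp="http://schemas.openxmlformats.org/package/2006/metadata/'
    b'core-properties"'
    b' xmlns:dcterms="http://purl.org/dc/terms/">'
    b'<dcterms:modified>2019-01-01T12:00:00Z</dcterms:modified>'
    b'</cp:coreProperties>')

xml_root = ElementTree.fromstring(XML_DATA)
for xml_element in xml_root.iter():
  if not xml_element.text:
    continue
  # The tag is formatted as: {URL}name
  # For example: {http://purl.org/dc/terms/}modified
  _, _, name = xml_element.tag.partition('}')
  print(name, xml_element.text)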
Parses the relationships XML file (_rels/.rels). Args: xml_data (bytes): data of a _rels/.rels XML file. Returns: list[str]: property file paths. The path is relative to the root of the ZIP file. Raises: zipfile.BadZipfile: if the relationship XML file cannot be read.
def _ParseRelationshipsXMLFile(self, xml_data):
  xml_root = ElementTree.fromstring(xml_data)

  property_files = []
  for xml_element in xml_root.iter():
    type_attribute = xml_element.get('Type')
    if 'properties' in repr(type_attribute):
      target_attribute = xml_element.get('Target')
      property_files.append(target_attribute)

  return property_files
288,898
Parses an OXML file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. zip_file (zipfile.ZipFile): the zip file containing OXML content. It is not closed in this method, but will be closed by the parser logic in czip.py. Raises: UnableToParseFile: when the file cannot be parsed.
def InspectZipFile(self, parser_mediator, zip_file): try: xml_data = zip_file.read('_rels/.rels') property_files = self._ParseRelationshipsXMLFile(xml_data) except (IndexError, IOError, KeyError, OverflowError, ValueError, zipfile.BadZipfile) as exception: parser_mediator.ProduceExtractionWarning(( 'Unable to parse relationships XML file: _rels/.rels with error: ' '{0!s}').format(exception)) return metadata = {} for path in property_files: try: xml_data = zip_file.read(path) properties = self._ParsePropertiesXMLFile(xml_data) except (IndexError, IOError, KeyError, OverflowError, ValueError, zipfile.BadZipfile) as exception: parser_mediator.ProduceExtractionWarning(( 'Unable to parse properties XML file: {0:s} with error: ' '{1!s}').format(path, exception)) continue metadata.update(properties) event_data = OpenXMLEventData() event_data.app_version = self._GetPropertyValue( parser_mediator, metadata, 'app_version') event_data.app_version = self._GetPropertyValue( parser_mediator, metadata, 'app_version') event_data.author = self._GetPropertyValue( parser_mediator, metadata, 'author') event_data.creating_app = self._GetPropertyValue( parser_mediator, metadata, 'creating_app') event_data.doc_security = self._GetPropertyValue( parser_mediator, metadata, 'doc_security') event_data.hyperlinks_changed = self._GetPropertyValue( parser_mediator, metadata, 'hyperlinks_changed') event_data.i4 = self._GetPropertyValue( parser_mediator, metadata, 'i4') event_data.last_saved_by = self._GetPropertyValue( parser_mediator, metadata, 'last_saved_by') event_data.links_up_to_date = self._GetPropertyValue( parser_mediator, metadata, 'links_up_to_date') event_data.number_of_characters = self._GetPropertyValue( parser_mediator, metadata, 'number_of_characters') event_data.number_of_characters_with_spaces = self._GetPropertyValue( parser_mediator, metadata, 'number_of_characters_with_spaces') event_data.number_of_lines = self._GetPropertyValue( parser_mediator, metadata, 'number_of_lines') event_data.number_of_pages = self._GetPropertyValue( parser_mediator, metadata, 'number_of_pages') event_data.number_of_paragraphs = self._GetPropertyValue( parser_mediator, metadata, 'number_of_paragraphs') event_data.number_of_words = self._GetPropertyValue( parser_mediator, metadata, 'number_of_words') event_data.revision_number = self._GetPropertyValue( parser_mediator, metadata, 'revision_number') event_data.scale_crop = self._GetPropertyValue( parser_mediator, metadata, 'scale_crop') event_data.shared_doc = self._GetPropertyValue( parser_mediator, metadata, 'shared_doc') event_data.template = self._GetPropertyValue( parser_mediator, metadata, 'template') event_data.total_time = self._GetPropertyValue( parser_mediator, metadata, 'total_time') self._ProduceEvent( parser_mediator, event_data, metadata, 'created', definitions.TIME_DESCRIPTION_CREATION, 'creation time') self._ProduceEvent( parser_mediator, event_data, metadata, 'modified', definitions.TIME_DESCRIPTION_MODIFICATION, 'modification time') self._ProduceEvent( parser_mediator, event_data, metadata, 'last_printed', definitions.TIME_DESCRIPTION_LAST_PRINTED, 'last printed time')
288,900
Copies the attribute container from a dictionary. Args: attributes (dict[str, object]): attribute values per name.
def CopyFromDict(self, attributes):
  for attribute_name, attribute_value in attributes.items():
    # Not using startswith to improve performance.
    if attribute_name[0] == '_':
      continue

    setattr(self, attribute_name, attribute_value)
288,903
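A quick usage sketch of the copy behavior above, showing that keys starting with an underscore are skipped; SampleEventData and the attribute values are hypothetical and only reuse the CopyFromDict logic shown in the row above.

# Hypothetical container illustrating how CopyFromDict() skips private keys.
class SampleEventData(object):

  def CopyFromDict(self, attributes):
    for attribute_name, attribute_value in attributes.items():
      if attribute_name[0] == '_':
        continue
      setattr(self, attribute_name, attribute_value)

event_data = SampleEventData()
event_data.CopyFromDict({'filename': 'NTUSER.DAT', '_internal': 'skipped'})
print(event_data.filename)               # NTUSER.DAT
print(hasattr(event_data, '_internal'))  # False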
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): installation_value = None string_values = {} for registry_value in registry_key.GetValues(): # Ignore the default value. if not registry_value.name: continue if (registry_value.name == 'InstallDate' and registry_value.DataIsInteger()): installation_value = registry_value continue # Ignore any value that is empty or that does not contain a string. if not registry_value.data or not registry_value.DataIsString(): continue string_value_name = self._STRING_VALUE_NAME_STRINGS.get( registry_value.name, None) if not string_value_name: continue string_values[string_value_name] = registry_value.GetDataAsObject() values_dict = {} values_dict['Owner'] = string_values.get('owner', '') values_dict['Product name'] = string_values.get('product_name', '') values_dict['Service pack'] = string_values.get('service_pack', '') values_dict['Windows Version Information'] = string_values.get( 'version', '') event_data = windows_events.WindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = registry_key.offset event_data.regvalue = values_dict event = time_events.DateTimeValuesEvent( registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) # TODO: if not present indicate anomaly of missing installation # date and time. if installation_value: event_data = windows_events.WindowsRegistryInstallationEventData() event_data.key_path = registry_key.path event_data.offset = registry_key.offset event_data.owner = string_values.get('owner', None) event_data.product_name = string_values.get('product_name', None) event_data.service_pack = string_values.get('service_pack', None) event_data.version = string_values.get('version', None) installation_time = installation_value.GetDataAsObject() date_time = dfdatetime_posix_time.PosixTime(timestamp=installation_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_INSTALLATION) parser_mediator.ProduceEventWithEventData(event, event_data)
288,908
Parses a matching entry. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): name of the parsed structure. structure (pyparsing.ParseResults): elements parsed from the file. Raises: ParseError: when the structure type is unknown.
def ParseRecord(self, parser_mediator, key, structure): if key not in self._SUPPORTED_KEYS: raise errors.ParseError( 'Unable to parse record, unknown structure: {0:s}'.format(key)) if key == 'quota_exceeded_line': # skip this line return date_time = dfdatetime_time_elements.TimeElementsInMilliseconds() try: date_time.CopyFromStringISO8601(structure.date) except ValueError: parser_mediator.ProduceExtractionWarning( 'invalid date time value: {0:s}'.format(structure.date)) return if key == 'execution_line': event_data = SantaExecutionEventData() event_data.action = structure.action event_data.decision = structure.decision event_data.reason = structure.reason event_data.process_hash = structure.sha256 event_data.certificate_hash = structure.get('cert_sha256', None) event_data.certificate_common_name = structure.get('cert_cn', None) event_data.quarantine_url = structure.get('quarantine_url', None) event_data.pid = structure.pid event_data.ppid = structure.ppid event_data.uid = structure.uid event_data.user = structure.user event_data.gid = structure.gid event_data.group = structure.group event_data.mode = structure.mode event_data.process_path = structure.path event_data.process_arguments = structure.get('args', None) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_RUN) if key == 'file_system_event_line': event_data = SantaFileSystemEventData() event_data.action = structure.action event_data.file_path = structure.path event_data.file_new_path = structure.get('newpath', None) event_data.pid = structure.pid event_data.ppid = structure.ppid event_data.process = structure.process event_data.process_path = structure.processpath event_data.uid = structure.uid event_data.user = structure.user event_data.gid = structure.gid event_data.group = structure.group event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN) if key == 'umount_line': event_data = SantaMountEventData() event_data.action = structure.action event_data.mount = structure.mount event_data.volume = structure.volume event_data.bsd_name = structure.bsd_name event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN) if key == 'mount_line': event_data = SantaMountEventData() event_data.action = structure.action event_data.mount = structure.mount event_data.volume = structure.volume event_data.bsd_name = structure.bsd_name event_data.fs = structure.fs event_data.model = structure.model event_data.serial = structure.serial event_data.bus = structure.bus event_data.dmg_path = structure.dmg_path event_data.appearance = structure.appearance if event_data.appearance: new_date_time = dfdatetime_time_elements.TimeElementsInMilliseconds() try: new_date_time.CopyFromStringISO8601(event_data.appearance) new_event = time_events.DateTimeValuesEvent( new_date_time, definitions.TIME_DESCRIPTION_FIRST_CONNECTED) parser_mediator.ProduceEventWithEventData(new_event, event_data) except ValueError: parser_mediator.ProduceExtractionWarning( 'invalid date time value: {0:s}'.format(event_data.appearance)) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
288,912
Attempts to send an item to a ZeroMQ socket. Args: zmq_socket (zmq.Socket): used to send the item. item (object): sent on the queue. Will be pickled prior to sending. block (Optional[bool]): whether the push should be performed in blocking or non-blocking mode. Returns: bool: whether the item was sent successfully.
def _SendItem(self, zmq_socket, item, block=True): try: logger.debug('{0:s} sending item'.format(self.name)) if block: zmq_socket.send_pyobj(item) else: zmq_socket.send_pyobj(item, zmq.DONTWAIT) logger.debug('{0:s} sent item'.format(self.name)) return True except zmq.error.Again: logger.debug('{0:s} could not send an item'.format(self.name)) except zmq.error.ZMQError as exception: if exception.errno == errno.EINTR: logger.error( 'ZMQ syscall interrupted in {0:s}.'.format( self.name)) return False
288,914
Attempts to receive an item from a ZeroMQ socket. Args: zmq_socket (zmq.Socket): used to receive the item. Returns: object: item from the socket. Raises: QueueEmpty: if no item could be received within the timeout. zmq.error.ZMQError: if an error occurs in ZeroMQ.
def _ReceiveItemOnActivity(self, zmq_socket):
    events = zmq_socket.poll(
        self._ZMQ_SOCKET_RECEIVE_TIMEOUT_MILLISECONDS)
    if events:
      try:
        # Receive from the socket that was polled, as documented above.
        received_object = zmq_socket.recv_pyobj()
        return received_object

      except zmq.error.Again:
        logger.error(
            '{0:s}. Failed to receive item in time.'.format(
                self.name))
        raise

      except zmq.error.ZMQError as exception:
        if exception.errno == errno.EINTR:
          logger.error(
              'ZMQ syscall interrupted in {0:s}. Queue aborting.'.format(
                  self.name))
        raise

    raise errors.QueueEmpty
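A minimal, self-contained sketch of the non-blocking send and poll-then-receive pattern used by the two queue methods above, assuming the pyzmq package; the inproc endpoint name and payload are arbitrary.

import zmq

context = zmq.Context()
sender = context.socket(zmq.PAIR)
receiver = context.socket(zmq.PAIR)
sender.bind('inproc://sketch')
receiver.connect('inproc://sketch')

# Non-blocking send: zmq.error.Again is raised if the item cannot be queued.
try:
  sender.send_pyobj({'task': 'example'}, zmq.DONTWAIT)
except zmq.error.Again:
  print('could not send an item')

# Poll with a timeout before receiving, as _ReceiveItemOnActivity does.
if receiver.poll(timeout=1000):
  print(receiver.recv_pyobj())
else:
  print('no item received within the timeout')

sender.close()
receiver.close()
context.term()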
288,915
Closes the queue. Args: abort (Optional[bool]): whether the Close is the result of an abort condition. If True, queue contents may be lost. Raises: QueueAlreadyClosed: if the queue is not started, or has already been closed. RuntimeError: if closed or terminate event is missing.
def Close(self, abort=False):
    if not self._closed_event or not self._terminate_event:
      raise RuntimeError('Missing closed or terminate event.')

    if not abort and self._closed_event.is_set():
      raise errors.QueueAlreadyClosed()

    self._closed_event.set()

    if abort:
      if not self._closed_event.is_set():
        logger.warning(
            '{0:s} queue aborting. Contents may be lost.'.format(self.name))

      self._linger_seconds = 0

      # We can't determine whether there might be an operation being performed
      # on the socket in a separate method or thread, so we'll signal that any
      # such operation should cease.
      self._terminate_event.set()

    else:
      logger.debug(
          '{0:s} queue closing, will linger for up to {1:d} seconds'.format(
              self.name, self._linger_seconds))
288,918
Closes the queue. Args: abort (Optional[bool]): whether the Close is the result of an abort condition. If True, queue contents may be lost. Raises: QueueAlreadyClosed: if the queue is not started, or has already been closed. RuntimeError: if closed or terminate event is missing.
def Close(self, abort=False):
    if not self._closed_event or not self._terminate_event:
      raise RuntimeError('Missing closed or terminate event.')

    if not abort and self._closed_event.is_set():
      raise errors.QueueAlreadyClosed()

    self._closed_event.set()

    if abort:
      if not self._closed_event.is_set():
        logger.warning(
            '{0:s} queue aborting. Contents may be lost.'.format(self.name))

      # We can't determine whether there might be an operation being performed
      # on the socket in a separate method or thread, so we'll signal that any
      # such operation should cease.
      self._terminate_event.set()

      self._linger_seconds = 0

      if self._zmq_thread:
        logger.debug('[{0:s}] Waiting for thread to exit.'.format(self.name))
        self._zmq_thread.join(timeout=self.timeout_seconds)
        if self._zmq_thread.is_alive():
          logger.error((
              '{0:s} ZMQ responder thread did not exit within timeout').format(
                  self.name))
    else:
      logger.debug(
          '{0:s} queue closing, will linger for up to {1:d} seconds'.format(
              self.name, self._linger_seconds))
288,924
Listens for requests and replies to clients. Args: source_queue (Queue.queue): queue to use to pull items from. Raises: RuntimeError: if closed or terminate event is missing.
def _ZeroMQResponder(self, source_queue): if not self._closed_event or not self._terminate_event: raise RuntimeError('Missing closed or terminate event.') logger.debug('{0:s} responder thread started'.format(self.name)) item = None while not self._terminate_event.is_set(): if not item: try: if self._closed_event.is_set(): item = source_queue.get_nowait() else: item = source_queue.get(True, self._buffer_timeout_seconds) except Queue.Empty: if self._closed_event.is_set(): break continue try: # We need to receive a request before we can reply with the item. self._ReceiveItemOnActivity(self._zmq_socket) except errors.QueueEmpty: if self._closed_event.is_set() and self._queue.empty(): break continue sent_successfully = self._SendItem(self._zmq_socket, item) item = None if not sent_successfully: logger.error('Queue {0:s} unable to send item.'.format(self.name)) break logger.info('Queue {0:s} responder exiting.'.format(self.name)) self._zmq_socket.close(self._linger_seconds)
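A minimal sketch of the buffer-draining loop pattern the responder thread above uses: pull from a local queue with a timeout until the close event is set and the queue is empty. The queue and threading usage follows Python 3 module naming; the item and timeout values are illustrative.

import queue
import threading

source_queue = queue.Queue()
closed_event = threading.Event()

source_queue.put('item-1')
closed_event.set()  # simulate the queue having been closed

while True:
  try:
    if closed_event.is_set():
      item = source_queue.get_nowait()
    else:
      item = source_queue.get(True, 2.0)
  except queue.Empty:
    if closed_event.is_set():
      break
    continue

  print('would hand off:', item)
  if closed_event.is_set() and source_queue.empty():
    break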
288,925
Deregisters an output class. The output classes are identified based on their NAME attribute. Args: output_class (type): output module class. Raises: KeyError: if output class is not set for the corresponding data type.
def DeregisterOutput(cls, output_class): output_class_name = output_class.NAME.lower() if output_class_name in cls._disabled_output_classes: class_dict = cls._disabled_output_classes else: class_dict = cls._output_classes if output_class_name not in class_dict: raise KeyError( 'Output class not set for name: {0:s}.'.format( output_class.NAME)) del class_dict[output_class_name]
288,927
Retrieves the output class for a specific name. Args: name (str): name of the output module. Returns: type: output module class. Raises: KeyError: if there is no output class found with the supplied name. ValueError: if name is not a string.
def GetOutputClass(cls, name): if not isinstance(name, py2to3.STRING_TYPES): raise ValueError('Name attribute is not a string.') name = name.lower() if name not in cls._output_classes: raise KeyError( 'Name: [{0:s}] not registered as an output module.'.format(name)) return cls._output_classes[name]
288,929
Determines if a specific output class is registered with the manager. Args: name (str): name of the output module. Returns: bool: True if the output class is registered.
def HasOutputClass(cls, name): if not isinstance(name, py2to3.STRING_TYPES): return False return name.lower() in cls._output_classes
288,931
Determines if a specific output class is a linear output module. Args: name (str): name of the output module. Returns: bool: True if the output module is linear.
def IsLinearOutputModule(cls, name): name = name.lower() output_class = cls._output_classes.get(name, None) if not output_class: output_class = cls._disabled_output_classes.get(name, None) if output_class: return issubclass(output_class, interface.LinearOutputModule) return False
288,932
Creates a new output module object for the specified output format. Args: name (str): name of the output module. output_mediator (OutputMediator): output mediator. Returns: OutputModule: output module. Raises: KeyError: if there is no output class found with the supplied name. ValueError: if name is not a string.
def NewOutputModule(cls, name, output_mediator): output_class = cls.GetOutputClass(name) return output_class(output_mediator)
288,933
Registers an output class. The output classes are identified based on their NAME attribute. Args: output_class (type): output module class. disabled (Optional[bool]): True if the output module is disabled, for example because it did not load correctly. Raises: KeyError: if output class is already set for the corresponding name.
def RegisterOutput(cls, output_class, disabled=False): output_name = output_class.NAME.lower() if disabled: class_dict = cls._disabled_output_classes else: class_dict = cls._output_classes if output_name in class_dict: raise KeyError(( 'Output class already set for name: {0:s}.').format( output_class.NAME)) class_dict[output_name] = output_class
288,934
Registers output classes. The output classes are identified based on their NAME attribute. Args: output_classes (list[type]): output module classes. disabled (Optional[bool]): True if the output module is disabled, for example because it did not load correctly. Raises: KeyError: if output class is already set for the corresponding name.
def RegisterOutputs(cls, output_classes, disabled=False): for output_class in output_classes: cls.RegisterOutput(output_class, disabled)
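A minimal, self-contained sketch of the name-keyed registry pattern the output manager methods above implement; the FakeOutput class and its NAME value are hypothetical stand-ins for real output module classes.

class FakeOutput(object):
  """Stand-in output module class with the NAME attribute the manager keys on."""
  NAME = 'fake'

_output_classes = {}

def register_output(output_class):
  name = output_class.NAME.lower()
  if name in _output_classes:
    raise KeyError('Output class already set for name: {0:s}.'.format(name))
  _output_classes[name] = output_class

def get_output_class(name):
  name = name.lower()
  if name not in _output_classes:
    raise KeyError('Name: [{0:s}] not registered as an output module.'.format(name))
  return _output_classes[name]

register_output(FakeOutput)
assert get_output_class('FAKE') is FakeOutput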
288,935
Initializes a sample file profiler. Sample files are gzip compressed UTF-8 encoded CSV files. Args: identifier (str): identifier of the profiling session used to create the sample filename. configuration (ProfilingConfiguration): profiling configuration.
def __init__(self, identifier, configuration): super(SampleFileProfiler, self).__init__() self._identifier = identifier self._path = configuration.directory self._profile_measurements = {} self._sample_file = None self._start_time = None
288,939
Writes a string to the sample file. Args: content (str): content to write to the sample file.
def _WritesString(self, content): content_bytes = codecs.encode(content, 'utf-8') self._sample_file.write(content_bytes)
288,940
Starts timing CPU time. Args: profile_name (str): name of the profile to sample.
def StartTiming(self, profile_name): if profile_name not in self._profile_measurements: self._profile_measurements[profile_name] = CPUTimeMeasurement() self._profile_measurements[profile_name].SampleStart()
288,942
Stops timing CPU time. Args: profile_name (str): name of the profile to sample.
def StopTiming(self, profile_name): measurements = self._profile_measurements.get(profile_name) if measurements: measurements.SampleStop() sample = '{0:f}\t{1:s}\t{2:f}\n'.format( measurements.start_sample_time, profile_name, measurements.total_cpu_time) self._WritesString(sample)
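A minimal sketch of the start/stop sampling pattern used by StartTiming and StopTiming above, with time.process_time as an assumed stand-in for the CPUTimeMeasurement helper; the tab-separated sample line mirrors the format written above.

import time

class SimpleCPUTimeMeasurement(object):
  """Tracks CPU time between a start and a stop sample."""

  def __init__(self):
    self.start_sample_time = None
    self.total_cpu_time = 0.0
    self._start_cpu_time = None

  def SampleStart(self):
    self.start_sample_time = time.time()
    self._start_cpu_time = time.process_time()

  def SampleStop(self):
    self.total_cpu_time += time.process_time() - self._start_cpu_time

measurement = SimpleCPUTimeMeasurement()
measurement.SampleStart()
sum(range(1000000))  # some CPU-bound work to measure
measurement.SampleStop()
print('{0:f}\texample\t{1:f}'.format(
    measurement.start_sample_time, measurement.total_cpu_time))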
288,943
Initializes a memory profiler. Args: identifier (str): unique name of the profile. configuration (ProfilingConfiguration): profiling configuration.
def __init__(self, identifier, configuration): super(GuppyMemoryProfiler, self).__init__() self._identifier = identifier self._path = configuration.directory self._profiling_sample = 0 self._profiling_sample_rate = configuration.sample_rate self._heapy = None self._sample_file = '{0!s}.hpy'.format(identifier) if self._path: self._sample_file = os.path.join(self._path, self._sample_file) if hpy: self._heapy = hpy()
288,944
Takes a sample for profiling. Args: profile_name (str): name of the profile to sample. used_memory (int): amount of used memory in bytes.
def Sample(self, profile_name, used_memory): sample_time = time.time() sample = '{0:f}\t{1:s}\t{2:d}\n'.format( sample_time, profile_name, used_memory) self._WritesString(sample)
288,947
Takes a sample of data read or written for profiling. Args: operation (str): operation, either 'read' or 'write'. description (str): description of the data read. data_size (int): size of the data read in bytes. compressed_data_size (int): size of the compressed data read in bytes.
def Sample(self, operation, description, data_size, compressed_data_size): sample_time = time.time() sample = '{0:f}\t{1:s}\t{2:s}\t{3:d}\t{4:d}\n'.format( sample_time, operation, description, data_size, compressed_data_size) self._WritesString(sample)
288,948
Takes a sample of the status of queued tasks for profiling. Args: tasks_status (TasksStatus): status information about tasks.
def Sample(self, tasks_status): sample_time = time.time() sample = '{0:f}\t{1:d}\t{2:d}\t{3:d}\t{4:d}\t{5:d}\n'.format( sample_time, tasks_status.number_of_queued_tasks, tasks_status.number_of_tasks_processing, tasks_status.number_of_tasks_pending_merge, tasks_status.number_of_abandoned_tasks, tasks_status.total_number_of_tasks) self._WritesString(sample)
288,949
Takes a sample of the status of a task for profiling. Args: task (Task): a task. status (str): status.
def Sample(self, task, status): sample_time = time.time() sample = '{0:f}\t{1:s}\t{2:s}\n'.format( sample_time, task.identifier, status) self._WritesString(sample)
288,950
Initializes a Timesketch output module. Args: output_mediator (OutputMediator): mediates interactions between output modules and other components, such as storage and dfvfs.
def __init__(self, output_mediator): hostname = output_mediator.GetStoredHostname() if hostname: logger.debug('Hostname: {0:s}'.format(hostname)) super(TimesketchOutputModule, self).__init__(output_mediator) self._timeline_name = hostname self._timeline_owner = None self._timesketch = timesketch.create_app()
288,951
Sets the timeline name. Args: timeline_name (str): timeline name.
def SetTimelineName(self, timeline_name): self._timeline_name = timeline_name logger.info('Timeline name: {0:s}'.format(self._timeline_name))
288,953
Sets the username of the user that should own the timeline. Args: username (str): username.
def SetTimelineOwner(self, username): self._timeline_owner = username logger.info('Owner of the timeline: {0!s}'.format(self._timeline_owner))
288,954
Extracts relevant information from the HTTP headers. Args: header_data (bytes): HTTP header data. offset (int): offset of the cache record, relative to the start of the Firefox cache file. display_name (str): display name of the Firefox cache file. Returns: tuple: containing: str: HTTP request method or None if the value cannot be extracted. str: HTTP response code or None if the value cannot be extracted.
def _ParseHTTPHeaders(self, header_data, offset, display_name): header_string = header_data.decode('ascii', errors='replace') try: http_header_start = header_string.index('request-method') except ValueError: logger.debug('No request method in header: "{0:s}"'.format(header_string)) return None, None # HTTP request and response headers. http_headers = header_string[http_header_start::] header_parts = http_headers.split('\x00') # TODO: check len(header_parts). request_method = header_parts[1] if request_method not in self._REQUEST_METHODS: logger.debug(( '[{0:s}] {1:s}:{2:d}: Unknown HTTP method \'{3:s}\'. Response ' 'headers: \'{4:s}\'').format( self.NAME, display_name, offset, request_method, header_string)) try: response_head_start = http_headers.index('response-head') except ValueError: logger.debug('No response head in header: "{0:s}"'.format(header_string)) return request_method, None # HTTP response headers. response_head = http_headers[response_head_start::] response_head_parts = response_head.split('\x00') # Response code, followed by other response header key-value pairs, # separated by newline. # TODO: check len(response_head_parts). response_head_text = response_head_parts[1] response_head_text_parts = response_head_text.split('\r\n') # The first line contains response code. # TODO: check len(response_head_text_parts). response_code = response_head_text_parts[0] if not response_code.startswith('HTTP'): logger.debug(( '[{0:s}] {1:s}:{2:d}: Could not determine HTTP response code. ' 'Response headers: \'{3:s}\'.').format( self.NAME, display_name, offset, header_string)) return request_method, response_code
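A minimal sketch of the NUL-separated header layout the method above walks: the request method follows the 'request-method' key, and the first CRLF-separated line after 'response-head' carries the response code. The sample buffer is synthetic.

header_data = (
    b'request-method\x00GET\x00response-head\x00'
    b'HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\x00')

header_string = header_data.decode('ascii', errors='replace')

# 'request-method' is followed by its value, NUL-separated.
http_headers = header_string[header_string.index('request-method'):]
request_method = http_headers.split('\x00')[1]

# 'response-head' is followed by the raw response headers; the first
# CRLF-separated line carries the response code.
response_head = http_headers[http_headers.index('response-head'):]
response_code = response_head.split('\x00')[1].split('\r\n')[0]

print(request_method, response_code)  # GET HTTP/1.1 200 OK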
288,957
Determines the cache file block size. Args: file_object (dfvfs.FileIO): a file-like object. Returns: firefox_cache_config: namedtuple containing the block size and first record offset. Raises: UnableToParseFile: if no valid cache record could be found.
def _GetFirefoxConfig(self, file_object, display_name): # There ought to be a valid record within the first 4 MiB. We use this # limit to prevent reading large invalid files. to_read = min(file_object.get_size(), self._INITIAL_CACHE_FILE_SIZE) while file_object.get_offset() < to_read: offset = file_object.get_offset() try: cache_entry, _ = self._ReadCacheEntry( file_object, display_name, self._MINIMUM_BLOCK_SIZE) # We have not yet determined the block size, so we use the smallest # possible size. record_size = ( self._CACHE_ENTRY_HEADER_SIZE + cache_entry.request_size + cache_entry.information_size) if record_size >= 4096: # _CACHE_003_ block_size = 4096 elif record_size >= 1024: # _CACHE_002_ block_size = 1024 else: # _CACHE_001_ block_size = 256 return self.FIREFOX_CACHE_CONFIG(block_size, offset) except IOError: logger.debug('[{0:s}] {1:s}:{2:d}: Invalid record.'.format( self.NAME, display_name, offset)) raise errors.UnableToParseFile( 'Could not find a valid cache record. Not a Firefox cache file.')
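A minimal sketch of the record-size to block-size mapping used above; the thresholds correspond to the _CACHE_001_, _CACHE_002_ and _CACHE_003_ file layouts named in the comments.

def infer_block_size(record_size):
  """Maps a cache record size to the Firefox cache1 block size."""
  if record_size >= 4096:
    return 4096  # _CACHE_003_
  if record_size >= 1024:
    return 1024  # _CACHE_002_
  return 256  # _CACHE_001_

print(infer_block_size(1500))  # 1024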
288,958
Parses a cache entry. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. display_name (str): display name. block_size (int): block size.
def _ParseCacheEntry( self, parser_mediator, file_object, display_name, block_size): cache_entry, event_data = self._ReadCacheEntry( file_object, display_name, block_size) date_time = dfdatetime_posix_time.PosixTime( timestamp=cache_entry.last_fetched_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_VISITED) parser_mediator.ProduceEventWithEventData(event, event_data) if cache_entry.last_modified_time: date_time = dfdatetime_posix_time.PosixTime( timestamp=cache_entry.last_modified_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) if cache_entry.expiration_time: date_time = dfdatetime_posix_time.PosixTime( timestamp=cache_entry.expiration_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_EXPIRATION) parser_mediator.ProduceEventWithEventData(event, event_data)
288,959
Determines whether the values in the cache entry header are valid. Args: cache_entry_header (firefox_cache1_entry_header): cache entry header. Returns: bool: True if the cache entry header is valid.
def _ValidateCacheEntryHeader(self, cache_entry_header): return ( cache_entry_header.request_size > 0 and cache_entry_header.request_size < self._MAXIMUM_URL_LENGTH and cache_entry_header.major_format_version == 1 and cache_entry_header.last_fetched_time > 0 and cache_entry_header.fetch_count > 0)
288,961
Parses a Firefox cache file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
def ParseFileObject(self, parser_mediator, file_object): filename = parser_mediator.GetFilename() if (not self._CACHE_FILENAME_RE.match(filename) and not filename.startswith('_CACHE_00')): raise errors.UnableToParseFile('Not a Firefox cache1 file.') display_name = parser_mediator.GetDisplayName() firefox_config = self._GetFirefoxConfig(file_object, display_name) file_object.seek(firefox_config.first_record_offset) while file_object.get_offset() < file_object.get_size(): try: self._ParseCacheEntry( parser_mediator, file_object, display_name, firefox_config.block_size) except IOError: file_offset = file_object.get_offset() - self._MINIMUM_BLOCK_SIZE logger.debug(( '[{0:s}] Invalid cache record in file: {1:s} at offset: ' '{2:d}.').format(self.NAME, display_name, file_offset))
288,962
Determines the offset of the cache file metadata header. This method is inspired by the work of James Habben: https://github.com/JamesHabben/FirefoxCache2 Args: file_object (dfvfs.FileIO): a file-like object. Returns: int: offset of the cache file metadata header relative to the start of the file. Raises: UnableToParseFile: if the size of the cache file metadata cannot be determined.
def _GetCacheFileMetadataHeaderOffset(self, file_object):
    file_object.seek(-4, os.SEEK_END)
    file_offset = file_object.tell()

    metadata_size_map = self._GetDataTypeMap('uint32be')

    try:
      metadata_size, _ = self._ReadStructureFromFileObject(
          file_object, file_offset, metadata_size_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.UnableToParseFile(
          'Unable to parse cache file metadata size with error: {0!s}'.format(
              exception))

    # Firefox splits the content into chunks.
    number_of_chunks, remainder = divmod(metadata_size, self._CHUNK_SIZE)
    if remainder != 0:
      number_of_chunks += 1

    # Each chunk in the cached record is padded with two bytes.
    # Skip the first 4 bytes which contain a hash value of the cached content.
    return metadata_size + (number_of_chunks * 2) + 4
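A minimal sketch of the offset arithmetic above: the cached content is stored in fixed-size chunks, each of which adds two bytes of padding, plus a leading 4-byte hash. The 256 KiB chunk size is an assumption for illustration; the parser uses its own _CHUNK_SIZE constant.

def metadata_header_offset(metadata_size, chunk_size=256 * 1024):
  """Computes the offset of the cache2 file metadata header."""
  number_of_chunks, remainder = divmod(metadata_size, chunk_size)
  if remainder != 0:
    number_of_chunks += 1
  return metadata_size + (number_of_chunks * 2) + 4

print(metadata_header_offset(300000))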
288,963
Determines whether the cache file metadata header is valid. Args: cache_file_metadata_header (firefox_cache2_file_metadata_header): cache file metadata header. Returns: bool: True if the cache file metadata header is valid.
def _ValidateCacheFileMetadataHeader(self, cache_file_metadata_header): # TODO: add support for format version 2 and 3 return ( cache_file_metadata_header.key_size > 0 and cache_file_metadata_header.key_size < self._MAXIMUM_URL_LENGTH and cache_file_metadata_header.format_version == 1 and cache_file_metadata_header.last_fetched_time > 0 and cache_file_metadata_header.fetch_count > 0)
288,964
Parses a Firefox cache file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
def ParseFileObject(self, parser_mediator, file_object): filename = parser_mediator.GetFilename() if not self._CACHE_FILENAME_RE.match(filename): raise errors.UnableToParseFile('Not a Firefox cache2 file.') # The file needs to be at least 36 bytes in size for it to contain # a cache2 file metadata header and a 4-byte offset that points to its # location in the file. file_size = file_object.get_size() if file_size < 36: raise errors.UnableToParseFile( 'File size too small for Firefox cache2 file.') file_offset = self._GetCacheFileMetadataHeaderOffset(file_object) file_metadata_header_map = self._GetDataTypeMap( 'firefox_cache2_file_metadata_header') try: file_metadata_header, _ = self._ReadStructureFromFileObject( file_object, file_offset, file_metadata_header_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile(( 'Unable to parse Firefox cache2 file metadata header with error: ' '{0!s}').format(exception)) if not self._ValidateCacheFileMetadataHeader(file_metadata_header): raise errors.UnableToParseFile('Not a valid Firefox cache2 record.') url = file_object.read(file_metadata_header.key_size) header_data = file_object.read() display_name = parser_mediator.GetDisplayName() request_method, response_code = self._ParseHTTPHeaders( header_data[:-4], file_offset, display_name) event_data = FirefoxCacheEventData() event_data.fetch_count = file_metadata_header.fetch_count event_data.frequency = file_metadata_header.frequency event_data.request_method = request_method event_data.request_size = file_metadata_header.key_size event_data.response_code = response_code event_data.version = self._CACHE_VERSION event_data.url = url.decode('ascii', errors='replace') date_time = dfdatetime_posix_time.PosixTime( timestamp=file_metadata_header.last_fetched_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_VISITED) parser_mediator.ProduceEventWithEventData(event, event_data) if file_metadata_header.last_modified_time: date_time = dfdatetime_posix_time.PosixTime( timestamp=file_metadata_header.last_modified_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) if file_metadata_header.expiration_time: date_time = dfdatetime_posix_time.PosixTime( timestamp=file_metadata_header.expiration_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_EXPIRATION) parser_mediator.ProduceEventWithEventData(event, event_data)
288,965
Initializes the CLI tool object. Args: input_reader (Optional[InputReader]): input reader, where None indicates that the stdin input reader should be used. output_writer (Optional[OutputWriter]): output writer, where None indicates that the stdout output writer should be used.
def __init__(self, input_reader=None, output_writer=None): super(PsortTool, self).__init__( input_reader=input_reader, output_writer=output_writer) self._analysis_manager = analysis_manager.AnalysisPluginManager self._analysis_plugins = None self._analysis_plugins_output_format = None self._command_line_arguments = None self._deduplicate_events = True self._event_filter_expression = None self._event_filter = None self._knowledge_base = knowledge_base.KnowledgeBase() self._number_of_analysis_reports = 0 self._preferred_language = 'en-US' self._process_memory_limit = None self._status_view_mode = status_view.StatusView.MODE_WINDOW self._status_view = status_view.StatusView(self._output_writer, self.NAME) self._stdout_output_writer = isinstance( self._output_writer, tools.StdoutOutputWriter) self._storage_file_path = None self._temporary_directory = None self._time_slice = None self._use_time_slicer = False self._use_zeromq = True self._worker_memory_limit = None self.list_analysis_plugins = False self.list_language_identifiers = False self.list_output_modules = False self.list_profilers = False
288,966
Checks if the storage file path is valid. Args: storage_file_path (str): path of the storage file. Raises: BadConfigOption: if the storage file path is invalid.
def _CheckStorageFile(self, storage_file_path): # pylint: disable=arguments-differ if os.path.exists(storage_file_path): if not os.path.isfile(storage_file_path): raise errors.BadConfigOption( 'Storage file: {0:s} already exists and is not a file.'.format( storage_file_path)) logger.warning('Appending to an already existing storage file.') dirname = os.path.dirname(storage_file_path) if not dirname: dirname = '.' # TODO: add a more thorough check to see if the storage file really is # a plaso storage file. if not os.access(dirname, os.W_OK): raise errors.BadConfigOption( 'Unable to write to storage file: {0:s}'.format(storage_file_path))
288,967
Retrieves analysis plugins. Args: analysis_plugins_string (str): comma separated names of analysis plugins to enable. Returns: list[AnalysisPlugin]: analysis plugins.
def _GetAnalysisPlugins(self, analysis_plugins_string): if not analysis_plugins_string: return [] analysis_plugins_list = [ name.strip() for name in analysis_plugins_string.split(',')] analysis_plugins = self._analysis_manager.GetPluginObjects( analysis_plugins_list) return analysis_plugins.values()
288,968
Parses the analysis plugin options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if non-existent analysis plugins are specified.
def _ParseAnalysisPluginOptions(self, options): # Get a list of all available plugins. analysis_plugin_info = self._analysis_manager.GetAllPluginInformation() # Use set-comprehension to create a set of the analysis plugin names. analysis_plugin_names = { name.lower() for name, _, _ in analysis_plugin_info} analysis_plugins = self.ParseStringOption(options, 'analysis_plugins') if not analysis_plugins: return # Use set-comprehension to create a set of the requested plugin names. requested_plugin_names = { name.strip().lower() for name in analysis_plugins.split(',')} # Check to see if we are trying to load plugins that do not exist. difference = requested_plugin_names.difference(analysis_plugin_names) if difference: raise errors.BadConfigOption( 'Non-existent analysis plugins specified: {0:s}'.format( ' '.join(difference))) self._analysis_plugins = self._GetAnalysisPlugins(analysis_plugins) for analysis_plugin in self._analysis_plugins: helpers_manager.ArgumentHelperManager.ParseOptions( options, analysis_plugin)
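A minimal sketch of the set-difference check used above to reject unknown plugin names; the available plugin names are hypothetical.

available_plugin_names = {'tagging', 'sessionize', 'unique_domains_count'}

requested = 'Tagging, nonexistent'
requested_plugin_names = {
    name.strip().lower() for name in requested.split(',')}

difference = requested_plugin_names.difference(available_plugin_names)
if difference:
  print('Non-existent analysis plugins specified: {0:s}'.format(
      ' '.join(sorted(difference))))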
288,969
Parses the filter options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
def _ParseFilterOptions(self, options): self._event_filter_expression = self.ParseStringOption(options, 'filter') if self._event_filter_expression: self._event_filter = event_filter.EventObjectFilter() try: self._event_filter.CompileFilter(self._event_filter_expression) except errors.ParseError as exception: raise errors.BadConfigOption(( 'Unable to compile filter expression with error: ' '{0!s}').format(exception)) time_slice_event_time_string = getattr(options, 'slice', None) time_slice_duration = getattr(options, 'slice_size', 5) self._use_time_slicer = getattr(options, 'slicer', False) # The slice and slicer cannot be set at the same time. if time_slice_event_time_string and self._use_time_slicer: raise errors.BadConfigOption( 'Time slice and slicer cannot be used at the same time.') time_slice_event_timestamp = None if time_slice_event_time_string: # Note self._preferred_time_zone is None when not set but represents UTC. preferred_time_zone = self._preferred_time_zone or 'UTC' timezone = pytz.timezone(preferred_time_zone) time_slice_event_timestamp = timelib.Timestamp.FromTimeString( time_slice_event_time_string, timezone=timezone) if time_slice_event_timestamp is None: raise errors.BadConfigOption( 'Unsupported time slice event date and time: {0:s}'.format( time_slice_event_time_string)) if time_slice_event_timestamp is not None or self._use_time_slicer: # Note that time slicer uses the time slice to determine the duration. self._time_slice = time_slices.TimeSlice( time_slice_event_timestamp, duration=time_slice_duration)
288,970
Parses the informational options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
def _ParseInformationalOptions(self, options): super(PsortTool, self)._ParseInformationalOptions(options) self._quiet_mode = getattr(options, 'quiet', False) helpers_manager.ArgumentHelperManager.ParseOptions( options, self, names=['status_view'])
288,971
Parses the processing options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
def _ParseProcessingOptions(self, options):
    argument_helper_names = [
        'process_resources', 'temporary_directory', 'zeromq']
    helpers_manager.ArgumentHelperManager.ParseOptions(
        options, self, names=argument_helper_names)

    worker_memory_limit = getattr(options, 'worker_memory_limit', None)

    if worker_memory_limit and worker_memory_limit < 0:
      raise errors.BadConfigOption(
          'Invalid worker memory limit: value cannot be negative.')

    self._worker_memory_limit = worker_memory_limit
288,972
Adds processing options to the argument group. Args: argument_group (argparse._ArgumentGroup): argparse argument group.
def AddProcessingOptions(self, argument_group):
    argument_helper_names = ['temporary_directory', 'zeromq']
    if self._CanEnforceProcessMemoryLimit():
      argument_helper_names.append('process_resources')

    helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
        argument_group, names=argument_helper_names)

    argument_group.add_argument(
        '--worker-memory-limit', '--worker_memory_limit',
        dest='worker_memory_limit', action='store', type=int,
        metavar='SIZE', help=(
            'Maximum amount of memory (data segment and shared memory) '
            'a worker process is allowed to consume in bytes, where 0 '
            'represents no limit. The default limit is 2147483648 (2 GiB). '
            'If a worker process exceeds this limit it is killed by the main '
            '(foreman) process.'))
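A minimal, self-contained argparse sketch of the worker memory limit option added above, including the negative-value check performed when the option is parsed; the example argument value is arbitrary.

import argparse

parser = argparse.ArgumentParser()
group = parser.add_argument_group('processing arguments')
group.add_argument(
    '--worker-memory-limit', '--worker_memory_limit',
    dest='worker_memory_limit', action='store', type=int, default=None,
    metavar='SIZE', help=(
        'Maximum amount of memory a worker process is allowed to consume '
        'in bytes, where 0 represents no limit.'))

options = parser.parse_args(['--worker-memory-limit', '2147483648'])
if options.worker_memory_limit and options.worker_memory_limit < 0:
  print('Invalid worker memory limit: value cannot be negative.')
print(options.worker_memory_limit)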
288,973
Parses the options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
def ParseOptions(self, options): # The output modules options are dependent on the preferred language # and preferred time zone options. self._ParseTimezoneOption(options) names = ['analysis_plugins', 'language', 'profiling'] helpers_manager.ArgumentHelperManager.ParseOptions( options, self, names=names) self.list_analysis_plugins = self._analysis_plugins == 'list' self.list_language_identifiers = self._preferred_language == 'list' self.list_profilers = self._profilers == 'list' if (self.list_analysis_plugins or self.list_language_identifiers or self.list_profilers or self.list_timezones): return # Check output modules after the other listable options, otherwise # it could raise with "requires an output file". helpers_manager.ArgumentHelperManager.ParseOptions( options, self, names=['output_modules']) self.list_output_modules = self._output_format == 'list' if self.list_output_modules: return self._ParseInformationalOptions(options) helpers_manager.ArgumentHelperManager.ParseOptions( options, self, names=['data_location']) self._ParseLogFileOptions(options) self._ParseProcessingOptions(options) helpers_manager.ArgumentHelperManager.ParseOptions( options, self, names=['event_filters']) self._deduplicate_events = getattr(options, 'dedup', True) if self._data_location: # Update the data location with the calculated value. options.data_location = self._data_location else: logger.warning('Unable to automatically determine data location.') self._command_line_arguments = self.GetCommandLineArguments() helpers_manager.ArgumentHelperManager.ParseOptions( options, self, names=['storage_file']) # TODO: move check into _CheckStorageFile. if not self._storage_file_path: raise errors.BadConfigOption('Missing storage file option.') if not os.path.isfile(self._storage_file_path): raise errors.BadConfigOption( 'No such storage file: {0:s}.'.format(self._storage_file_path)) self._EnforceProcessMemoryLimit(self._process_memory_limit) self._analysis_plugins = self._CreateAnalysisPlugins(options) self._output_module = self._CreateOutputModule(options)
288,975
Initializes an event object. Args: uuid (uuid.UUID): UUID. origin (str): origin of the event (event source). E.g. the path of the corresponding LNK file or file reference MFT entry with the corresponding NTFS $OBJECT_ID attribute. Raises: ValueError: if the UUID version is not supported.
def __init__(self, uuid, origin):
    if uuid.version != 1:
      raise ValueError('Unsupported UUID version.')

    mac_address = '{0:s}:{1:s}:{2:s}:{3:s}:{4:s}:{5:s}'.format(
        uuid.hex[20:22], uuid.hex[22:24], uuid.hex[24:26], uuid.hex[26:28],
        uuid.hex[28:30], uuid.hex[30:32])

    super(WindowsDistributedLinkTrackingEventData, self).__init__(
        data_type=self.DATA_TYPE)
    self.mac_address = mac_address
    # TODO: replace origin by something machine readable.
    self.origin = origin
    self.uuid = '{0!s}'.format(uuid)
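A minimal sketch of extracting the node (MAC address) bytes from a version 1 UUID, as the event data class above does; the sample UUID is generated on the fly and therefore arbitrary.

import uuid

sample_uuid = uuid.uuid1()  # version 1 UUIDs embed the node (MAC) value
if sample_uuid.version != 1:
  raise ValueError('Unsupported UUID version.')

# The last 12 hexadecimal digits of the UUID hold the node value.
mac_address = ':'.join(
    sample_uuid.hex[index:index + 2] for index in range(20, 32, 2))
print(mac_address)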
288,977
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type.
def ParseOptions(cls, options, configuration_object): if not isinstance(configuration_object, tools.CLITool): raise errors.BadConfigObject( 'Configuration object is not an instance of CLITool') yara_rules_string = None path = getattr(options, 'yara_rules_path', None) if path: try: with io.open(path, 'rt', encoding='utf-8') as rules_file: yara_rules_string = rules_file.read() except IOError as exception: raise errors.BadConfigObject( 'Unable to read Yara rules file: {0:s} with error: {1!s}'.format( path, exception)) try: # We try to parse the rules here, to check that the definitions are # valid. We then pass the string definitions along to the workers, so # that they don't need read access to the rules file. yara.compile(source=yara_rules_string) except yara.Error as exception: raise errors.BadConfigObject( 'Unable to parse Yara rules in: {0:s} with error: {1!s}'.format( path, exception)) setattr(configuration_object, '_yara_rules_string', yara_rules_string)
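A minimal sketch of validating Yara rule definitions from a string, as the argument helper above does before handing the string to workers, assuming the yara-python package is installed; the rule itself is a trivial placeholder.

import yara

yara_rules_string = 'rule placeholder { condition: false }'

try:
  # Compiling validates the definitions; the compiled object is discarded
  # because only the rule string is passed along, as in the helper above.
  yara.compile(source=yara_rules_string)
except yara.Error as exception:
  print('Unable to parse Yara rules with error: {0!s}'.format(exception))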
288,985