docstring: string (length 52 to 499)
function: string (length 67 to 35.2k)
__index_level_0__: int64 (52.6k to 1.16M)
Initializes the output module object. Args: output_mediator (OutputMediator): output mediator. Raises: ValueError: if the file handle is missing.
def __init__(self, output_mediator):
  super(SQLite4n6TimeOutputModule, self).__init__(output_mediator)
  self._connection = None
  self._count = 0
  self._cursor = None
  self._filename = None
288,540
Queries the database for unique values of a field. Args: field_name (str): name of the field to retrieve. Returns: dict[str, int]: number of occurrences per unique value of the field.
def _GetDistinctValues(self, field_name):
  self._cursor.execute(
      'SELECT {0:s}, COUNT({0:s}) FROM log2timeline GROUP BY {0:s}'.format(
          field_name))

  result = {}
  row = self._cursor.fetchone()
  while row:
    if row[0]:
      result[row[0]] = row[1]
    row = self._cursor.fetchone()

  return result
288,541
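As an aside, the GROUP BY counting pattern used by _GetDistinctValues() can be reproduced with a minimal, self-contained sqlite3 sketch; the table and column names below are illustrative, not the actual 4n6time schema.

import sqlite3

connection = sqlite3.connect(':memory:')
cursor = connection.cursor()
cursor.execute('CREATE TABLE log2timeline (source TEXT)')
cursor.executemany(
    'INSERT INTO log2timeline VALUES (?)', [('FILE',), ('FILE',), ('REG',)])

# Same query shape as _GetDistinctValues(): count rows per distinct value.
cursor.execute(
    'SELECT source, COUNT(source) FROM log2timeline GROUP BY source')
result = {row[0]: row[1] for row in cursor.fetchall() if row[0]}
# result == {'FILE': 2, 'REG': 1}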
Writes the body of an event to the output. Args: event (EventObject): event.
def WriteEventBody(self, event):
  # SQLite appears to support millisecond precision, but 4n6time does not
  # seem to use it.
  row = self._GetSanitizedEventValues(event)

  self._cursor.execute(self._INSERT_QUERY, row)
  self._count += 1

  # Commit the current transaction every 10000 inserts.
  if self._count % 10000 == 0:
    self._connection.commit()
    if self._set_status:
      self._set_status('Inserting event: {0:d}'.format(self._count))
288,545
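The periodic-commit pattern above (flush every 10000 inserts) can be sketched independently of plaso; the table layout and values below are assumptions for illustration only.

import sqlite3

COMMIT_INTERVAL = 10000  # mirrors the hard-coded interval used above.

connection = sqlite3.connect(':memory:')
cursor = connection.cursor()
cursor.execute('CREATE TABLE log2timeline (datetime TEXT, message TEXT)')

count = 0
for index in range(25000):
  cursor.execute(
      'INSERT INTO log2timeline VALUES (?, ?)',
      ('2019-01-01T00:00:00', 'event {0:d}'.format(index)))
  count += 1
  # Commit periodically so a long run does not keep one huge transaction open.
  if count % COMMIT_INTERVAL == 0:
    connection.commit()

connection.commit()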
Initializes a filter file. Args: path (str): path to a file that contains one or more path filters.
def __init__(self, path):
  super(FilterFile, self).__init__()
  self._path = path
288,546
Builds find specifications from a filter file. Args: environment_variables (Optional[list[EnvironmentVariableArtifact]]): environment variables. Returns: list[dfvfs.FindSpec]: find specifications.
def BuildFindSpecs(self, environment_variables=None): path_attributes = {} if environment_variables: for environment_variable in environment_variables: attribute_name = environment_variable.name.lower() attribute_value = environment_variable.value if not isinstance(attribute_value, py2to3.STRING_TYPES): continue # Remove the drive letter. if len(attribute_value) > 2 and attribute_value[1] == ':': _, _, attribute_value = attribute_value.rpartition(':') if attribute_value.startswith('\\'): attribute_value = attribute_value.replace('\\', '/') path_attributes[attribute_name] = attribute_value find_specs = [] with open(self._path, 'r') as file_object: for line in file_object: line = line.strip() if line.startswith('#'): continue if path_attributes: try: line = line.format(**path_attributes) except KeyError as exception: logger.error(( 'Unable to expand path filter: {0:s} with error: ' '{1!s}').format(line, exception)) continue if not line.startswith('/'): logger.warning(( 'The path filter must be defined as an absolute path: ' '{0:s}').format(line)) continue # Convert the path filters into a list of path segments and strip # the root path segment. path_segments = line.split('/') path_segments.pop(0) if not path_segments[-1]: logger.warning( 'Empty last path segment in path filter: {0:s}'.format(line)) continue find_spec = file_system_searcher.FindSpec( location_regex=path_segments, case_sensitive=False) find_specs.append(find_spec) return find_specs
288,547
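To illustrate the environment-variable expansion performed by BuildFindSpecs(), here is a small sketch of the normalization and formatting steps in isolation; the %SystemRoot% value and filter line are hypothetical.

# Normalize a Windows environment variable value the same way as above:
# strip the drive letter and convert backslashes to forward slashes.
attribute_value = 'C:\\Windows'
if len(attribute_value) > 2 and attribute_value[1] == ':':
  _, _, attribute_value = attribute_value.rpartition(':')
if attribute_value.startswith('\\'):
  attribute_value = attribute_value.replace('\\', '/')

path_attributes = {'systemroot': attribute_value}

# A filter file line can then reference the variable with str.format().
line = '{systemroot}/System32/config/SAM'
print(line.format(**path_attributes))  # /Windows/System32/config/SAM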
Retrieves the parser names of a specific preset category. Args: category (str): parser preset category. Returns: list[str]: parser names in alphabetical order.
def _GetParsersFromPresetCategory(cls, category):
  preset_definition = cls._presets.GetPresetByName(category)
  if preset_definition is None:
    return []

  preset_names = cls._presets.GetNames()
  parser_names = set()
  for element_name in preset_definition.parsers:
    if element_name in preset_names:
      category_parser_names = cls._GetParsersFromPresetCategory(element_name)
      parser_names.update(category_parser_names)
    else:
      parser_names.add(element_name)

  return sorted(parser_names)
288,549
Reduces the parsers and plugins to include and exclude. If an intersection is found, the parser or plugin is removed from the inclusion set. If a parser is not in the inclusion set there is no need to have it in the exclusion set. Args: includes (dict[str, BaseParser]): included parsers and plugins by name. excludes (dict[str, BaseParser]): excluded parsers and plugins by name.
def _ReduceParserFilters(cls, includes, excludes):
  if not includes or not excludes:
    return

  for parser_name in set(includes).intersection(excludes):
    # Check parser and plugin list for exact equivalence.
    if includes[parser_name] == excludes[parser_name]:
      logger.warning(
          'Parser {0:s} was in both the inclusion and exclusion lists. '
          'Ignoring included parser.'.format(parser_name))
      includes.pop(parser_name)
      continue

    # Remove plugins that are defined in both the inclusion and exclusion
    # lists.
    plugin_includes = includes[parser_name]
    plugin_excludes = excludes[parser_name]
    intersection = set(plugin_includes).intersection(plugin_excludes)
    if not intersection:
      continue

    logger.warning(
        'Parser {0:s} plugins: {1:s} in both the inclusion and exclusion '
        'lists. Ignoring included plugins.'.format(
            parser_name, ', '.join(intersection)))
    plugins_list = list(set(plugin_includes).difference(intersection))
    includes[parser_name] = plugins_list

  # Remove excluded parsers that do not run.
  parsers_to_pop = []
  for parser_name in excludes:
    if parser_name in includes:
      continue

    logger.warning(
        'The excluded parser: {0:s} is not associated with the included '
        'parsers: {1:s}. Ignoring excluded parser.'.format(
            parser_name, ', '.join(includes.keys())))
    parsers_to_pop.append(parser_name)

  for parser_name in parsers_to_pop:
    excludes.pop(parser_name)
288,550
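The effect of _ReduceParserFilters() is easiest to see on a small example; the parser and plugin names below are illustrative only.

includes = {'winreg': ['bagmru', 'userassist'], 'syslog': []}
excludes = {'winreg': ['bagmru'], 'sqlite': []}

# After _ReduceParserFilters(includes, excludes) the dictionaries become:
#   includes == {'winreg': ['userassist'], 'syslog': []}
#     'bagmru' was in both lists, so it is dropped from the inclusion side.
#   excludes == {'winreg': ['bagmru']}
#     'sqlite' is not included at all, so excluding it is redundant and it
#     is removed from the exclusion side.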
Creates a signature scanner for format specifications with signatures. Args: specification_store (FormatSpecificationStore): format specifications with signatures. Returns: pysigscan.scanner: signature scanner.
def CreateSignatureScanner(cls, specification_store):
  scanner_object = pysigscan.scanner()

  for format_specification in specification_store.specifications:
    for signature in format_specification.signatures:
      pattern_offset = signature.offset

      if pattern_offset is None:
        signature_flags = pysigscan.signature_flags.NO_OFFSET
      elif pattern_offset < 0:
        pattern_offset *= -1
        signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END
      else:
        signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START

      scanner_object.add_signature(
          signature.identifier, pattern_offset, signature.pattern,
          signature_flags)

  return scanner_object
288,551
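The offset-to-flag selection in CreateSignatureScanner() can be summarized without pysigscan as a small helper; the symbolic names returned here simply mirror the pysigscan flags chosen above and the helper itself is hypothetical.

def GetSignatureFlag(pattern_offset):
  """Returns a symbolic flag name and a non-negative pattern offset."""
  if pattern_offset is None:
    return 'NO_OFFSET', 0
  if pattern_offset < 0:
    return 'RELATIVE_FROM_END', -pattern_offset
  return 'RELATIVE_FROM_START', pattern_offset

print(GetSignatureFlag(None))  # ('NO_OFFSET', 0)
print(GetSignatureFlag(-4))    # ('RELATIVE_FROM_END', 4)
print(GetSignatureFlag(16))    # ('RELATIVE_FROM_START', 16)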
Retrieves the parser and parser plugin names. Args: parser_filter_expression (Optional[str]): parser filter expression, where None represents all parsers and plugins. Returns: list[str]: parser and parser plugin names.
def GetParserAndPluginNames(cls, parser_filter_expression=None):
  parser_and_plugin_names = []
  for parser_name, parser_class in cls.GetParsers(
      parser_filter_expression=parser_filter_expression):
    parser_and_plugin_names.append(parser_name)

    if parser_class.SupportsPlugins():
      for plugin_name, _ in parser_class.GetPlugins():
        parser_and_plugin_names.append(
            '{0:s}/{1:s}'.format(parser_name, plugin_name))

  return parser_and_plugin_names
288,554
Retrieves the parser plugins information. Args: parser_filter_expression (Optional[str]): parser filter expression, where None represents all parsers and plugins. Returns: list[tuple[str, str]]: pairs of parser plugin names and descriptions.
def GetParserPluginsInformation(cls, parser_filter_expression=None):
  parser_plugins_information = []
  for _, parser_class in cls.GetParsers(
      parser_filter_expression=parser_filter_expression):
    if parser_class.SupportsPlugins():
      for plugin_name, plugin_class in parser_class.GetPlugins():
        description = getattr(plugin_class, 'DESCRIPTION', '')
        parser_plugins_information.append((plugin_name, description))

  return parser_plugins_information
288,555
Retrieves a specific parser object by its name. Args: parser_name (str): name of the parser. Returns: BaseParser: parser object or None.
def GetParserObjectByName(cls, parser_name):
  parser_class = cls._parser_classes.get(parser_name, None)
  if parser_class:
    return parser_class()
  return None
288,556
Retrieves the parser objects. Args: parser_filter_expression (Optional[str]): parser filter expression, where None represents all parsers and plugins. Returns: dict[str, BaseParser]: parsers per name.
def GetParserObjects(cls, parser_filter_expression=None): includes, excludes = cls._GetParserFilters(parser_filter_expression) parser_objects = {} for parser_name, parser_class in iter(cls._parser_classes.items()): # If there are no includes all parsers are included by default. if not includes and parser_name in excludes: continue if includes and parser_name not in includes: continue parser_object = parser_class() if parser_class.SupportsPlugins(): plugin_includes = None if parser_name in includes: plugin_includes = includes[parser_name] parser_object.EnablePlugins(plugin_includes) parser_objects[parser_name] = parser_object return parser_objects
288,557
Registers a parser class. The parser classes are identified based on their lower case name. Args: parser_class (type): parser class (subclass of BaseParser). Raises: KeyError: if parser class is already set for the corresponding name.
def RegisterParser(cls, parser_class):
  parser_name = parser_class.NAME.lower()
  if parser_name in cls._parser_classes:
    raise KeyError('Parser class already set for name: {0:s}.'.format(
        parser_class.NAME))

  cls._parser_classes[parser_name] = parser_class
288,562
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
def GetMessages(self, formatter_mediator, event): if self.DATA_TYPE != event.data_type: raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format( event.data_type)) event_values = event.CopyToDict() security = event_values.get('security', None) if security: security_flags = [] for flag, description in iter(self._SECURITY_VALUES.items()): if security & flag: security_flags.append(description) security_string = '0x{0:08x}: {1:s}'.format( security, ','.join(security_flags)) event_values['security'] = security_string for key, value in iter(event_values.items()): if isinstance(value, py2to3.BYTES_TYPE): event_values[key] = repr(value) return self._ConditionalFormatMessages(event_values)
288,563
Initializes the CLI tool object. Args: input_reader (Optional[InputReader]): input reader, where None indicates that the stdin input reader should be used. output_writer (Optional[OutputWriter]): output writer, where None indicates that the stdout output writer should be used.
def __init__(self, input_reader=None, output_writer=None): super(ImageExportTool, self).__init__( input_reader=input_reader, output_writer=output_writer) self._abort = False self._artifact_definitions_path = None self._artifact_filters = None self._artifacts_registry = None self._custom_artifacts_path = None self._destination_path = None self._digests = {} self._filter_collection = file_entry_filters.FileEntryFilterCollection() self._filter_file = None self._path_spec_extractor = extractors.PathSpecExtractor() self._process_memory_limit = None self._resolver_context = context.Context() self._skip_duplicates = True self._source_type = None self.has_filters = False self.list_signature_identifiers = False
288,564
Calculates a SHA-256 digest of the contents of the file entry. Args: file_entry (dfvfs.FileEntry): file entry whose content will be hashed. data_stream_name (str): name of the data stream whose content is to be hashed. Returns: str: hexadecimal representation of the SHA-256 hash or None if the digest cannot be determined.
def _CalculateDigestHash(self, file_entry, data_stream_name):
  file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
  if not file_object:
    return None

  try:
    file_object.seek(0, os.SEEK_SET)

    hasher_object = hashers_manager.HashersManager.GetHasher('sha256')

    data = file_object.read(self._READ_BUFFER_SIZE)
    while data:
      hasher_object.Update(data)
      data = file_object.read(self._READ_BUFFER_SIZE)

  finally:
    file_object.close()

  return hasher_object.GetStringDigest()
288,565
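The chunked SHA-256 hashing done by _CalculateDigestHash() follows a standard pattern; a stand-alone sketch using hashlib on a regular file looks like this (the buffer size is an assumption, not plaso's _READ_BUFFER_SIZE).

import hashlib
import os

READ_BUFFER_SIZE = 4096  # illustrative value.

def CalculateSHA256(path):
  """Returns the hexadecimal SHA-256 digest of a file on disk."""
  hasher = hashlib.sha256()
  with open(path, 'rb') as file_object:
    file_object.seek(0, os.SEEK_SET)
    data = file_object.read(READ_BUFFER_SIZE)
    while data:
      hasher.update(data)
      data = file_object.read(READ_BUFFER_SIZE)
  return hasher.hexdigest()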
Extracts files. Args: source_path_specs (list[dfvfs.PathSpec]): path specifications to extract. destination_path (str): path where the extracted files should be stored. output_writer (CLIOutputWriter): output writer. skip_duplicates (Optional[bool]): True if files with duplicate content should be skipped.
def _Extract(
    self, source_path_specs, destination_path, output_writer,
    skip_duplicates=True):
  output_writer.Write('Extracting file entries.\n')
  path_spec_generator = self._path_spec_extractor.ExtractPathSpecs(
      source_path_specs, resolver_context=self._resolver_context)

  for path_spec in path_spec_generator:
    self._ExtractFileEntry(
        path_spec, destination_path, output_writer,
        skip_duplicates=skip_duplicates)
288,567
Extracts a data stream. Args: file_entry (dfvfs.FileEntry): file entry containing the data stream. data_stream_name (str): name of the data stream. destination_path (str): path where the extracted files should be stored. output_writer (CLIOutputWriter): output writer. skip_duplicates (Optional[bool]): True if files with duplicate content should be skipped.
def _ExtractDataStream( self, file_entry, data_stream_name, destination_path, output_writer, skip_duplicates=True): if not data_stream_name and not file_entry.IsFile(): return display_name = path_helper.PathHelper.GetDisplayNameForPathSpec( file_entry.path_spec) if skip_duplicates: try: digest = self._CalculateDigestHash(file_entry, data_stream_name) except (IOError, dfvfs_errors.BackEndError) as exception: output_writer.Write(( '[skipping] unable to read content of file entry: {0:s} ' 'with error: {1!s}\n').format(display_name, exception)) return if not digest: output_writer.Write( '[skipping] unable to read content of file entry: {0:s}\n'.format( display_name)) return duplicate_display_name = self._digests.get(digest, None) if duplicate_display_name: output_writer.Write(( '[skipping] file entry: {0:s} is a duplicate of: {1:s} with ' 'digest: {2:s}\n').format( display_name, duplicate_display_name, digest)) return self._digests[digest] = display_name target_directory, target_filename = self._CreateSanitizedDestination( file_entry, file_entry.path_spec, data_stream_name, destination_path) if not os.path.isdir(target_directory): os.makedirs(target_directory) target_path = os.path.join(target_directory, target_filename) if os.path.exists(target_path): output_writer.Write(( '[skipping] unable to export contents of file entry: {0:s} ' 'because exported file: {1:s} already exists.\n').format( display_name, target_path)) return try: self._WriteFileEntry(file_entry, data_stream_name, target_path) except (IOError, dfvfs_errors.BackEndError) as exception: output_writer.Write(( '[skipping] unable to export contents of file entry: {0:s} ' 'with error: {1!s}\n').format(display_name, exception)) try: os.remove(target_path) except (IOError, OSError): pass
288,568
Extracts a file entry. Args: path_spec (dfvfs.PathSpec): path specification of the source file. destination_path (str): path where the extracted files should be stored. output_writer (CLIOutputWriter): output writer. skip_duplicates (Optional[bool]): True if files with duplicate content should be skipped.
def _ExtractFileEntry( self, path_spec, destination_path, output_writer, skip_duplicates=True): file_entry = path_spec_resolver.Resolver.OpenFileEntry(path_spec) if not file_entry: logger.warning('Unable to open file entry for path spec: {0:s}'.format( path_spec.comparable)) return if not self._filter_collection.Matches(file_entry): return file_entry_processed = False for data_stream in file_entry.data_streams: if self._abort: break self._ExtractDataStream( file_entry, data_stream.name, destination_path, output_writer, skip_duplicates=skip_duplicates) file_entry_processed = True if not file_entry_processed: self._ExtractDataStream( file_entry, '', destination_path, output_writer, skip_duplicates=skip_duplicates)
288,569
Retrieves the file system of the source. Args: source_path_spec (dfvfs.PathSpec): source path specification of the file system. resolver_context (dfvfs.Context): resolver context. Returns: tuple: containing: dfvfs.FileSystem: file system. dfvfs.PathSpec: mount point path specification that refers to the base location of the file system. Raises: RuntimeError: if source path specification is not set.
def _GetSourceFileSystem(self, source_path_spec, resolver_context=None):
  if not source_path_spec:
    raise RuntimeError('Missing source.')

  file_system = path_spec_resolver.Resolver.OpenFileSystem(
      source_path_spec, resolver_context=resolver_context)

  type_indicator = source_path_spec.type_indicator
  if path_spec_factory.Factory.IsSystemLevelTypeIndicator(type_indicator):
    mount_point = source_path_spec
  else:
    mount_point = source_path_spec.parent

  return file_system, mount_point
288,571
Parses the extensions string. Args: extensions_string (str): comma separated extensions to filter.
def _ParseExtensionsString(self, extensions_string):
  if not extensions_string:
    return

  extensions_string = extensions_string.lower()
  extensions = [
      extension.strip() for extension in extensions_string.split(',')]
  file_entry_filter = file_entry_filters.ExtensionsFileEntryFilter(extensions)
  self._filter_collection.AddFilter(file_entry_filter)
288,572
Parses the name string. Args: names_string (str): comma separated filenames to filter.
def _ParseNamesString(self, names_string):
  if not names_string:
    return

  names_string = names_string.lower()
  names = [name.strip() for name in names_string.split(',')]
  file_entry_filter = file_entry_filters.NamesFileEntryFilter(names)
  self._filter_collection.AddFilter(file_entry_filter)
288,573
Parses the filter options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
def _ParseFilterOptions(self, options): names = ['artifact_filters', 'date_filters', 'filter_file'] helpers_manager.ArgumentHelperManager.ParseOptions( options, self, names=names) extensions_string = self.ParseStringOption(options, 'extensions_string') self._ParseExtensionsString(extensions_string) names_string = getattr(options, 'names_string', None) self._ParseNamesString(names_string) signature_identifiers = getattr(options, 'signature_identifiers', None) try: self._ParseSignatureIdentifiers( self._data_location, signature_identifiers) except (IOError, ValueError) as exception: raise errors.BadConfigOption(exception) if self._artifact_filters or self._filter_file: self.has_filters = True else: self.has_filters = self._filter_collection.HasFilters()
288,574
Reads the format specification file. Args: path (str): path of the format specification file. Returns: FormatSpecificationStore: format specification store.
def _ReadSpecificationFile(self, path): specification_store = specification.FormatSpecificationStore() with io.open( path, 'rt', encoding=self._SPECIFICATION_FILE_ENCODING) as file_object: for line in file_object.readlines(): line = line.strip() if not line or line.startswith('#'): continue try: identifier, offset, pattern = line.split() except ValueError: logger.error('[skipping] invalid line: {0:s}'.format(line)) continue try: offset = int(offset, 10) except ValueError: logger.error('[skipping] invalid offset in line: {0:s}'.format(line)) continue try: # TODO: find another way to do this that doesn't use an undocumented # API. pattern = codecs.escape_decode(pattern)[0] # ValueError is raised e.g. when the patterns contains "\xg1". except ValueError: logger.error( '[skipping] invalid pattern in line: {0:s}'.format(line)) continue format_specification = specification.FormatSpecification(identifier) format_specification.AddNewSignature(pattern, offset=offset) specification_store.AddSpecification(format_specification) return specification_store
288,576
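Each non-comment line of the specification file holds an identifier, a decimal offset and an escaped pattern separated by whitespace. The following sketch shows how one such line is decoded, mirroring the (undocumented) codecs.escape_decode() call used above; the sample line is illustrative.

import codecs

line = 'sqlite3 0 SQLite\\x20format\\x203\\x00'

identifier, offset, pattern = line.split()
offset = int(offset, 10)
# Turns the textual escapes into raw bytes, e.g. \x20 into a space.
pattern = codecs.escape_decode(pattern)[0]
# identifier == 'sqlite3', offset == 0, pattern == b'SQLite format 3\x00'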
Writes the contents of the source file entry to a destination file. Note that this function will overwrite an existing file. Args: file_entry (dfvfs.FileEntry): file entry whose content is to be written. data_stream_name (str): name of the data stream whose content is to be written. destination_file (str): path of the destination file.
def _WriteFileEntry(self, file_entry, data_stream_name, destination_file):
  source_file_object = file_entry.GetFileObject(
      data_stream_name=data_stream_name)
  if not source_file_object:
    return

  try:
    with open(destination_file, 'wb') as destination_file_object:
      source_file_object.seek(0, os.SEEK_SET)

      data = source_file_object.read(self._COPY_BUFFER_SIZE)
      while data:
        destination_file_object.write(data)
        data = source_file_object.read(self._COPY_BUFFER_SIZE)

  finally:
    source_file_object.close()
288,577
Adds the filter options to the argument group. Args: argument_group (argparse._ArgumentGroup): argparse argument group.
def AddFilterOptions(self, argument_group):
  names = ['artifact_filters', 'date_filters', 'filter_file']
  helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
      argument_group, names=names)

  argument_group.add_argument(
      '-x', '--extensions', dest='extensions_string', action='store',
      type=str, metavar='EXTENSIONS', help=(
          'Filter on file name extensions. This option accepts multiple '
          'comma separated values e.g. "csv,docx,pst".'))

  argument_group.add_argument(
      '--names', dest='names_string', action='store', type=str,
      metavar='NAMES', help=(
          'Filter on file names. This option accepts a comma separated '
          'string denoting all file names, e.g. --names '
          '"NTUSER.DAT,UsrClass.dat".'))

  argument_group.add_argument(
      '--signatures', dest='signature_identifiers', action='store', type=str,
      metavar='IDENTIFIERS', help=(
          'Filter on file format signature identifiers. This option '
          'accepts multiple comma separated values e.g. "esedb,lnk". '
          'Use "list" to show an overview of the supported file format '
          'signatures.'))
288,578
Parses the options and initializes the front-end. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
def ParseOptions(self, options): # The data location is required to list signatures. helpers_manager.ArgumentHelperManager.ParseOptions( options, self, names=['data_location']) # Check the list options first otherwise required options will raise. signature_identifiers = self.ParseStringOption( options, 'signature_identifiers') if signature_identifiers == 'list': self.list_signature_identifiers = True if self.list_signature_identifiers: return self._ParseInformationalOptions(options) self._ParseLogFileOptions(options) self._ParseStorageMediaOptions(options) self._destination_path = self.ParseStringOption( options, 'path', default_value='export') if not self._data_location: logger.warning('Unable to automatically determine data location.') argument_helper_names = ['artifact_definitions', 'process_resources'] helpers_manager.ArgumentHelperManager.ParseOptions( options, self, names=argument_helper_names) self._ParseFilterOptions(options) if (getattr(options, 'no_vss', False) or getattr(options, 'include_duplicates', False)): self._skip_duplicates = False self._EnforceProcessMemoryLimit(self._process_memory_limit)
288,580
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
def GetMessages(self, formatter_mediator, event): if self.DATA_TYPE != event.data_type: raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format( event.data_type)) event_values = event.CopyToDict() restore_point_event_type = event_values.get( 'restore_point_event_type', None) if restore_point_event_type is not None: event_values['restore_point_event_type'] = ( self._RESTORE_POINT_EVENT_TYPES.get( restore_point_event_type, 'UNKNOWN')) restore_point_type = event_values.get('restore_point_type', None) if restore_point_type is not None: event_values['restore_point_type'] = ( self._RESTORE_POINT_EVENT_TYPES.get(restore_point_type, 'UNKNOWN')) return self._ConditionalFormatMessages(event_values)
288,582
Converts a binary data value into a floating-point value. Args: value (bytes): binary data value containing an ASCII string or None. Returns: float: floating-point representation of binary data value or None if value is not set. Raises: ParseError: if the floating-point value data size is not supported or if the value cannot be parsed.
def _ConvertValueBinaryDataToFloatingPointValue(self, value):
  if not value:
    return None

  value_length = len(value)
  if value_length not in (4, 8):
    raise errors.ParseError('Unsupported value data size: {0:d}'.format(
        value_length))

  if value_length == 4:
    floating_point_map = self._GetDataTypeMap('float32le')
  elif value_length == 8:
    floating_point_map = self._GetDataTypeMap('float64le')

  try:
    return self._ReadStructureFromByteStream(value, 0, floating_point_map)
  except (ValueError, errors.ParseError) as exception:
    raise errors.ParseError(
        'Unable to parse floating-point value with error: {0!s}'.format(
            exception))
288,586
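For reference, an equivalent conversion can be written with the standard struct module; this is only a sketch of the same size checks, not the data-type-map implementation above.

import struct

def ConvertBinaryDataToFloat(value):
  """Converts 4- or 8-byte little-endian binary data to a float."""
  if not value:
    return None
  if len(value) == 4:
    return struct.unpack('<f', value)[0]
  if len(value) == 8:
    return struct.unpack('<d', value)[0]
  raise ValueError('Unsupported value data size: {0:d}'.format(len(value)))

print(ConvertBinaryDataToFloat(struct.pack('<d', 1.5)))  # 1.5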
Extracts an identifier mapping from a SruDbIdMapTable record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. table_name (str): name of the table the record is stored in. esedb_record (pyesedb.record): record. Returns: tuple[int, str]: numeric identifier and its string representation or None, None if no identifier mapping can be retrieved from the record.
def _ParseIdentifierMappingRecord( self, parser_mediator, table_name, esedb_record): record_values = self._GetRecordValues( parser_mediator, table_name, esedb_record) identifier = record_values.get('IdIndex', None) if identifier is None: parser_mediator.ProduceExtractionWarning( 'IdIndex value missing from table: SruDbIdMapTable') return None, None identifier_type = record_values.get('IdType', None) if identifier_type not in self._SUPPORTED_IDENTIFIER_TYPES: parser_mediator.ProduceExtractionWarning( 'unsupported IdType value: {0!s} in table: SruDbIdMapTable'.format( identifier_type)) return None, None mapped_value = record_values.get('IdBlob', None) if mapped_value is None: parser_mediator.ProduceExtractionWarning( 'IdBlob value missing from table: SruDbIdMapTable') return None, None if identifier_type == 3: try: fwnt_identifier = pyfwnt.security_identifier() fwnt_identifier.copy_from_byte_stream(mapped_value) mapped_value = fwnt_identifier.get_string() except IOError: parser_mediator.ProduceExtractionWarning( 'unable to decode IdBlob value as Windows NT security identifier') return None, None else: try: mapped_value = mapped_value.decode('utf-16le').rstrip('\0') except UnicodeDecodeError: parser_mediator.ProduceExtractionWarning( 'unable to decode IdBlob value as UTF-16 little-endian string') return None, None return identifier, mapped_value
288,589
Extracts identifier mappings from the SruDbIdMapTable table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. esedb_table (pyesedb.table): table. Returns: dict[int, str]: mapping of numeric identifiers to their string representation.
def _ParseIdentifierMappingsTable(self, parser_mediator, esedb_table):
  identifier_mappings = {}

  for esedb_record in esedb_table.records:
    if parser_mediator.abort:
      break

    identifier, mapped_value = self._ParseIdentifierMappingRecord(
        parser_mediator, esedb_table.name, esedb_record)
    if identifier is None or mapped_value is None:
      continue

    if identifier in identifier_mappings:
      parser_mediator.ProduceExtractionWarning(
          'identifier: {0:d} already exists in mappings.'.format(identifier))
      continue

    identifier_mappings[identifier] = mapped_value

  return identifier_mappings
288,590
Parses the application resource usage table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. cache (Optional[ESEDBCache]): cache, which contains information about the identifiers stored in the SruDbIdMapTable table. database (Optional[pyesedb.file]): ESE database. table (Optional[pyesedb.table]): table.
def ParseApplicationResourceUsage(
    self, parser_mediator, cache=None, database=None, table=None,
    **unused_kwargs):
  self._ParseGUIDTable(
      parser_mediator, cache, database, table,
      self._APPLICATION_RESOURCE_USAGE_VALUES_MAP,
      SRUMApplicationResourceUsageEventData)
288,591
Parses the network data usage monitor table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. cache (Optional[ESEDBCache]): cache, which contains information about the identifiers stored in the SruDbIdMapTable table. database (Optional[pyesedb.file]): ESE database. table (Optional[pyesedb.table]): table.
def ParseNetworkDataUsage(
    self, parser_mediator, cache=None, database=None, table=None,
    **unused_kwargs):
  self._ParseGUIDTable(
      parser_mediator, cache, database, table,
      self._NETWORK_DATA_USAGE_VALUES_MAP, SRUMNetworkDataUsageEventData)
288,592
Parses the network connectivity usage monitor table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. cache (Optional[ESEDBCache]): cache, which contains information about the identifiers stored in the SruDbIdMapTable table. database (Optional[pyesedb.file]): ESE database. table (Optional[pyesedb.table]): table.
def ParseNetworkConnectivityUsage(
    self, parser_mediator, cache=None, database=None, table=None,
    **unused_kwargs):
  # TODO: consider making ConnectStartTime + ConnectedTime an event.
  self._ParseGUIDTable(
      parser_mediator, cache, database, table,
      self._NETWORK_CONNECTIVITY_USAGE_VALUES_MAP,
      SRUMNetworkConnectivityUsageEventData)
288,593
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): for subkey in registry_key.GetSubkeys(): values_dict = {} values_dict['subkey_name'] = subkey.name vendor_identification = None product_identification = None try: subkey_name_parts = subkey.name.split('&') if len(subkey_name_parts) >= 2: vendor_identification = subkey_name_parts[0] product_identification = subkey_name_parts[1] except ValueError as exception: logger.warning( 'Unable to split string: {0:s} with error: {1!s}'.format( subkey.name, exception)) if vendor_identification and product_identification: values_dict['vendor'] = vendor_identification values_dict['product'] = product_identification for devicekey in subkey.GetSubkeys(): values_dict['serial'] = devicekey.name event_data = windows_events.WindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = registry_key.offset event_data.regvalue = values_dict event_data.source_append = self._SOURCE_APPEND # Last USB connection per USB device recorded in the Registry. event = time_events.DateTimeValuesEvent( devicekey.last_written_time, definitions.TIME_DESCRIPTION_LAST_CONNECTED) parser_mediator.ProduceEventWithEventData(event, event_data)
288,594
Formats a packed IPv6 address as a human readable string. Args: packed_ip_address (list[int]): packed IPv6 address. Returns: str: human readable IPv6 address.
def _FormatPackedIPv6Address(self, packed_ip_address):
  # Note that socket.inet_ntop() is not supported on Windows.
  octet_pairs = zip(packed_ip_address[0::2], packed_ip_address[1::2])
  octet_pairs = [octet1 << 8 | octet2 for octet1, octet2 in octet_pairs]

  # TODO: omit ":0000" from the string.
  return ':'.join([
      '{0:04x}'.format(octet_pair) for octet_pair in octet_pairs])
288,596
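A usage sketch for the formatting helper above, with an illustrative packed address:

packed_ip_address = [
    0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01]

octet_pairs = zip(packed_ip_address[0::2], packed_ip_address[1::2])
octet_pairs = [octet1 << 8 | octet2 for octet1, octet2 in octet_pairs]
print(':'.join(['{0:04x}'.format(octet_pair) for octet_pair in octet_pairs]))
# 2001:0db8:0000:0000:0000:0000:0000:0001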
Initializes an event source. Args: path_spec (Optional[dfvfs.PathSpec]): path specification.
def __init__(self, path_spec=None):
  super(EventSource, self).__init__()
  self.data_type = self.DATA_TYPE
  self.file_entry_type = None
  self.path_spec = path_spec
288,598
Initializes an Elasticsearch output module. Args: output_mediator (OutputMediator): mediates interactions between output modules and other components, such as storage and dfvfs.
def __init__(self, output_mediator):
  super(ElasticsearchOutputModule, self).__init__(output_mediator)
  self._raw_fields = False
288,599
Set raw (non-analyzed) fields. This is used for sorting and aggregations in Elasticsearch. https://www.elastic.co/guide/en/elasticsearch/guide/current/multi-fields.html Args: raw_fields (bool): True if raw (non-analyzed) fields should be added.
def SetRawFields(self, raw_fields):
  self._raw_fields = raw_fields

  if raw_fields:
    logger.debug('Elasticsearch adding raw (non-analyzed) fields.')
  else:
    logger.debug('Elasticsearch not adding raw (non-analyzed) fields.')
288,600
Initializes an extraction warning. Args: message (Optional[str]): warning message. parser_chain (Optional[str]): parser chain to which the warning applies. path_spec (Optional[dfvfs.PathSpec]): path specification of the file entry to which the warning applies.
def __init__(self, message=None, parser_chain=None, path_spec=None):
  super(ExtractionWarning, self).__init__()
  self.message = message
  self.parser_chain = parser_chain
  self.path_spec = path_spec
288,602
Searches the plist key hierarchy for keys with matching names. If a match is found a tuple of the key name and value is added to the matches list. Args: key (dict[str, object]): plist key. names (list[str]): names of the keys to match. matches (list[tuple[str, object]]): key name and value pairs of keys with matching names.
def _FindKeys(self, key, names, matches):
  for name, subkey in iter(key.items()):
    if name in names:
      matches.append((name, subkey))

    if isinstance(subkey, dict):
      self._FindKeys(subkey, names, matches)
288,603
Parses file content (data) for a preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. file_object (dfvfs.FileIO): file-like object that contains the artifact value data. Raises: errors.PreProcessFail: if the preprocessing fails.
def _ParseFileData(self, knowledge_base, file_object): plist_file = plist.PlistFile() try: plist_file.Read(file_object) except IOError as exception: raise errors.PreProcessFail( 'Unable to read: {0:s} with error: {1!s}'.format( self.ARTIFACT_DEFINITION_NAME, exception)) if not plist_file.root_key: raise errors.PreProcessFail(( 'Unable to read: {0:s} with error: missing root key').format( self.ARTIFACT_DEFINITION_NAME)) matches = [] self._FindKeys(plist_file.root_key, self._PLIST_KEYS, matches) if not matches: raise errors.PreProcessFail( 'Unable to read: {0:s} with error: no such keys: {1:s}.'.format( self.ARTIFACT_DEFINITION_NAME, ', '.join(self._PLIST_KEYS))) name = None value = None for name, value in matches: if value: break if value is None: raise errors.PreProcessFail(( 'Unable to read: {0:s} with error: no values found for keys: ' '{1:s}.').format( self.ARTIFACT_DEFINITION_NAME, ', '.join(self._PLIST_KEYS))) self._ParsePlistKeyValue(knowledge_base, name, value)
288,604
Parses a plist key value. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. name (str): name of the plist key. value (str): value of the plist key.
def _ParsePlistKeyValue(self, knowledge_base, name, value):
  if not knowledge_base.GetHostname():
    if name in self._PLIST_KEYS:
      hostname_artifact = artifacts.HostnameArtifact(name=value)
      knowledge_base.SetHostname(hostname_artifact)
288,605
Parses a plist key value. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. name (str): name of the plist key. value (str): value of the plist key.
def _ParsePlistKeyValue(self, knowledge_base, name, value):
  if not knowledge_base.GetValue('keyboard_layout'):
    if name in self._PLIST_KEYS:
      if isinstance(value, (list, tuple)):
        value = value[0]

      _, _, keyboard_layout = value.rpartition('.')

      knowledge_base.SetValue('keyboard_layout', keyboard_layout)
288,606
Parses a plist key value. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. name (str): name of the plist key. value (str): value of the plist key.
def _ParsePlistKeyValue(self, knowledge_base, name, value):
  if not knowledge_base.GetValue('operating_system_version'):
    if name in self._PLIST_KEYS:
      knowledge_base.SetValue('operating_system_version', value)
288,607
Parses artifact file system data for a preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. file_entry (dfvfs.FileEntry): file entry that contains the artifact value data. Raises: errors.PreProcessFail: if the preprocessing fails.
def _ParseFileEntry(self, knowledge_base, file_entry):
  if not file_entry or not file_entry.link:
    raise errors.PreProcessFail(
        'Unable to read: {0:s} with error: not a symbolic link'.format(
            self.ARTIFACT_DEFINITION_NAME))

  _, _, time_zone = file_entry.link.partition('zoneinfo/')

  # TODO: check if time zone is set in knowledge base.
  if time_zone:
    try:
      knowledge_base.SetTimeZone(time_zone)
    except ValueError:
      # TODO: add and store preprocessing errors.
      pass
288,608
Retrieves plist keys, defaulting to empty values. Args: top_level (plistlib._InternalDict): top level plist object. keys (set[str]): names of keys that should be returned. depth (int): depth within the plist, where 1 is top level. Returns: dict[str, str]: values of the requested keys.
def _GetKeysDefaultEmpty(self, top_level, keys, depth=1):
  keys = set(keys)
  match = {}

  if depth == 1:
    for key in keys:
      value = top_level.get(key, None)
      if value is not None:
        match[key] = value
  else:
    for _, parsed_key, parsed_value in plist_interface.RecurseKey(
        top_level, depth=depth):
      if parsed_key in keys:
        match[parsed_key] = parsed_value
        if set(match.keys()) == keys:
          return match

  return match
288,609
Retrieves the root key of a plist file. Args: file_entry (dfvfs.FileEntry): file entry of the plist. Returns: dict[str, object]: plist root key. Raises: errors.PreProcessFail: if the preprocessing fails.
def _GetPlistRootKey(self, file_entry):
  file_object = file_entry.GetFileObject()

  try:
    plist_file = plist.PlistFile()
    plist_file.Read(file_object)

  except IOError as exception:
    location = getattr(file_entry.path_spec, 'location', '')
    raise errors.PreProcessFail(
        'Unable to read plist file: {0:s} with error: {1!s}'.format(
            location, exception))

  finally:
    file_object.close()

  return plist_file.root_key
288,610
Parses artifact file system data for a preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. file_entry (dfvfs.FileEntry): file entry that contains the artifact value data. Raises: errors.PreProcessFail: if the preprocessing fails.
def _ParseFileEntry(self, knowledge_base, file_entry): root_key = self._GetPlistRootKey(file_entry) if not root_key: location = getattr(file_entry.path_spec, 'location', '') raise errors.PreProcessFail(( 'Unable to read: {0:s} plist: {1:s} with error: missing root ' 'key.').format(self.ARTIFACT_DEFINITION_NAME, location)) try: match = self._GetKeysDefaultEmpty(root_key, self._KEYS) except KeyError as exception: location = getattr(file_entry.path_spec, 'location', '') raise errors.PreProcessFail( 'Unable to read: {0:s} plist: {1:s} with error: {2!s}'.format( self.ARTIFACT_DEFINITION_NAME, location, exception)) name = match.get('name', [None])[0] uid = match.get('uid', [None])[0] if not name or not uid: # TODO: add and store preprocessing errors. return user_account = artifacts.UserAccountArtifact( identifier=uid, username=name) user_account.group_identifier = match.get('gid', [None])[0] user_account.full_name = match.get('realname', [None])[0] user_account.shell = match.get('shell', [None])[0] user_account.user_directory = match.get('home', [None])[0] try: knowledge_base.AddUserAccount(user_account) except KeyError: # TODO: add and store preprocessing errors. pass
288,611
Parses a cookie row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
def ParseCookieRow(self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) cookie_data = self._GetRowValue(query_hash, row, 'value') cookie_name = self._GetRowValue(query_hash, row, 'name') hostname = self._GetRowValue(query_hash, row, 'host') if hostname.startswith('.'): hostname = hostname[1:] is_secure = bool(self._GetRowValue(query_hash, row, 'isSecure')) if is_secure: url_scheme = 'https' else: url_scheme = 'http' path = self._GetRowValue(query_hash, row, 'path') url = '{0:s}://{1:s}{2:s}'.format(url_scheme, hostname, path) event_data = FirefoxCookieEventData() event_data.cookie_name = cookie_name event_data.data = cookie_data event_data.host = hostname event_data.httponly = bool(self._GetRowValue(query_hash, row, 'isHttpOnly')) event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.path = path event_data.query = query event_data.secure = is_secure event_data.url = url timestamp = self._GetRowValue(query_hash, row, 'creationTime') if timestamp: date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'lastAccessed') if timestamp: date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'expiry') if timestamp: # Expiry time (nsCookieService::GetExpiry in # netwerk/cookie/nsCookieService.cpp). # It's calculated as the difference between the server time and the time # the server wants the cookie to expire and adding that difference to the # client time. This localizes the client time regardless of whether or not # the TZ environment variable was set on the client. date_time = dfdatetime_posix_time.PosixTime( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_EXPIRATION) parser_mediator.ProduceEventWithEventData(event, event_data) # Go through all cookie plugins to see if there are is any specific parsing # needed. for cookie_plugin in self._cookie_plugins: try: cookie_plugin.UpdateChainAndProcess( parser_mediator, cookie_name=cookie_name, cookie_data=cookie_data, url=url) except errors.WrongPlugin: pass
288,613
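The URL reconstruction in ParseCookieRow() can be illustrated with a few literal values resembling a Firefox moz_cookies row; the values themselves are made up.

hostname = '.mozilla.org'
if hostname.startswith('.'):
  hostname = hostname[1:]

is_secure = True
url_scheme = 'https' if is_secure else 'http'

path = '/en-US/'
url = '{0:s}://{1:s}{2:s}'.format(url_scheme, hostname, path)
# url == 'https://mozilla.org/en-US/'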
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): values_dict = {} if registry_key.number_of_values == 0: values_dict['Value'] = 'No values stored in key.' else: for registry_value in registry_key.GetValues(): value_name = registry_value.name or '(default)' if registry_value.data is None: value_string = '[{0:s}] Empty'.format( registry_value.data_type_string) elif registry_value.DataIsString(): value_string = registry_value.GetDataAsObject() value_string = '[{0:s}] {1:s}'.format( registry_value.data_type_string, value_string) elif registry_value.DataIsInteger(): value_integer = registry_value.GetDataAsObject() value_string = '[{0:s}] {1:d}'.format( registry_value.data_type_string, value_integer) elif registry_value.DataIsMultiString(): multi_string = registry_value.GetDataAsObject() if not isinstance(multi_string, (list, tuple)): value_string = '[{0:s}]'.format(registry_value.data_type_string) # TODO: Add a flag or some sort of an anomaly alert. else: value_string = '[{0:s}] {1:s}'.format( registry_value.data_type_string, ''.join(multi_string)) else: value_string = '[{0:s}]'.format(registry_value.data_type_string) values_dict[value_name] = value_string event_data = windows_events.WindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = registry_key.offset event_data.regvalue = values_dict event = time_events.DateTimeValuesEvent( registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
288,617
Extracts relevant Bluetooth entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. match (Optional[dict[str, object]]): keys extracted from PLIST_KEYS.
def GetEntries(self, parser_mediator, match=None, **unused_kwargs): device_cache = match.get('DeviceCache', {}) for device, value in iter(device_cache.items()): name = value.get('Name', '') if name: name = ''.join(('Name:', name)) event_data = plist_event.PlistTimeEventData() event_data.root = '/DeviceCache' datetime_value = value.get('LastInquiryUpdate', None) if datetime_value: event_data.desc = ' '.join( filter(None, ('Bluetooth Discovery', name))) event_data.key = '{0:s}/LastInquiryUpdate'.format(device) event = time_events.PythonDatetimeEvent( datetime_value, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) if device in match.get('PairedDevices', []): event_data.desc = 'Paired:True {0:s}'.format(name) event_data.key = device event = time_events.PythonDatetimeEvent( datetime_value, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) datetime_value = value.get('LastNameUpdate', None) if datetime_value: event_data.desc = ' '.join(filter(None, ('Device Name Set', name))) event_data.key = '{0:s}/LastNameUpdate'.format(device) event = time_events.PythonDatetimeEvent( datetime_value, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) datetime_value = value.get('LastServicesUpdate', None) if datetime_value: event_data.desc = ' '.join(filter(None, ('Services Updated', name))) event_data.key = '{0:s}/LastServicesUpdate'.format(device) event = time_events.PythonDatetimeEvent( datetime_value, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
288,618
Parses file content (data) for a hostname preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. file_object (dfvfs.FileIO): file-like object that contains the artifact value data. Raises: errors.PreProcessFail: if the preprocessing fails.
def _ParseFileData(self, knowledge_base, file_object):
  text_file_object = dfvfs_text_file.TextFile(file_object, encoding='utf-8')

  if not knowledge_base.GetHostname():
    hostname = text_file_object.readline()
    hostname = hostname.strip()
    if hostname:
      hostname_artifact = artifacts.HostnameArtifact(name=hostname)
      knowledge_base.SetHostname(hostname_artifact)
288,619
Parses file content (data) for system product preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. file_object (dfvfs.FileIO): file-like object that contains the artifact value data. Raises: errors.PreProcessFail: if the preprocessing fails.
def _ParseFileData(self, knowledge_base, file_object):
  text_file_object = dfvfs_text_file.TextFile(file_object, encoding='utf-8')

  system_product = text_file_object.readline()
  system_product = system_product.strip()

  if not knowledge_base.GetValue('operating_system_product'):
    if system_product:
      knowledge_base.SetValue('operating_system_product', system_product)
288,620
Parses file content (data) for system product preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. file_object (dfvfs.FileIO): file-like object that contains the artifact value data. Raises: errors.PreProcessFail: if the preprocessing fails.
def _ParseFileData(self, knowledge_base, file_object):
  text_file_object = dfvfs_text_file.TextFile(file_object, encoding='utf-8')

  system_product = text_file_object.readline()

  # Only parse known default /etc/issue file contents.
  if system_product.startswith('Debian GNU/Linux '):
    system_product, _, _ = system_product.partition('\\')
    system_product = system_product.rstrip()
  else:
    system_product = None

  if not knowledge_base.GetValue('operating_system_product'):
    if system_product:
      knowledge_base.SetValue('operating_system_product', system_product)
288,621
Parses file content (data) for system product preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. file_object (dfvfs.FileIO): file-like object that contains the artifact value data. Raises: errors.PreProcessFail: if the preprocessing fails.
def _ParseFileData(self, knowledge_base, file_object):
  text_file_object = dfvfs_text_file.TextFile(file_object, encoding='utf-8')

  product_values = {}
  for line in text_file_object.readlines():
    line = line.strip()
    if line.startswith('#'):
      continue

    key, value = line.split('=')
    key = key.strip().upper()
    value = value.strip().strip('"')
    product_values[key] = value

  if not knowledge_base.GetValue('operating_system_product'):
    system_product = product_values.get('DISTRIB_DESCRIPTION', None)
    if system_product:
      knowledge_base.SetValue('operating_system_product', system_product)
288,622
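A small worked example of the /etc/lsb-release parsing above; the file content is illustrative.

LSB_RELEASE_DATA = '\n'.join([
    'DISTRIB_ID=Ubuntu',
    'DISTRIB_RELEASE=18.04',
    'DISTRIB_CODENAME=bionic',
    'DISTRIB_DESCRIPTION="Ubuntu 18.04.1 LTS"'])

product_values = {}
for line in LSB_RELEASE_DATA.split('\n'):
  key, value = line.split('=')
  product_values[key.strip().upper()] = value.strip().strip('"')

print(product_values['DISTRIB_DESCRIPTION'])  # Ubuntu 18.04.1 LTS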
Parses artifact file system data for a preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. file_entry (dfvfs.FileEntry): file entry that contains the artifact value data. Raises: errors.PreProcessFail: if the preprocessing fails.
def _ParseFileEntry(self, knowledge_base, file_entry): if file_entry.link: # Determine the timezone based on the file path. _, _, time_zone = file_entry.link.partition('zoneinfo/') else: # Determine the timezone based on the timezone information file. file_object = file_entry.GetFileObject() time_zone = None try: time_zone_file = tz.tzfile(file_object) date_time = datetime.datetime(2017, 1, 1) time_zone = time_zone_file.tzname(date_time) except ValueError: # TODO: add and store preprocessing errors. logger.error('Unable to read time zone information file.') finally: file_object.close() # TODO: check if time zone is set in knowledge base. if time_zone: try: knowledge_base.SetTimeZone(time_zone) except ValueError: # TODO: add and store preprocessing errors. logger.error('Unable to set time zone in knowledge base.')
288,623
Parses file content (data) for user account preprocessing attributes. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. file_object (dfvfs.FileIO): file-like object that contains the artifact value data. Raises: errors.PreProcessFail: if the preprocessing fails.
def _ParseFileData(self, knowledge_base, file_object): line_reader = line_reader_file.BinaryLineReader(file_object) try: reader = line_reader_file.BinaryDSVReader(line_reader, b':') except csv.Error as exception: raise errors.PreProcessFail( 'Unable to read: {0:s} with error: {1!s}'.format( self.ARTIFACT_DEFINITION_NAME, exception)) for row in reader: if len(row) < 7 or not row[0] or not row[2]: # TODO: add and store preprocessing errors. continue try: username = row[0].decode('utf-8') except UnicodeDecodeError: # TODO: add and store preprocessing errors. logger.error('Unable to decode username.') continue try: identifier = row[2].decode('utf-8') except UnicodeDecodeError: # TODO: add and store preprocessing errors. logger.error('Unable to decode identifier.') continue group_identifier = None if row[3]: try: group_identifier = row[3].decode('utf-8') except UnicodeDecodeError: # TODO: add and store preprocessing errors. logger.error('Unable to decode group identifier.') full_name = None if row[4]: try: full_name = row[4].decode('utf-8') except UnicodeDecodeError: # TODO: add and store preprocessing errors. logger.error('Unable to decode full name.') user_directory = None if row[5]: try: user_directory = row[5].decode('utf-8') except UnicodeDecodeError: # TODO: add and store preprocessing errors. logger.error('Unable to decode user directory.') shell = None if row[6]: try: shell = row[6].decode('utf-8') except UnicodeDecodeError: # TODO: add and store preprocessing errors. logger.error('Unable to decode shell.') user_account = artifacts.UserAccountArtifact( identifier=identifier, username=username) user_account.group_identifier = group_identifier user_account.full_name = full_name user_account.user_directory = user_directory user_account.shell = shell try: knowledge_base.AddUserAccount(user_account) except KeyError: # TODO: add and store preprocessing errors. pass
288,624
Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
def AddArguments(cls, argument_group): argument_group.add_argument( '--index_name', dest='index_name', type=str, action='store', default=cls._DEFAULT_INDEX_NAME, help=( 'Name of the index in ElasticSearch.')) argument_group.add_argument( '--doc_type', dest='document_type', type=str, action='store', default=cls._DEFAULT_DOCUMENT_TYPE, help=( 'Name of the document type that will be used in ElasticSearch.')) argument_group.add_argument( '--flush_interval', dest='flush_interval', type=int, action='store', default=cls._DEFAULT_FLUSH_INTERVAL, help=( 'Events to queue up before bulk insert to ElasticSearch.')) argument_group.add_argument( '--raw_fields', dest='raw_fields', action='store_true', default=cls._DEFAULT_RAW_FIELDS, help=( 'Export string fields that will not be analyzed by Lucene.')) argument_group.add_argument( '--elastic_user', dest='elastic_user', action='store', default=cls._DEFAULT_ELASTIC_USER, help=( 'Username to use for Elasticsearch authentication.')) argument_group.add_argument( '--use_ssl', dest='use_ssl', action='store_true', help='Enforces use of ssl.') argument_group.add_argument( '--ca_certificates_file_path', dest='ca_certificates_file_path', action='store', type=str, default=cls._DEFAULT_CA_CERTS, help=( 'Path to a file containing a list of root certificates to trust.')) argument_group.add_argument( '--elastic_url_prefix', dest='elastic_url_prefix', type=str, action='store', default=cls._DEFAULT_URL_PREFIX, help=( 'URL prefix for elastic search.')) ElasticSearchServerArgumentsHelper.AddArguments(argument_group)
288,625
Parses and validates options. Args: options (argparse.Namespace): parser options. output_module (OutputModule): output module to configure. Raises: BadConfigObject: when the output module object is of the wrong type. BadConfigOption: when a configuration parameter fails validation.
def ParseOptions(cls, options, output_module): elastic_output_modules = ( elastic.ElasticsearchOutputModule, elastic.ElasticsearchOutputModule) if not isinstance(output_module, elastic_output_modules): raise errors.BadConfigObject( 'Output module is not an instance of ElasticsearchOutputModule') index_name = cls._ParseStringOption( options, 'index_name', default_value=cls._DEFAULT_INDEX_NAME) document_type = cls._ParseStringOption( options, 'document_type', default_value=cls._DEFAULT_DOCUMENT_TYPE) flush_interval = cls._ParseNumericOption( options, 'flush_interval', default_value=cls._DEFAULT_FLUSH_INTERVAL) raw_fields = getattr( options, 'raw_fields', cls._DEFAULT_RAW_FIELDS) elastic_user = cls._ParseStringOption( options, 'elastic_user', default_value=cls._DEFAULT_ELASTIC_USER) use_ssl = getattr(options, 'use_ssl', False) ca_certificates_path = cls._ParseStringOption( options, 'ca_certificates_file_path', default_value=cls._DEFAULT_CA_CERTS) elastic_url_prefix = cls._ParseStringOption( options, 'elastic_url_prefix', default_value=cls._DEFAULT_URL_PREFIX) if elastic_user is not None: elastic_password = getpass.getpass( 'Enter your Elasticsearch password: ') else: elastic_password = None ElasticSearchServerArgumentsHelper.ParseOptions(options, output_module) output_module.SetIndexName(index_name) output_module.SetDocumentType(document_type) output_module.SetFlushInterval(flush_interval) output_module.SetRawFields(raw_fields) output_module.SetUsername(elastic_user) output_module.SetPassword(elastic_password) output_module.SetUseSSL(use_ssl) output_module.SetCACertificatesPath(ca_certificates_path) output_module.SetURLPrefix(elastic_url_prefix)
288,626
Converts a dictionary into a list of strings. Args: data_dict (dict[str, object]): dictionary to convert. Returns: list[str]: list of strings.
def _DictToListOfStrings(self, data_dict): ret_list = [] for key, value in iter(data_dict.items()): if key in ('body', 'datetime', 'type', 'room', 'rooms', 'id'): continue ret_list.append('{0:s} = {1!s}'.format(key, value)) return ret_list
288,628
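For illustration, a standalone restatement of the skip-and-format logic above with a hypothetical input dictionary; the real method lives on the MacKeeper cache plugin instance.
def dict_to_list_of_strings(data_dict):
  # Same skip list and formatting as the method above.
  skipped = ('body', 'datetime', 'type', 'room', 'rooms', 'id')
  return ['{0:s} = {1!s}'.format(key, value)
          for key, value in data_dict.items() if key not in skipped]

print(dict_to_list_of_strings({'body': 'hi', 'id': 1, 'user': 'alice'}))
# prints: ['user = alice']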
Extracts values from a JQuery string. Args: jquery_raw (str): JQuery string. Returns: dict[str, str]: extracted values.
def _ExtractJQuery(self, jquery_raw): data_part = '' if not jquery_raw: return {} if '[' in jquery_raw: _, _, first_part = jquery_raw.partition('[') data_part, _, _ = first_part.partition(']') elif jquery_raw.startswith('//'): _, _, first_part = jquery_raw.partition('{') data_part = '{{{0:s}'.format(first_part) elif '({' in jquery_raw: _, _, first_part = jquery_raw.partition('(') data_part, _, _ = first_part.rpartition(')') if not data_part: return {} try: data_dict = json.loads(data_part) except ValueError: return {} return data_dict
288,629
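A small runnable sketch of the third branch above (the parenthesised callback form), using a made-up JQuery wrapper string; the other two branches strip square brackets or a leading comment marker in the same spirit before handing the payload to json.loads().
import json

# Hypothetical callback-wrapped payload, as handled by the '({' branch.
jquery_raw = 'jQuery42({"type": "outgoing", "body": "bye"})'
_, _, first_part = jquery_raw.partition('(')
data_part, _, _ = first_part.rpartition(')')
print(json.loads(data_part))  # prints: {'type': 'outgoing', 'body': 'bye'}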
Parses chat comment data. Args: data (dict[str, object]): chat comment data as returned by SQLite. Returns: dict[str, object]: parsed chat comment data.
def _ParseChatData(self, data): data_store = {} if 'body' in data: body = data.get('body', '').replace('\n', ' ') if body.startswith('//') and '{' in body: body_dict = self._ExtractJQuery(body) title, _, _ = body.partition('{') body = '{0:s} <{1!s}>'.format( title[2:], self._DictToListOfStrings(body_dict)) else: body = 'No text.' data_store['text'] = body room = data.get('rooms', None) if not room: room = data.get('room', None) if room: data_store['room'] = room data_store['id'] = data.get('id', None) user = data.get('user', None) if user: try: user_sid = int(user) data_store['sid'] = user_sid except (ValueError, TypeError): data_store['user'] = user return data_store
288,630
Parses a single row from the receiver and cache response table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
def ParseReceiverData( self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) data = {} key_url = self._GetRowValue(query_hash, row, 'request_key') data_dict = {} description = 'MacKeeper Entry' # Check the URL, since that contains vital information about the type of # event we are dealing with. if key_url.endswith('plist'): description = 'Configuration Definition' data['text'] = 'Plist content added to cache.' elif key_url.startswith('http://event.zeobit.com'): description = 'MacKeeper Event' try: _, _, part = key_url.partition('?') data['text'] = part.replace('&', ' ') except UnicodeDecodeError: data['text'] = 'N/A' elif key_url.startswith('http://account.zeobit.com'): description = 'Account Activity' _, _, activity = key_url.partition('#') if activity: data['text'] = 'Action started: {0:s}'.format(activity) else: data['text'] = 'Unknown activity.' elif key_url.startswith('http://support.') and 'chat' in key_url: description = 'Chat ' try: jquery = self._GetRowValue(query_hash, row, 'data') jquery = codecs.decode(jquery, 'utf-8') except UnicodeDecodeError: jquery = '' data_dict = self._ExtractJQuery(jquery) data = self._ParseChatData(data_dict) data['entry_type'] = data_dict.get('type', '') if data['entry_type'] == 'comment': description += 'Comment' elif data['entry_type'] == 'outgoing': description += 'Outgoing Message' elif data['entry_type'] == 'incoming': description += 'Incoming Message' else: # Empty or not known entry type, generic status message. description += 'Entry' data['text'] = ';'.join(self._DictToListOfStrings(data_dict)) if not data['text']: data['text'] = 'No additional data.' event_data = MacKeeperCacheEventData() event_data.description = description event_data.event_type = data.get('event_type', None) event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.query = query event_data.record_id = data.get('id', None) event_data.room = data.get('room', None) event_data.text = data.get('text', None) event_data.url = key_url event_data.user_name = data.get('user', None) event_data.user_sid = data.get('sid', None) time_value = self._GetRowValue(query_hash, row, 'time_string') if isinstance(time_value, py2to3.INTEGER_TYPES): date_time = dfdatetime_java_time.JavaTime(timestamp=time_value) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_ADDED) else: try: timestamp = timelib.Timestamp.FromTimeString(time_value) except errors.TimestampError: parser_mediator.ProduceExtractionWarning( 'Unable to parse time string: {0:s}'.format(time_value)) return event = time_events.TimestampEvent( timestamp, definitions.TIME_DESCRIPTION_ADDED) parser_mediator.ProduceEventWithEventData(event, event_data)
288,631
Deregisters a formatter class. The formatter classes are identified based on their lower case data type. Args: formatter_class (type): class of the formatter. Raises: KeyError: if formatter class is not set for the corresponding data type.
def DeregisterFormatter(cls, formatter_class): formatter_data_type = formatter_class.DATA_TYPE.lower() if formatter_data_type not in cls._formatter_classes: raise KeyError( 'Formatter class not set for data type: {0:s}.'.format( formatter_class.DATA_TYPE)) del cls._formatter_classes[formatter_data_type]
288,632
Retrieves the formatter object for a specific data type. Args: data_type (str): data type. Returns: EventFormatter: corresponding formatter or the default formatter if not available.
def GetFormatterObject(cls, data_type): data_type = data_type.lower() if data_type not in cls._formatter_objects: formatter_object = None if data_type in cls._formatter_classes: formatter_class = cls._formatter_classes[data_type] # TODO: remove the need to instantiate the Formatter classes # and use class methods only. formatter_object = formatter_class() if not formatter_object: logger.warning( 'Using default formatter for data type: {0:s}'.format(data_type)) formatter_object = default.DefaultFormatter() cls._formatter_objects[data_type] = formatter_object return cls._formatter_objects[data_type]
288,633
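A standalone sketch of the instantiate-once-and-cache-per-data-type pattern the method above implements; the names below are illustrative stand-ins rather than plaso's actual registry.
class DefaultFormatter(object):
  """Illustrative stand-in for the default formatter."""

_formatter_classes = {}
_formatter_objects = {}

def get_formatter_object(data_type):
  # Lower-case the data type and instantiate the formatter only once.
  data_type = data_type.lower()
  if data_type not in _formatter_objects:
    formatter_class = _formatter_classes.get(data_type, DefaultFormatter)
    _formatter_objects[data_type] = formatter_class()
  return _formatter_objects[data_type]

print(get_formatter_object('fs:stat') is get_formatter_object('FS:STAT'))
# prints: True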
Retrieves the formatted message strings for a specific event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: list[str, str]: long and short version of the message string.
def GetMessageStrings(cls, formatter_mediator, event): formatter_object = cls.GetFormatterObject(event.data_type) return formatter_object.GetMessages(formatter_mediator, event)
288,634
Retrieves the formatted source strings for a specific event object. Args: event (EventObject): event. Returns: list[str, str]: short and long version of the source of the event.
def GetSourceStrings(cls, event): # TODO: change this to return the long variant first so it is consistent # with GetMessageStrings. formatter_object = cls.GetFormatterObject(event.data_type) return formatter_object.GetSources(event)
288,635
Retrieves all PE section names. Args: pefile_object (pefile.PE): pefile object. Returns: list[str]: names of the sections.
def _GetSectionNames(self, pefile_object): section_names = [] for section in pefile_object.sections: section_name = getattr(section, 'Name', b'') # Ensure the name is decoded correctly. try: section_name = '{0:s}'.format(section_name.decode('unicode_escape')) except UnicodeDecodeError: section_name = '{0:s}'.format(repr(section_name)) section_names.append(section_name) return section_names
288,637
Retrieves timestamps from the import directory, if available. Args: pefile_object (pefile.PE): pefile object. Returns: list[tuple[str, int]]: name of the imported DLL and the import timestamp, for each import directory entry.
def _GetImportTimestamps(self, pefile_object): import_timestamps = [] if not hasattr(pefile_object, 'DIRECTORY_ENTRY_IMPORT'): return import_timestamps for importdata in pefile_object.DIRECTORY_ENTRY_IMPORT: dll_name = getattr(importdata, 'dll', '') try: dll_name = dll_name.decode('ascii') except UnicodeDecodeError: dll_name = dll_name.decode('ascii', errors='replace') if not dll_name: dll_name = '<NO DLL NAME>' timestamp = getattr(importdata.struct, 'TimeDateStamp', 0) if timestamp: import_timestamps.append([dll_name, timestamp]) return import_timestamps
288,638
Retrieves timestamps from resource directory entries, if available. Args: pefile_object (pefile.PE): pefile object. Returns: list[int]: resource timestamps.
def _GetResourceTimestamps(self, pefile_object): timestamps = [] if not hasattr(pefile_object, 'DIRECTORY_ENTRY_RESOURCE'): return timestamps for entrydata in pefile_object.DIRECTORY_ENTRY_RESOURCE.entries: directory = entrydata.directory timestamp = getattr(directory, 'TimeDateStamp', 0) if timestamp: timestamps.append(timestamp) return timestamps
288,639
Retrieves the timestamp from the Load Configuration directory. Args: pefile_object (pefile.PE): pefile object. Returns: int: load configuration timestamp or None if not present.
def _GetLoadConfigTimestamp(self, pefile_object): if not hasattr(pefile_object, 'DIRECTORY_ENTRY_LOAD_CONFIG'): return None timestamp = getattr( pefile_object.DIRECTORY_ENTRY_LOAD_CONFIG.struct, 'TimeDateStamp', 0) return timestamp
288,640
Retrieves timestamps from delay import entries, if available. Args: pefile_object (pefile.PE): pefile object. Returns: list[tuple[str, int]]: name of the DLL being imported and the timestamp of the entry, for each delay import entry.
def _GetDelayImportTimestamps(self, pefile_object): delay_import_timestamps = [] if not hasattr(pefile_object, 'DIRECTORY_ENTRY_DELAY_IMPORT'): return delay_import_timestamps for importdata in pefile_object.DIRECTORY_ENTRY_DELAY_IMPORT: dll_name = importdata.dll try: dll_name = dll_name.decode('ascii') except UnicodeDecodeError: dll_name = dll_name.decode('ascii', errors='replace') timestamp = getattr(importdata.struct, 'dwTimeStamp', 0) delay_import_timestamps.append([dll_name, timestamp]) return delay_import_timestamps
288,641
Parses a Portable Executable (PE) file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
def ParseFileObject(self, parser_mediator, file_object): pe_data = file_object.read() try: pefile_object = pefile.PE(data=pe_data, fast_load=True) pefile_object.parse_data_directories( directories=[ pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'], pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'], pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_RESOURCE'], pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT'],]) except Exception as exception: raise errors.UnableToParseFile( 'Unable to read PE file with error: {0!s}'.format(exception)) event_data = PEEventData() event_data.imphash = pefile_object.get_imphash() event_data.pe_type = self._GetPEType(pefile_object) event_data.section_names = self._GetSectionNames(pefile_object) # TODO: remove after refactoring the pe event formatter. event_data.data_type = 'pe:compilation:compilation_time' timestamp = getattr(pefile_object.FILE_HEADER, 'TimeDateStamp', None) # TODO: handle timestamp is None. date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) for dll_name, timestamp in self._GetImportTimestamps(pefile_object): if timestamp: event_data.dll_name = dll_name event_data.data_type = 'pe:import:import_time' date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data) for dll_name, timestamp in self._GetDelayImportTimestamps(pefile_object): if timestamp: event_data.dll_name = dll_name event_data.data_type = 'pe:delay_import:import_time' date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data) event_data.dll_name = None for timestamp in self._GetResourceTimestamps(pefile_object): if timestamp: event_data.data_type = 'pe:resource:creation_time' date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetLoadConfigTimestamp(pefile_object) if timestamp: event_data.data_type = 'pe:load_config:modification_time' date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data)
288,642
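For context, a hedged standalone sketch of reading the same compilation timestamp directly with pefile; 'sample.exe' is a hypothetical path and error handling is omitted.
import datetime
import pefile

pe = pefile.PE('sample.exe', fast_load=True)  # hypothetical input file
timestamp = pe.FILE_HEADER.TimeDateStamp  # POSIX timestamp in seconds
print(datetime.datetime.utcfromtimestamp(timestamp))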
Iterates over the log lines and provides a reader for the values. Args: line_reader (iter): yields each line in the log file. Yields: dict[str, str]: column values keyed by column header.
def _CreateDictReader(self, line_reader): for line in line_reader: if isinstance(line, py2to3.BYTES_TYPE): try: line = codecs.decode(line, self._encoding) except UnicodeDecodeError as exception: raise errors.UnableToParseFile( 'Unable to decode line with error: {0!s}'.format(exception)) stripped_line = line.strip() values = stripped_line.split(self.DELIMITER) number_of_values = len(values) number_of_columns = len(self.COLUMNS) if number_of_values < self.MIN_COLUMNS: raise errors.UnableToParseFile( 'Expected at least {0:d} values, found {1:d}'.format( self.MIN_COLUMNS, number_of_values)) if number_of_values > number_of_columns: raise errors.UnableToParseFile( 'Expected at most {0:d} values, found {1:d}'.format( number_of_columns, number_of_values)) yield dict(zip(self.COLUMNS, values))
288,644
Parses a line of the log file and produces events. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. row_offset (int): line number of the row. row (dict[str, str]): fields of a single row, as specified in COLUMNS.
def ParseRow(self, parser_mediator, row_offset, row): timestamp = self._ParseTimestamp(parser_mediator, row) if timestamp is None: return try: action = int(row['action'], 10) except (ValueError, TypeError): action = None try: scan_type = int(row['scan_type'], 10) except (ValueError, TypeError): scan_type = None event_data = TrendMicroAVEventData() event_data.action = action event_data.filename = row['filename'] event_data.offset = row_offset event_data.path = row['path'] event_data.scan_type = scan_type event_data.threat = row['threat'] event = time_events.DateTimeValuesEvent( timestamp, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
288,647
Verifies if a line of the file is in the expected format. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. row (dict[str, str]): fields of a single row, as specified in COLUMNS. Returns: bool: True if this is the correct parser, False otherwise.
def VerifyRow(self, parser_mediator, row): if len(row) < self.MIN_COLUMNS: return False # Check the date format! # If it doesn't parse, then this isn't a Trend Micro AV log. try: timestamp = self._ConvertToTimestamp(row['date'], row['time']) except (ValueError, TypeError): return False if timestamp is None: return False # Check that the action value is plausible. try: action = int(row['action'], 10) except (ValueError, TypeError): return False if action not in formatter.SCAN_RESULTS: return False return True
288,648
Parses a line of the log file and produces events. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. row_offset (int): line number of the row. row (dict[str, str]): fields of a single row, as specified in COLUMNS.
def ParseRow(self, parser_mediator, row_offset, row): timestamp = self._ParseTimestamp(parser_mediator, row) if timestamp is None: return event_data = TrendMicroUrlEventData() event_data.offset = row_offset # Convert and store integer values. for field in ( 'credibility_rating', 'credibility_score', 'policy_identifier', 'threshold', 'block_mode'): try: value = int(row[field], 10) except (ValueError, TypeError): value = None setattr(event_data, field, value) # Store string values. for field in ('url', 'group_name', 'group_code', 'application_name', 'ip'): setattr(event_data, field, row[field]) event = time_events.DateTimeValuesEvent( timestamp, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
288,650
Verifies if a line of the file is in the expected format. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. row (dict[str, str]): fields of a single row, as specified in COLUMNS. Returns: bool: True if this is the correct parser, False otherwise.
def VerifyRow(self, parser_mediator, row): if len(row) < self.MIN_COLUMNS: return False # Check the date format! # If it doesn't parse, then this isn't a Trend Micro AV log. try: timestamp = self._ConvertToTimestamp(row['date'], row['time']) except ValueError: return False if timestamp is None: return False try: block_mode = int(row['block_mode'], 10) except (ValueError, TypeError): return False if block_mode not in formatter.BLOCK_MODES: return False return True
288,651
Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
def AddArguments(cls, argument_group): argument_group.add_argument( '--server', dest='server', type=str, action='store', default=cls._DEFAULT_SERVER, metavar='HOSTNAME', help='The hostname or server IP address of the server.') argument_group.add_argument( '--port', dest='port', type=int, action='store', default=cls._DEFAULT_PORT, metavar='PORT', help='The port number of the server.')
288,652
Parses and validates options. Args: options (argparse.Namespace): parser options. output_module (OutputModule): output module to configure. Raises: BadConfigObject: when the output module object does not have the SetServerInformation method.
def ParseOptions(cls, options, output_module): if not hasattr(output_module, 'SetServerInformation'): raise errors.BadConfigObject('Unable to set server information.') server = cls._ParseStringOption( options, 'server', default_value=cls._DEFAULT_SERVER) port = cls._ParseNumericOption( options, 'port', default_value=cls._DEFAULT_PORT) output_module.SetServerInformation(server, port)
288,653
Initializes a VirusTotal analyzer. Args: hash_queue (Queue.queue): queue that contains hashes to be analyzed. hash_analysis_queue (Queue.queue): queue the analyzer will append HashAnalysis objects to.
def __init__(self, hash_queue, hash_analysis_queue, **kwargs): super(VirusTotalAnalyzer, self).__init__( hash_queue, hash_analysis_queue, **kwargs) self._api_key = None self._checked_for_old_python_version = False
288,654
Queries VirusTotal for specific hashes. Args: digests (list[str]): hashes to look up. Returns: dict[str, object]: JSON response or None on error.
def _QueryHashes(self, digests): url_parameters = {'apikey': self._api_key, 'resource': ', '.join(digests)} try: json_response = self.MakeRequestAndDecodeJSON( self._VIRUSTOTAL_API_REPORT_URL, 'GET', params=url_parameters) except errors.ConnectionError as exception: json_response = None logger.error('Unable to query VirusTotal with error: {0!s}.'.format( exception)) return json_response
288,655
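A hedged sketch of the equivalent raw HTTP lookup; the endpoint is assumed to be the documented VirusTotal public API v2 file report URL, and the API key and hashes are placeholders.
import requests

url = 'https://www.virustotal.com/vtapi/v2/file/report'  # assumed endpoint
url_parameters = {
    'apikey': 'YOUR_API_KEY',  # placeholder
    'resource': ', '.join(['hash1', 'hash2'])}  # placeholder digests
response = requests.get(url, params=url_parameters)
# A dict is returned for a single hash, a list of dicts for several.
json_response = response.json()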
Looks up hashes in VirusTotal using the VirusTotal HTTP API. The API is documented here: https://www.virustotal.com/en/documentation/public-api/ Args: hashes (list[str]): hashes to look up. Returns: list[HashAnalysis]: analysis results. Raises: RuntimeError: If the VirusTotal API key has not been set.
def Analyze(self, hashes): if not self._api_key: raise RuntimeError('No API key specified for VirusTotal lookup.') hash_analyses = [] json_response = self._QueryHashes(hashes) or [] # VirusTotal returns a dictionary when a single hash is queried # and a list when multiple hashes are queried. if isinstance(json_response, dict): json_response = [json_response] for result in json_response: resource = result['resource'] hash_analysis = interface.HashAnalysis(resource, result) hash_analyses.append(hash_analysis) return hash_analyses
288,656
Generates a list of strings that will be used in the event tag. Args: hash_information (dict[str, object]): the JSON decoded contents of the result of a VirusTotal lookup, as produced by the VirusTotalAnalyzer. Returns: list[str]: strings describing the results from VirusTotal.
def GenerateLabels(self, hash_information): response_code = hash_information['response_code'] if response_code == self._VIRUSTOTAL_NOT_PRESENT_RESPONSE_CODE: return ['virustotal_not_present'] if response_code == self._VIRUSTOTAL_PRESENT_RESPONSE_CODE: positives = hash_information['positives'] if positives > 0: return ['virustotal_detections_{0:d}'.format(positives)] return ['virustotal_no_detections'] if response_code == self._VIRUSTOTAL_ANALYSIS_PENDING_RESPONSE_CODE: return ['virustotal_analysis_pending'] logger.error( 'VirusTotal returned unknown response code {0!s}'.format( response_code)) return ['virustotal_unknown_response_code_{0:d}'.format(response_code)]
288,658
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
def GetMessages(self, formatter_mediator, event): if self.DATA_TYPE != event.data_type: raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format( event.data_type)) event_values = event.CopyToDict() priority_level = event_values.get('level', None) if isinstance(priority_level, py2to3.INTEGER_TYPES): event_values['level'] = '{0:s} ({1:d})'.format( self._PRIORITY_LEVELS.get(priority_level, 'UNKNOWN'), priority_level) # If no rights are assigned the value is 0xffffffff (-1). read_uid = event_values.get('read_uid', None) if read_uid == -1: event_values['read_uid'] = 'ALL' # If no rights are assigned the value is 0xffffffff (-1). read_gid = event_values.get('read_gid', None) if read_gid == -1: event_values['read_gid'] = 'ALL' # TODO: get the real name for the user of the group having the uid or gid. return self._ConditionalFormatMessages(event_values)
288,659
Parses a generic Windows Timeline row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
def ParseGenericRow( self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) event_data = WindowsTimelineGenericEventData() # Payload is JSON serialized as binary data in a BLOB field, with the text # encoded as UTF-8. payload_json_bytes = bytes(self._GetRowValue(query_hash, row, 'Payload')) payload_json_string = payload_json_bytes.decode('utf-8') # AppId is JSON stored as unicode text. appid_entries_string = self._GetRowValue(query_hash, row, 'AppId') payload = json.loads(payload_json_string) appid_entries = json.loads(appid_entries_string) # Attempt to populate the package_identifier field by checking each of # these fields in the AppId JSON. package_id_locations = [ 'packageId', 'x_exe_path', 'windows_win32', 'windows_universal', 'alternateId'] for location in package_id_locations: for entry in appid_entries: if entry['platform'] == location and entry['application'] != '': event_data.package_identifier = entry['application'] break if event_data.package_identifier is not None: # package_identifier has been populated and we're done. break if 'description' in payload: event_data.description = payload['description'] else: event_data.description = '' if 'appDisplayName' in payload and payload['appDisplayName'] != '': event_data.application_display_name = payload['appDisplayName'] elif 'displayText' in payload and payload['displayText'] != '': # Fall back to displayText if appDisplayName isn't available event_data.application_display_name = payload['displayText'] timestamp = self._GetRowValue(query_hash, row, 'StartTime') date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_START) parser_mediator.ProduceEventWithEventData(event, event_data)
288,662
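Hypothetical decoded AppId and Payload JSON for a single row, showing which values the plugin above would pick up and restating the package identifier search loop in isolation.
# Hypothetical decoded AppId and Payload JSON for one row.
appid_entries = [
    {'platform': 'packageId', 'application': 'Microsoft.WindowsCalculator'},
    {'platform': 'x_exe_path', 'application': 'C:\\Windows\\calc.exe'}]
payload = {
    'appDisplayName': 'Calculator', 'description': 'calc', 'displayText': ''}

package_identifier = None
for location in ('packageId', 'x_exe_path', 'windows_win32',
                 'windows_universal', 'alternateId'):
  for entry in appid_entries:
    if entry['platform'] == location and entry['application'] != '':
      package_identifier = entry['application']
      break
  if package_identifier is not None:
    break
print(package_identifier)  # prints: Microsoft.WindowsCalculator
# application_display_name would become 'Calculator' and description 'calc'.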
Parses a timeline row that describes a user interacting with an app. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
def ParseUserEngagedRow( self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) event_data = WindowsTimelineUserEngagedEventData() event_data.package_identifier = self._GetRowValue( query_hash, row, 'PackageName') # Payload is JSON serialized as binary data in a BLOB field, with the text # encoded as UTF-8. payload_json_bytes = bytes(self._GetRowValue(query_hash, row, 'Payload')) payload_json_string = payload_json_bytes.decode('utf-8') payload = json.loads(payload_json_string) if 'reportingApp' in payload: event_data.reporting_app = payload['reportingApp'] if 'activeDurationSeconds' in payload: event_data.active_duration_seconds = int(payload['activeDurationSeconds']) timestamp = self._GetRowValue(query_hash, row, 'StartTime') date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_START) parser_mediator.ProduceEventWithEventData(event, event_data)
288,663
Parses a Call record row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
def ParseCallsRow(self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) call_type = self._GetRowValue(query_hash, row, 'type') call_type = self.CALL_TYPE.get(call_type, 'UNKNOWN') duration = self._GetRowValue(query_hash, row, 'duration') timestamp = self._GetRowValue(query_hash, row, 'date') event_data = AndroidCallEventData() event_data.call_type = call_type event_data.duration = self._GetRowValue(query_hash, row, 'duration') event_data.name = self._GetRowValue(query_hash, row, 'name') event_data.number = self._GetRowValue(query_hash, row, 'number') event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.query = query date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Call Started') parser_mediator.ProduceEventWithEventData(event, event_data) if duration: if isinstance(duration, py2to3.STRING_TYPES): try: duration = int(duration, 10) except ValueError: duration = 0 # The duration is in seconds and the date value in milliseconds. timestamp += duration * 1000 date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Call Ended') parser_mediator.ProduceEventWithEventData(event, event_data)
288,665
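A worked example of the end-time arithmetic above: the 'date' column is Java time in milliseconds and 'duration' is in seconds, so the end time is the start plus duration times 1000.
# 'date' column value in Java time (milliseconds since 1970-01-01).
call_started = 1483228800000  # 2017-01-01 00:00:00.000 UTC (hypothetical)
duration = 125  # seconds
call_ended = call_started + duration * 1000
print(call_ended)  # prints: 1483228925000, i.e. 2017-01-01 00:02:05 UTC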
Check if it is a valid MacOS system account plist file name. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. plist_name (str): name of the plist. top_level (dict[str, object]): plist top-level key.
def Process(self, parser_mediator, plist_name, top_level, **kwargs): super(MacUserPlugin, self).Process( parser_mediator, plist_name=self.PLIST_PATH, top_level=top_level)
288,666
Extracts relevant user timestamp entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. match (Optional[dict[str, object]]): keys extracted from PLIST_KEYS.
def GetEntries(self, parser_mediator, match=None, **unused_kwargs): if 'name' not in match or 'uid' not in match: return account = match['name'][0] uid = match['uid'][0] for policy in match.get('passwordpolicyoptions', []): try: xml_policy = ElementTree.fromstring(policy) except (ElementTree.ParseError, LookupError) as exception: logger.error(( 'Unable to parse XML structure for a user policy, account: ' '{0:s} and uid: {1!s}, with error: {2!s}').format( account, uid, exception)) continue for dict_elements in xml_policy.iterfind('dict'): key_values = [value.text for value in iter(dict_elements)] # Taking a list and converting it to a dict, using every other item # as the key and the other one as the value. policy_dict = dict(zip(key_values[0::2], key_values[1::2])) time_string = policy_dict.get('passwordLastSetTime', None) if time_string and time_string != '2001-01-01T00:00:00Z': try: date_time = dfdatetime_time_elements.TimeElements() date_time.CopyFromStringISO8601(time_string) except ValueError: date_time = None parser_mediator.ProduceExtractionWarning( 'unable to parse password last set time string: {0:s}'.format( time_string)) shadow_hash_data = match.get('ShadowHashData', None) if date_time and isinstance(shadow_hash_data, (list, tuple)): # Extract the hash password information. # It is stored in the attribute ShadowHashData, which is # binary plist data; however biplist only extracts one # level of binary plist, then it returns this information # as a string. # TODO: change this into a DataRange instead. For this we # need the file offset and size of the ShadowHashData value data. shadow_hash_data = shadow_hash_data[0] resolver_context = context.Context() fake_file = fake_file_io.FakeFile( resolver_context, shadow_hash_data) shadow_hash_data_path_spec = fake_path_spec.FakePathSpec( location='ShadowHashData') fake_file.open(path_spec=shadow_hash_data_path_spec) try: plist_file = biplist.readPlist(fake_file) except biplist.InvalidPlistException: plist_file = {} salted_hash = plist_file.get('SALTED-SHA512-PBKDF2', None) if salted_hash: salt_hex_bytes = codecs.encode(salted_hash['salt'], 'hex') salt_string = codecs.decode(salt_hex_bytes, 'ascii') entropy_hex_bytes = codecs.encode(salted_hash['entropy'], 'hex') entropy_string = codecs.decode(entropy_hex_bytes, 'ascii') password_hash = '$ml${0:d}${1:s}${2:s}'.format( salted_hash['iterations'], salt_string, entropy_string) else: password_hash = 'N/A' event_data = plist_event.PlistTimeEventData() event_data.desc = ( 'Last time {0:s} ({1!s}) changed the password: {2!s}').format( account, uid, password_hash) event_data.key = 'passwordLastSetTime' event_data.root = self._ROOT event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) time_string = policy_dict.get('lastLoginTimestamp', None) if time_string and time_string != '2001-01-01T00:00:00Z': try: date_time = dfdatetime_time_elements.TimeElements() date_time.CopyFromStringISO8601(time_string) except ValueError: date_time = None parser_mediator.ProduceExtractionWarning( 'unable to parse last login time string: {0:s}'.format( time_string)) if date_time: event_data = plist_event.PlistTimeEventData() event_data.desc = 'Last login from {0:s} ({1!s})'.format( account, uid) event_data.key = 'lastLoginTimestamp' event_data.root = self._ROOT event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) time_string = policy_dict.get('failedLoginTimestamp', None) if time_string and time_string != '2001-01-01T00:00:00Z': try: date_time = dfdatetime_time_elements.TimeElements() date_time.CopyFromStringISO8601(time_string) except ValueError: date_time = None parser_mediator.ProduceExtractionWarning( 'unable to parse failed login time string: {0:s}'.format( time_string)) if date_time: event_data = plist_event.PlistTimeEventData() event_data.desc = ( 'Last failed login from {0:s} ({1!s}) ({2!s} times)').format( account, uid, policy_dict.get('failedLoginCount', 0)) event_data.key = 'failedLoginTimestamp' event_data.root = self._ROOT event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
288,667
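A worked example of the '$ml$' password hash string built above, using made-up salt and entropy bytes standing in for the decoded plist values.
import codecs

# Made-up salt and entropy bytes standing in for the decoded plist values.
salted_hash = {
    'iterations': 35047,
    'salt': b'\x01\x02\x03\x04',
    'entropy': b'\xaa\xbb\xcc\xdd'}
salt_string = codecs.decode(codecs.encode(salted_hash['salt'], 'hex'), 'ascii')
entropy_string = codecs.decode(
    codecs.encode(salted_hash['entropy'], 'hex'), 'ascii')
password_hash = '$ml${0:d}${1:s}${2:s}'.format(
    salted_hash['iterations'], salt_string, entropy_string)
print(password_hash)  # prints: $ml$35047$01020304$aabbccdd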
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
def GetMessages(self, formatter_mediator, event): if self.DATA_TYPE != event.data_type: raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format( event.data_type)) event_values = event.CopyToDict() regvalue = event_values.get('regvalue', {}) string_parts = [] for key, value in sorted(regvalue.items()): string_parts.append('{0:s}: {1!s}'.format(key, value)) event_values['text'] = ' '.join(string_parts) return self._ConditionalFormatMessages(event_values)
288,668
Determines if a Windows Registry key matches the filter. Args: registry_key (dfwinreg.WinRegistryKey): Windows Registry key. Returns: bool: True if the Windows Registry key matches the filter.
def Match(self, registry_key): key_path = registry_key.path.upper() # Prevent this filter matching non-string MRUList values. for ignore_key_path_suffix in self._IGNORE_KEY_PATH_SUFFIXES: if key_path.endswith(ignore_key_path_suffix): return False return super(MRUListStringRegistryKeyFilter, self).Match(registry_key)
288,669
Parses the MRUList value in a given Registry key. Args: registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains the MRUList value. Returns: mrulist_entries: MRUList entries or None if not available.
def _ParseMRUListValue(self, registry_key): mrulist_value = registry_key.GetValueByName('MRUList') # The key exists but does not contain a value named "MRUList". if not mrulist_value: return None mrulist_entries_map = self._GetDataTypeMap('mrulist_entries') context = dtfabric_data_maps.DataTypeMapContext(values={ 'data_size': len(mrulist_value.data)}) return self._ReadStructureFromByteStream( mrulist_value.data, 0, mrulist_entries_map, context=context)
288,670
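A hedged sketch of what the 'mrulist_entries' data type map decodes: an MRUList value is a sequence of UTF-16 little-endian characters terminated by 0x0000; the sample bytes below are made up.
import struct

# Made-up MRUList value data: entries 'a', 'c', 'b' followed by a terminator.
mrulist_data = b'a\x00c\x00b\x00\x00\x00'
number_of_entries = len(mrulist_data) // 2
entries = struct.unpack('<{0:d}H'.format(number_of_entries), mrulist_data)
print([chr(entry) for entry in entries if entry])  # prints: ['a', 'c', 'b']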
Extract event objects from a MRUList Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. codepage (Optional[str]): extended ASCII string codepage.
def _ParseMRUListKey(self, parser_mediator, registry_key, codepage='cp1252'): try: mrulist = self._ParseMRUListValue(registry_key) except (ValueError, errors.ParseError) as exception: parser_mediator.ProduceExtractionWarning( 'unable to parse MRUList value with error: {0!s}'.format(exception)) return if not mrulist: return values_dict = {} found_terminator = False for entry_index, entry_letter in enumerate(mrulist): # The MRU list is terminated with '\0' (0x0000). if entry_letter == 0: break if found_terminator: parser_mediator.ProduceExtractionWarning(( 'found additional MRUList entries after terminator in key: ' '{0:s}.').format(registry_key.path)) # Only create one parser error per terminator. found_terminator = False entry_letter = chr(entry_letter) value_string = self._ParseMRUListEntryValue( parser_mediator, registry_key, entry_index, entry_letter, codepage=codepage) value_text = 'Index: {0:d} [MRU Value {1:s}]'.format( entry_index + 1, entry_letter) values_dict[value_text] = value_string event_data = windows_events.WindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = registry_key.offset event_data.regvalue = values_dict event_data.source_append = self._SOURCE_APPEND event = time_events.DateTimeValuesEvent( registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
288,671
Parses the MRUList entry value. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains the MRUList value. entry_index (int): MRUList entry index. entry_letter (str): character value representing the entry. Returns: str: MRUList entry value.
def _ParseMRUListEntryValue( self, parser_mediator, registry_key, entry_index, entry_letter, **kwargs): value_string = '' value = registry_key.GetValueByName('{0:s}'.format(entry_letter)) if value is None: parser_mediator.ProduceExtractionWarning( 'missing MRUList value: {0:s} in key: {1:s}.'.format( entry_letter, registry_key.path)) elif value.DataIsString(): value_string = value.GetDataAsObject() elif value.DataIsBinaryData(): logger.debug(( '[{0:s}] Non-string MRUList entry value: {1:s} parsed as string ' 'in key: {2:s}.').format(self.NAME, entry_letter, registry_key.path)) utf16le_string_map = self._GetDataTypeMap('utf16le_string') try: value_string = self._ReadStructureFromByteStream( value.data, 0, utf16le_string_map) except (ValueError, errors.ParseError) as exception: parser_mediator.ProduceExtractionWarning(( 'unable to parse MRUList entry value: {0:s} with error: ' '{1!s}').format(entry_letter, exception)) value_string = value_string.rstrip('\x00') return value_string
288,672
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. codepage (Optional[str]): extended ASCII string codepage.
def ExtractEvents( self, parser_mediator, registry_key, codepage='cp1252', **kwargs): self._ParseMRUListKey(parser_mediator, registry_key, codepage=codepage)
288,673
Initializes a delimiter separated values (DSV) parser. Args: encoding (Optional[str]): encoding used in the DSV file, where None indicates the codepage of the parser mediator should be used.
def __init__(self, encoding=None): super(DSVParser, self).__init__() self._encoding = encoding if py2to3.PY_2: self._end_of_line = b'\n' else: self._end_of_line = '\n' self._maximum_line_length = ( len(self._end_of_line) + len(self.COLUMNS) * (self.FIELD_SIZE_LIMIT + len(self.DELIMITER)))
288,675
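A worked example of the maximum line length computed above, assuming a hypothetical subclass with three columns, a one-character delimiter and a FIELD_SIZE_LIMIT of 1024.
COLUMNS = ['date', 'time', 'message']  # hypothetical subclass values
DELIMITER = ','
FIELD_SIZE_LIMIT = 1024
end_of_line = '\n'

maximum_line_length = (
    len(end_of_line) + len(COLUMNS) * (FIELD_SIZE_LIMIT + len(DELIMITER)))
print(maximum_line_length)  # prints: 3076 (1 + 3 * (1024 + 1))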
Converts all strings in a DSV row dict to Unicode. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. row (dict[str, bytes]): a row from a DSV file, where the dictionary key contains the column name and the value a binary string. Returns: dict[str, str]: a row from the DSV file, where the dictionary key contains the column name and the value a Unicode string.
def _ConvertRowToUnicode(self, parser_mediator, row): for key, value in iter(row.items()): if isinstance(value, py2to3.UNICODE_TYPE): continue try: row[key] = value.decode(self._encoding) except UnicodeDecodeError: replaced_value = value.decode(self._encoding, errors='replace') parser_mediator.ProduceExtractionWarning( 'error decoding DSV value: {0:s} as {1:s}, characters have been ' 'replaced in {2:s}'.format(key, self._encoding, replaced_value)) row[key] = replaced_value return row
288,676
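A standalone illustration of the decode-or-replace fallback above, using a byte string that is not valid UTF-8.
value = b'caf\xe9'  # Latin-1 encoded 'café', not valid UTF-8
try:
  text = value.decode('utf-8')
except UnicodeDecodeError:
  text = value.decode('utf-8', errors='replace')
print(text)  # prints 'caf' followed by U+FFFD REPLACEMENT CHARACTER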
Returns a reader that processes each row and yields dictionaries. csv.DictReader does this job well for single-character delimiters; parsers that need multi-character delimiters need to override this method. Args: line_reader (iter): yields lines from a file-like object. Returns: iter: a reader of dictionaries, as returned by csv.DictReader().
def _CreateDictReader(self, line_reader): delimiter = self.DELIMITER quotechar = self.QUOTE_CHAR magic_test_string = self._MAGIC_TEST_STRING # Python 3 csv module requires arguments to constructor to be of type str. if py2to3.PY_3: delimiter = delimiter.decode(self._encoding) quotechar = quotechar.decode(self._encoding) magic_test_string = magic_test_string.decode(self._encoding) return csv.DictReader( line_reader, delimiter=delimiter, fieldnames=self.COLUMNS, quotechar=quotechar, restkey=magic_test_string, restval=magic_test_string)
288,677
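A standalone illustration of the csv.DictReader behaviour relied on above: fields beyond the declared columns are collected under restkey and missing fields get restval, which lets the parser detect malformed rows; the MAGIC string is a stand-in for _MAGIC_TEST_STRING.
import csv
import io

MAGIC = '__rest__'  # stand-in for the parser's _MAGIC_TEST_STRING
line_reader = io.StringIO('a,1,extra\nb,2\n')
reader = csv.DictReader(
    line_reader, delimiter=',', fieldnames=['letter', 'number'],
    quotechar='"', restkey=MAGIC, restval=MAGIC)
for row in reader:
  print(row)
# The first row gains {'__rest__': ['extra']}, flagging the surplus field;
# the second row maps cleanly onto the declared columns.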