Dataset columns: docstring (string, lengths 52 to 499), function (string, lengths 67 to 35.2k), __index_level_0__ (int64, values 52.6k to 1.16M).
Signal handler for the SIGSEGV signal. Args: signal_number (int): numeric representation of the signal. stack_frame (frame): current stack frame or None.
def _SigSegvHandler(self, signal_number, stack_frame):
  self._OnCriticalError()

  # Note that the original SIGSEGV handler can be 0.
  if self._original_sigsegv_handler is not None:
    # Let the original SIGSEGV handler take over.
    signal.signal(signal.SIGSEGV, self._original_sigsegv_handler)
    os.kill(self._pid, signal.SIGSEGV)
289,164
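For context, a handler like this only works if the original handler was captured when the custom one was installed. A minimal, self-contained sketch of that registration pattern (the module-level functions here are illustrative stand-ins for the class methods above):

import os
import signal

_original_sigsegv_handler = None

def _sigsegv_handler(signal_number, stack_frame):
  # Restore whatever handler was installed before ours, then re-raise the
  # signal against this process so the original handling takes over.
  if _original_sigsegv_handler is not None:
    signal.signal(signal.SIGSEGV, _original_sigsegv_handler)
  os.kill(os.getpid(), signal.SIGSEGV)

# signal.signal() returns the previous handler, which can also be a plain
# integer such as signal.SIG_DFL (0), matching the note in the row above.
_original_sigsegv_handler = signal.signal(signal.SIGSEGV, _sigsegv_handler)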
Starts profiling. Args: configuration (ProfilingConfiguration): profiling configuration.
def _StartProfiling(self, configuration):
  if not configuration:
    return

  if configuration.HaveProfileMemoryGuppy():
    self._guppy_memory_profiler = profilers.GuppyMemoryProfiler(
        self._name, configuration)
    self._guppy_memory_profiler.Start()

  if configuration.HaveProfileMemory():
    self._memory_profiler = profilers.MemoryProfiler(
        self._name, configuration)
    self._memory_profiler.Start()

  if configuration.HaveProfileProcessing():
    identifier = '{0:s}-processing'.format(self._name)
    self._processing_profiler = profilers.ProcessingProfiler(
        identifier, configuration)
    self._processing_profiler.Start()

  if configuration.HaveProfileSerializers():
    identifier = '{0:s}-serializers'.format(self._name)
    self._serializers_profiler = profilers.SerializersProfiler(
        identifier, configuration)
    self._serializers_profiler.Start()

  if configuration.HaveProfileStorage():
    self._storage_profiler = profilers.StorageProfiler(
        self._name, configuration)
    self._storage_profiler.Start()

  if configuration.HaveProfileTasks():
    self._tasks_profiler = profilers.TasksProfiler(self._name, configuration)
    self._tasks_profiler.Start()
289,166
Parses a data object. Args: file_object (dfvfs.FileIO): a file-like object. file_offset (int): offset of the data object relative to the start of the file-like object. Returns: bytes: data. Raises: ParseError: if the data object cannot be parsed.
def _ParseDataObject(self, file_object, file_offset):
  data_object_map = self._GetDataTypeMap('systemd_journal_data_object')

  try:
    data_object, _ = self._ReadStructureFromFileObject(
        file_object, file_offset, data_object_map)
  except (ValueError, errors.ParseError) as exception:
    raise errors.ParseError((
        'Unable to parse data object at offset: 0x{0:08x} with error: '
        '{1!s}').format(file_offset, exception))

  if data_object.object_type != self._OBJECT_TYPE_DATA:
    raise errors.ParseError('Unsupported object type: {0:d}.'.format(
        data_object.object_type))

  if data_object.object_flags not in (
      0, self._OBJECT_COMPRESSED_FLAG_XZ, self._OBJECT_COMPRESSED_FLAG_LZ4):
    raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(
        data_object.object_flags))

  # The data is read separately for performance reasons.
  data_size = data_object.data_size - 64
  data = file_object.read(data_size)

  if data_object.object_flags & self._OBJECT_COMPRESSED_FLAG_XZ:
    data = lzma.decompress(data)

  elif data_object.object_flags & self._OBJECT_COMPRESSED_FLAG_LZ4:
    uncompressed_size_map = self._GetDataTypeMap('uint32le')

    try:
      uncompressed_size = self._ReadStructureFromByteStream(
          data, file_offset + 64, uncompressed_size_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.ParseError((
          'Unable to parse LZ4 uncompressed size at offset: 0x{0:08x} with '
          'error: {1!s}').format(file_offset + 64, exception))

    data = lz4.block.decompress(
        data[8:], uncompressed_size=uncompressed_size)

  return data
289,172
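The LZ4 branch above reflects that journal DATA objects store the uncompressed size in the first bytes of the payload, which lz4.block.decompress needs because the raw block format carries no size header. A small sketch of that round trip, assuming the lz4 Python package is installed:

import struct

import lz4.block

payload = b'MESSAGE=example journal message body' * 8
compressed = lz4.block.compress(payload, store_size=False)

# Mimic the journal layout: an 8-byte size prefix followed by the raw
# block; the parser above reads the size as a 32-bit little-endian value.
data = struct.pack('<Q', len(payload)) + compressed

uncompressed_size = struct.unpack_from('<I', data)[0]
assert lz4.block.decompress(
    data[8:], uncompressed_size=uncompressed_size) == payload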
Parses an entry array object. Args: file_object (dfvfs.FileIO): a file-like object. file_offset (int): offset of the entry array object relative to the start of the file-like object. Returns: systemd_journal_entry_array_object: entry array object. Raises: ParseError: if the entry array object cannot be parsed.
def _ParseEntryArrayObject(self, file_object, file_offset):
  entry_array_object_map = self._GetDataTypeMap(
      'systemd_journal_entry_array_object')

  try:
    entry_array_object, _ = self._ReadStructureFromFileObject(
        file_object, file_offset, entry_array_object_map)
  except (ValueError, errors.ParseError) as exception:
    raise errors.ParseError((
        'Unable to parse entry array object at offset: 0x{0:08x} with error: '
        '{1!s}').format(file_offset, exception))

  if entry_array_object.object_type != self._OBJECT_TYPE_ENTRY_ARRAY:
    raise errors.ParseError('Unsupported object type: {0:d}.'.format(
        entry_array_object.object_type))

  if entry_array_object.object_flags != 0:
    raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(
        entry_array_object.object_flags))

  return entry_array_object
289,173
Parses an entry object. Args: file_object (dfvfs.FileIO): a file-like object. file_offset (int): offset of the entry object relative to the start of the file-like object. Returns: systemd_journal_entry_object: entry object. Raises: ParseError: if the entry object cannot be parsed.
def _ParseEntryObject(self, file_object, file_offset):
  entry_object_map = self._GetDataTypeMap('systemd_journal_entry_object')

  try:
    entry_object, _ = self._ReadStructureFromFileObject(
        file_object, file_offset, entry_object_map)
  except (ValueError, errors.ParseError) as exception:
    raise errors.ParseError((
        'Unable to parse entry object at offset: 0x{0:08x} with error: '
        '{1!s}').format(file_offset, exception))

  if entry_object.object_type != self._OBJECT_TYPE_ENTRY:
    raise errors.ParseError('Unsupported object type: {0:d}.'.format(
        entry_object.object_type))

  if entry_object.object_flags != 0:
    raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(
        entry_object.object_flags))

  return entry_object
289,174
Parses entry array objects for the offset of the entry objects. Args: file_object (dfvfs.FileIO): a file-like object. file_offset (int): offset of the first entry array object relative to the start of the file-like object. Returns: list[int]: offsets of the entry objects.
def _ParseEntryObjectOffsets(self, file_object, file_offset):
  entry_array_object = self._ParseEntryArrayObject(file_object, file_offset)

  entry_object_offsets = list(entry_array_object.entry_object_offsets)
  while entry_array_object.next_entry_array_offset != 0:
    entry_array_object = self._ParseEntryArrayObject(
        file_object, entry_array_object.next_entry_array_offset)
    entry_object_offsets.extend(entry_array_object.entry_object_offsets)

  return entry_object_offsets
289,175
Parses a journal entry. This method will generate an event per ENTRY object. Args: file_object (dfvfs.FileIO): a file-like object. file_offset (int): offset of the entry object relative to the start of the file-like object. Returns: dict[str, object]: entry items per key. Raises: ParseError: when an object offset is out of bounds.
def _ParseJournalEntry(self, file_object, file_offset):
  entry_object = self._ParseEntryObject(file_object, file_offset)

  # The data is read separately for performance reasons.
  entry_item_map = self._GetDataTypeMap('systemd_journal_entry_item')

  file_offset += 64
  data_end_offset = file_offset + entry_object.data_size - 64

  fields = {'real_time': entry_object.real_time}

  while file_offset < data_end_offset:
    try:
      entry_item, entry_item_data_size = self._ReadStructureFromFileObject(
          file_object, file_offset, entry_item_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.ParseError((
          'Unable to parse entry item at offset: 0x{0:08x} with error: '
          '{1!s}').format(file_offset, exception))

    file_offset += entry_item_data_size

    if entry_item.object_offset < self._maximum_journal_file_offset:
      raise errors.ParseError(
          'object offset should be after hash tables ({0:d} < {1:d})'.format(
              entry_item.object_offset, self._maximum_journal_file_offset))

    event_data = self._ParseDataObject(file_object, entry_item.object_offset)
    event_string = event_data.decode('utf-8')
    key, value = event_string.split('=', 1)
    fields[key] = value

  return fields
289,176
Parses a Systemd journal file-like object. Args: parser_mediator (ParserMediator): parser mediator. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the header cannot be parsed.
def ParseFileObject(self, parser_mediator, file_object):
  file_header_map = self._GetDataTypeMap('systemd_journal_file_header')

  try:
    file_header, _ = self._ReadStructureFromFileObject(
        file_object, 0, file_header_map)
  except (ValueError, errors.ParseError) as exception:
    raise errors.UnableToParseFile(
        'Unable to parse file header with error: {0!s}'.format(
            exception))

  if file_header.signature != self._FILE_SIGNATURE:
    raise errors.UnableToParseFile('Invalid file signature.')

  if file_header.header_size not in self._SUPPORTED_FILE_HEADER_SIZES:
    raise errors.UnableToParseFile(
        'Unsupported file header size: {0:d}.'.format(
            file_header.header_size))

  data_hash_table_end_offset = (
      file_header.data_hash_table_offset +
      file_header.data_hash_table_size)
  field_hash_table_end_offset = (
      file_header.field_hash_table_offset +
      file_header.field_hash_table_size)
  self._maximum_journal_file_offset = max(
      data_hash_table_end_offset, field_hash_table_end_offset)

  entry_object_offsets = self._ParseEntryObjectOffsets(
      file_object, file_header.entry_array_offset)

  for entry_object_offset in entry_object_offsets:
    if entry_object_offset == 0:
      continue

    try:
      fields = self._ParseJournalEntry(file_object, entry_object_offset)
    except errors.ParseError as exception:
      parser_mediator.ProduceExtractionWarning((
          'Unable to parse journal entry at offset: 0x{0:08x} with '
          'error: {1!s}').format(entry_object_offset, exception))
      return

    event_data = SystemdJournalEventData()
    event_data.body = fields.get('MESSAGE', None)
    event_data.hostname = fields.get('_HOSTNAME', None)
    event_data.reporter = fields.get('SYSLOG_IDENTIFIER', None)

    if event_data.reporter and event_data.reporter != 'kernel':
      event_data.pid = fields.get('_PID', fields.get('SYSLOG_PID', None))

    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
        timestamp=fields['real_time'])
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
289,177
Retrieves a string representation of the event type. Args: event_type (int): event type. Returns: str: description of the event type.
def GetEventTypeString(self, event_type):
  if 0 <= event_type < len(self._EVENT_TYPES):
    return self._EVENT_TYPES[event_type]
  return 'Unknown {0:d}'.format(event_type)
289,179
Retrieves a string representation of the severity. Args: severity (int): severity. Returns: str: description of the event severity.
def GetSeverityString(self, severity):
  if 0 <= severity < len(self._SEVERITY):
    return self._SEVERITY[severity]
  return 'Unknown {0:d}'.format(severity)
289,180
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
def GetMessages(self, formatter_mediator, event):
  if self.DATA_TYPE != event.data_type:
    raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
        event.data_type))

  event_values = event.CopyToDict()

  event_type = event_values.get('event_type', None)
  if event_type is not None:
    event_values['event_type'] = self.GetEventTypeString(event_type)

  # TODO: add string representation of facility.
  severity = event_values.get('severity', None)
  if severity is not None:
    event_values['severity'] = self.GetSeverityString(severity)

  source_name = event_values.get('source_name', None)
  message_identifier = event_values.get('message_identifier', None)
  strings = event_values.get('strings', [])
  if source_name and message_identifier:
    message_string = formatter_mediator.GetWindowsEventMessage(
        source_name, message_identifier)
    if message_string:
      try:
        event_values['message_string'] = message_string.format(*strings)
      except IndexError:
        # Unable to create the message string.
        pass

  message_strings = []
  for string in strings:
    message_strings.append('\'{0:s}\''.format(string))
  message_string = ', '.join(message_strings)
  event_values['strings'] = '[{0:s}]'.format(message_string)

  return self._ConditionalFormatMessages(event_values)
289,181
Initializes the output module object. Args: output_mediator (OutputMediator): mediates interactions between output modules and other components, such as storage and dfvfs.
def __init__(self, output_mediator):
  super(JSONOutputModule, self).__init__(output_mediator)
  self._event_counter = 0
289,182
Writes the body of an event object to the output. Args: event (EventObject): event.
def WriteEventBody(self, event):
  inode = getattr(event, 'inode', None)
  if inode is None:
    event.inode = 0

  json_dict = self._JSON_SERIALIZER.WriteSerializedDict(event)
  json_string = json.dumps(json_dict, sort_keys=True)

  if self._event_counter != 0:
    self._output_writer.Write(', ')

  line = '"event_{0:d}": {1:s}\n'.format(self._event_counter, json_string)
  self._output_writer.Write(line)

  self._event_counter += 1
289,183
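The counter-and-comma logic means the writer emits the members of a single JSON object, one "event_N" key per event; the enclosing braces presumably come from the module's open and close handlers, which are not shown in this row. A sketch of the resulting shape:

import json

events = [{'message': 'first'}, {'message': 'second'}]

parts = []
for event_counter, event in enumerate(events):
  if event_counter != 0:
    parts.append(', ')
  parts.append('"event_{0:d}": {1:s}\n'.format(
      event_counter, json.dumps(event, sort_keys=True)))

# The braces around the members are assumed to be written elsewhere.
document = '{' + ''.join(parts) + '}'
print(json.loads(document))  # one object keyed event_0, event_1, ...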
Retrieves the row cache for a specific query. The row cache is a set that contains hashes of values in a row. The row cache is used to find duplicate rows when a database and a database with a WAL file are parsed. Args: query (str): query. Returns: set: hashes of the rows that have been parsed.
def GetRowCache(self, query):
  query_hash = hash(query)
  if query_hash not in self._row_caches:
    self._row_caches[query_hash] = set()
  return self._row_caches[query_hash]
289,185
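A sketch of how such a cache can be used to skip rows already seen when the main database and its WAL copy are parsed in turn (hashing the row values as a tuple is an assumption for illustration):

row_caches = {}

def get_row_cache(query):
  return row_caches.setdefault(hash(query), set())

def parse_rows(query, rows):
  cache = get_row_cache(query)
  new_rows = []
  for row in rows:
    row_hash = hash(tuple(row))
    if row_hash in cache:
      continue  # Already produced from the other copy of the database.
    cache.add(row_hash)
    new_rows.append(row)
  return new_rows

assert parse_rows('SELECT a, b FROM t', [(1, 2), (3, 4)]) == [(1, 2), (3, 4)]
assert parse_rows('SELECT a, b FROM t', [(1, 2), (5, 6)]) == [(5, 6)]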
Initializes the database object. Args: filename (str): name of the file entry. temporary_directory (Optional[str]): path of the directory for temporary files.
def __init__(self, filename, temporary_directory=None):
  self._database = None
  self._filename = filename
  self._is_open = False
  self._temp_db_file_path = ''
  self._temporary_directory = temporary_directory
  self._temp_wal_file_path = ''

  self.schema = {}
289,186
Copies the contents of the file-like object to a temporary file. Args: file_object (dfvfs.FileIO): file-like object. temporary_file (file): temporary file.
def _CopyFileObjectToTemporaryFile(self, file_object, temporary_file):
  file_object.seek(0, os.SEEK_SET)
  data = file_object.read(self._READ_BUFFER_SIZE)
  while data:
    temporary_file.write(data)
    data = file_object.read(self._READ_BUFFER_SIZE)
289,187
Queries the database. Args: query (str): SQL query. Returns: sqlite3.Cursor: results. Raises: sqlite3.DatabaseError: if querying the database fails.
def Query(self, query):
  cursor = self._database.cursor()
  cursor.execute(query)
  return cursor
289,190
Parses a SQLite database file entry. Args: parser_mediator (ParserMediator): parser mediator. file_entry (dfvfs.FileEntry): file entry to be parsed. Raises: UnableToParseFile: when the file cannot be parsed.
def ParseFileEntry(self, parser_mediator, file_entry):
  filename = parser_mediator.GetFilename()
  database = SQLiteDatabase(
      filename, temporary_directory=parser_mediator.temporary_directory)

  file_object = file_entry.GetFileObject()
  try:
    database.Open(file_object)
  except (IOError, ValueError, sqlite3.DatabaseError) as exception:
    parser_mediator.ProduceExtractionWarning(
        'unable to open SQLite database with error: {0!s}'.format(exception))
    file_object.close()
    return

  database_wal, wal_file_entry = self._OpenDatabaseWithWAL(
      parser_mediator, file_entry, file_object, filename)

  file_object.close()

  # Create a cache in which the resulting tables are cached.
  cache = SQLiteCache()

  try:
    table_names = frozenset(database.tables)

    for plugin in self._plugins:
      if not plugin.REQUIRED_TABLES.issubset(table_names):
        continue

      schema_match = plugin.CheckSchema(database)
      if plugin.REQUIRES_SCHEMA_MATCH and not schema_match:
        parser_mediator.ProduceExtractionWarning((
            'plugin: {0:s} found required tables but not a matching '
            'schema').format(plugin.NAME))
        continue

      parser_mediator.SetFileEntry(file_entry)
      parser_mediator.AddEventAttribute('schema_match', schema_match)

      try:
        plugin.UpdateChainAndProcess(
            parser_mediator, cache=cache, database=database,
            database_wal=database_wal, wal_file_entry=wal_file_entry)
      except Exception as exception:  # pylint: disable=broad-except
        parser_mediator.ProduceExtractionWarning((
            'plugin: {0:s} unable to parse SQLite database with error: '
            '{1!s}').format(plugin.NAME, exception))
      finally:
        parser_mediator.RemoveEventAttribute('schema_match')

      if not database_wal:
        continue

      # For the WAL pass, check the schema of the database with the WAL
      # applied rather than the plain database again.
      schema_match = plugin.CheckSchema(database_wal)

      parser_mediator.SetFileEntry(wal_file_entry)
      parser_mediator.AddEventAttribute('schema_match', schema_match)

      try:
        plugin.UpdateChainAndProcess(
            parser_mediator, cache=cache, database=database,
            database_wal=database_wal, wal_file_entry=wal_file_entry)
      except Exception as exception:  # pylint: disable=broad-except
        parser_mediator.ProduceExtractionWarning((
            'plugin: {0:s} unable to parse SQLite database and WAL with '
            'error: {1!s}').format(plugin.NAME, exception))
      finally:
        parser_mediator.RemoveEventAttribute('schema_match')

  finally:
    database.Close()
289,192
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
  for subkey in registry_key.GetSubkeys():
    values_dict = {}
    values_dict['subkey_name'] = subkey.name

    name_values = subkey.name.split('&')
    number_of_name_values = len(name_values)

    # Normally we expect 4 fields here however that is not always the case.
    if number_of_name_values != 4:
      logger.warning(
          'Expected 4 &-separated values in: {0:s}'.format(subkey.name))

    if number_of_name_values >= 1:
      values_dict['device_type'] = name_values[0]
    if number_of_name_values >= 2:
      values_dict['vendor'] = name_values[1]
    if number_of_name_values >= 3:
      values_dict['product'] = name_values[2]
    if number_of_name_values >= 4:
      values_dict['revision'] = name_values[3]

    event_data = windows_events.WindowsRegistryEventData()
    event_data.key_path = registry_key.path
    event_data.offset = registry_key.offset
    event_data.regvalue = values_dict
    event_data.source_append = self._SOURCE_APPEND

    if subkey.number_of_subkeys == 0:
      # Time last USB device of this class was first inserted.
      event = time_events.DateTimeValuesEvent(
          subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
      parser_mediator.ProduceEventWithEventData(event, event_data)
      continue

    for device_key in subkey.GetSubkeys():
      values_dict['serial'] = device_key.name

      friendly_name_value = device_key.GetValueByName('FriendlyName')
      if friendly_name_value:
        values_dict['friendly_name'] = friendly_name_value.GetDataAsObject()
      else:
        values_dict.pop('friendly_name', None)

      # ParentIdPrefix applies to Windows XP Only.
      parent_id_prefix_value = device_key.GetValueByName('ParentIdPrefix')
      if parent_id_prefix_value:
        values_dict['parent_id_prefix'] = (
            parent_id_prefix_value.GetDataAsObject())
      else:
        values_dict.pop('parent_id_prefix', None)

      # Time last USB device of this class was first inserted.
      event = time_events.DateTimeValuesEvent(
          subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
      parser_mediator.ProduceEventWithEventData(event, event_data)

      # Win7 - Last Connection.
      # Vista/XP - Time of an insert.
      event = time_events.DateTimeValuesEvent(
          device_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
      parser_mediator.ProduceEventWithEventData(event, event_data)

      device_parameter_key = device_key.GetSubkeyByName('Device Parameters')
      if device_parameter_key:
        event = time_events.DateTimeValuesEvent(
            device_parameter_key.last_written_time,
            definitions.TIME_DESCRIPTION_WRITTEN)
        parser_mediator.ProduceEventWithEventData(event, event_data)

      log_configuration_key = device_key.GetSubkeyByName('LogConf')
      if log_configuration_key:
        event = time_events.DateTimeValuesEvent(
            log_configuration_key.last_written_time,
            definitions.TIME_DESCRIPTION_WRITTEN)
        parser_mediator.ProduceEventWithEventData(event, event_data)

      properties_key = device_key.GetSubkeyByName('Properties')
      if properties_key:
        event = time_events.DateTimeValuesEvent(
            properties_key.last_written_time,
            definitions.TIME_DESCRIPTION_WRITTEN)
        parser_mediator.ProduceEventWithEventData(event, event_data)
289,193
Parses a C string from the page data. Args: page_data (bytes): page data. string_offset (int): offset of the string relative to the start of the page. Returns: str: string. Raises: ParseError: when the string cannot be parsed.
def _ParseCString(self, page_data, string_offset):
  cstring_map = self._GetDataTypeMap('cstring')

  try:
    value_string = self._ReadStructureFromByteStream(
        page_data[string_offset:], string_offset, cstring_map)
  except (ValueError, errors.ParseError) as exception:
    raise errors.ParseError((
        'Unable to map string data at offset: 0x{0:08x} with error: '
        '{1!s}').format(string_offset, exception))

  return value_string.rstrip('\x00')
289,196
Parses a page. Args: parser_mediator (ParserMediator): parser mediator. file_offset (int): offset of the data relative to the start of the file-like object. page_data (bytes): page data. Raises: ParseError: when the page cannot be parsed.
def _ParsePage(self, parser_mediator, file_offset, page_data):
  page_header_map = self._GetDataTypeMap('binarycookies_page_header')

  try:
    page_header = self._ReadStructureFromByteStream(
        page_data, file_offset, page_header_map)
  except (ValueError, errors.ParseError) as exception:
    raise errors.ParseError((
        'Unable to map page header data at offset: 0x{0:08x} with error: '
        '{1!s}').format(file_offset, exception))

  for record_offset in page_header.offsets:
    if parser_mediator.abort:
      break

    self._ParseRecord(parser_mediator, page_data, record_offset)
289,197
Parses a record from the page data. Args: parser_mediator (ParserMediator): parser mediator. page_data (bytes): page data. record_offset (int): offset of the record relative to the start of the page. Raises: ParseError: when the record cannot be parsed.
def _ParseRecord(self, parser_mediator, page_data, record_offset):
  record_header_map = self._GetDataTypeMap('binarycookies_record_header')

  try:
    record_header = self._ReadStructureFromByteStream(
        page_data[record_offset:], record_offset, record_header_map)
  except (ValueError, errors.ParseError) as exception:
    raise errors.ParseError((
        'Unable to map record header data at offset: 0x{0:08x} with error: '
        '{1!s}').format(record_offset, exception))

  event_data = SafariBinaryCookieEventData()
  event_data.flags = record_header.flags

  if record_header.url_offset:
    data_offset = record_offset + record_header.url_offset
    event_data.url = self._ParseCString(page_data, data_offset)

  if record_header.name_offset:
    data_offset = record_offset + record_header.name_offset
    event_data.cookie_name = self._ParseCString(page_data, data_offset)

  if record_header.path_offset:
    data_offset = record_offset + record_header.path_offset
    event_data.path = self._ParseCString(page_data, data_offset)

  if record_header.value_offset:
    data_offset = record_offset + record_header.value_offset
    event_data.cookie_value = self._ParseCString(page_data, data_offset)

  if record_header.creation_time:
    date_time = dfdatetime_cocoa_time.CocoaTime(
        timestamp=record_header.creation_time)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  if record_header.expiration_time:
    date_time = dfdatetime_cocoa_time.CocoaTime(
        timestamp=record_header.expiration_time)
  else:
    date_time = dfdatetime_semantic_time.SemanticTime('Not set')

  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_EXPIRATION)
  parser_mediator.ProduceEventWithEventData(event, event_data)

  for plugin in self._cookie_plugins:
    if parser_mediator.abort:
      break

    if event_data.cookie_name != plugin.COOKIE_NAME:
      continue

    try:
      plugin.UpdateChainAndProcess(
          parser_mediator, cookie_name=event_data.cookie_name,
          cookie_data=event_data.cookie_value, url=event_data.url)
    except Exception as exception:  # pylint: disable=broad-except
      parser_mediator.ProduceExtractionWarning(
          'plugin: {0:s} unable to parse cookie with error: {1!s}'.format(
              plugin.NAME, exception))
289,198
Parses a Safari binary cookie file-like object. Args: parser_mediator (ParserMediator): parser mediator. file_object (dfvfs.FileIO): file-like object to be parsed. Raises: UnableToParseFile: when the file cannot be parsed, this will signal the event extractor to apply other parsers.
def ParseFileObject(self, parser_mediator, file_object):
  file_header_map = self._GetDataTypeMap('binarycookies_file_header')

  try:
    file_header, file_header_data_size = self._ReadStructureFromFileObject(
        file_object, 0, file_header_map)
  except (ValueError, errors.ParseError) as exception:
    raise errors.UnableToParseFile(
        'Unable to read file header with error: {0!s}.'.format(exception))

  if file_header.signature != self._SIGNATURE:
    raise errors.UnableToParseFile('Unsupported file signature.')

  file_offset = file_header_data_size

  # TODO: move page sizes array into file header, this will require dtFabric
  # to compare signature as part of data map.
  page_sizes_data_size = file_header.number_of_pages * 4

  page_sizes_data = file_object.read(page_sizes_data_size)

  context = dtfabric_data_maps.DataTypeMapContext(values={
      'binarycookies_file_header': file_header})

  page_sizes_map = self._GetDataTypeMap('binarycookies_page_sizes')

  try:
    page_sizes_array = self._ReadStructureFromByteStream(
        page_sizes_data, file_offset, page_sizes_map, context=context)
  except (ValueError, errors.ParseError) as exception:
    raise errors.ParseError((
        'Unable to map page sizes data at offset: 0x{0:08x} with error: '
        '{1!s}').format(file_offset, exception))

  file_offset += page_sizes_data_size

  for page_number, page_size in enumerate(page_sizes_array):
    if parser_mediator.abort:
      break

    page_data = file_object.read(page_size)
    if len(page_data) != page_size:
      parser_mediator.ProduceExtractionWarning(
          'unable to read page: {0:d}'.format(page_number))
      break

    self._ParsePage(parser_mediator, file_offset, page_data)

    file_offset += page_size
289,199
Adds command line arguments to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
def AddArguments(cls, argument_group):
  argument_group.add_argument(
      '--disable_zeromq', '--disable-zeromq', action='store_false',
      dest='use_zeromq', default=True, help=(
          'Disable queueing using ZeroMQ. A Multiprocessing queue will be '
          'used instead.'))
289,200
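Usage of the helper boils down to handing it an argparse parser or group; note that both option spellings map onto the same use_zeromq destination, so either flag flips the default. A minimal standalone sketch:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    '--disable_zeromq', '--disable-zeromq', action='store_false',
    dest='use_zeromq', default=True, help='Disable queueing using ZeroMQ.')

options = parser.parse_args(['--disable-zeromq'])
assert options.use_zeromq is False

options = parser.parse_args([])
assert options.use_zeromq is True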
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type.
def ParseOptions(cls, options, configuration_object):
  if not isinstance(configuration_object, tools.CLITool):
    raise errors.BadConfigObject(
        'Configuration object is not an instance of CLITool')

  use_zeromq = getattr(options, 'use_zeromq', True)
  setattr(configuration_object, '_use_zeromq', use_zeromq)
289,201
Initializes a dynamic fields helper. Args: output_mediator (OutputMediator): output mediator.
def __init__(self, output_mediator):
  super(DynamicFieldsHelper, self).__init__()
  self._output_mediator = output_mediator
289,202
Formats the date. Args: event (EventObject): event. Returns: str: date field.
def _FormatDate(self, event):
  # TODO: preserve dfdatetime as an object.
  # TODO: add support for self._output_mediator.timezone
  date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
      timestamp=event.timestamp)

  year, month, day_of_month = date_time.GetDate()
  try:
    return '{0:04d}-{1:02d}-{2:02d}'.format(year, month, day_of_month)
  except (TypeError, ValueError):
    self._ReportEventError(event, (
        'unable to copy timestamp: {0!s} to a human readable date. '
        'Defaulting to: "0000-00-00"').format(event.timestamp))
    return '0000-00-00'
289,203
Formats the date and time in ISO 8601 format. Args: event (EventObject): event. Returns: str: date and time field.
def _FormatDateTime(self, event):
  try:
    return timelib.Timestamp.CopyToIsoFormat(
        event.timestamp, timezone=self._output_mediator.timezone,
        raise_error=True)
  except (OverflowError, ValueError) as exception:
    self._ReportEventError(event, (
        'unable to copy timestamp: {0!s} to a human readable date and time '
        'with error: {1!s}. Defaulting to: "0000-00-00T00:00:00"').format(
            event.timestamp, exception))
    return '0000-00-00T00:00:00'
289,204
Formats the inode. Args: event (EventObject): event. Returns: str: inode field.
def _FormatInode(self, event):
  inode = event.inode
  if inode is None:
    if hasattr(event, 'pathspec') and hasattr(event.pathspec, 'image_inode'):
      inode = event.pathspec.image_inode
  if inode is None:
    inode = '-'
  return inode
289,205
Formats the message. Args: event (EventObject): event. Returns: str: message field. Raises: NoFormatterFound: if no event formatter can be found to match the data type in the event.
def _FormatMessage(self, event):
  message, _ = self._output_mediator.GetFormattedMessages(event)
  if message is None:
    data_type = getattr(event, 'data_type', 'UNKNOWN')
    raise errors.NoFormatterFound(
        'Unable to find event formatter for: {0:s}.'.format(data_type))

  return message
289,206
Formats the short message. Args: event (EventObject): event. Returns: str: short message field. Raises: NoFormatterFound: if no event formatter can be found to match the data type in the event.
def _FormatMessageShort(self, event):
  _, message_short = self._output_mediator.GetFormattedMessages(event)
  if message_short is None:
    data_type = getattr(event, 'data_type', 'UNKNOWN')
    raise errors.NoFormatterFound(
        'Unable to find event formatter for: {0:s}.'.format(data_type))

  return message_short
289,207
Formats the source. Args: event (EventObject): event. Returns: str: source field. Raises: NoFormatterFound: if no event formatter can be found to match the data type in the event.
def _FormatSource(self, event):
  _, source = self._output_mediator.GetFormattedSources(event)
  if source is None:
    data_type = getattr(event, 'data_type', 'UNKNOWN')
    raise errors.NoFormatterFound(
        'Unable to find event formatter for: {0:s}.'.format(data_type))

  return source
289,208
Formats the short source. Args: event (EventObject): event. Returns: str: short source field. Raises: NoFormatterFound: If no event formatter can be found to match the data type in the event.
def _FormatSourceShort(self, event):
  source_short, _ = self._output_mediator.GetFormattedSources(event)
  if source_short is None:
    data_type = getattr(event, 'data_type', 'UNKNOWN')
    raise errors.NoFormatterFound(
        'Unable to find event formatter for: {0:s}.'.format(data_type))

  return source_short
289,209
Formats the event tag. Args: event (EventObject): event. Returns: str: event tag field.
def _FormatTag(self, event):
  tag = getattr(event, 'tag', None)
  if not tag:
    return '-'
  return ' '.join(tag.labels)
289,210
Formats the specified field. Args: event (EventObject): event. field_name (str): name of the field. Returns: str: value of the field.
def GetFormattedField(self, event, field_name):
  callback_name = self._FIELD_FORMAT_CALLBACKS.get(field_name, None)
  callback_function = None
  if callback_name:
    callback_function = getattr(self, callback_name, None)

  if callback_function:
    output_value = callback_function(event)
  else:
    output_value = getattr(event, field_name, '-')

  if output_value is None:
    output_value = '-'
  elif not isinstance(output_value, py2to3.STRING_TYPES):
    output_value = '{0!s}'.format(output_value)

  return output_value
289,211
Initializes an output module object. Args: output_mediator (OutputMediator): an output mediator.
def __init__(self, output_mediator):
  super(DynamicOutputModule, self).__init__(output_mediator)
  self._dynamic_fields_helper = DynamicFieldsHelper(output_mediator)
  self._field_delimiter = self._DEFAULT_FIELD_DELIMITER
  self._fields = self._DEFAULT_FIELDS
289,212
Sanitizes a field for output. This method replaces any field delimiters with a space. Args: field (str): value of the field to sanitize. Returns: str: sanitized value of the field.
def _SanitizeField(self, field):
  if self._field_delimiter and isinstance(field, py2to3.STRING_TYPES):
    return field.replace(self._field_delimiter, ' ')
  return field
289,213
Writes the body of an event to the output. Args: event (EventObject): event.
def WriteEventBody(self, event):
  output_values = []
  for field_name in self._fields:
    output_value = self._dynamic_fields_helper.GetFormattedField(
        event, field_name)

    output_value = self._SanitizeField(output_value)
    output_values.append(output_value)

  output_line = '{0:s}\n'.format(self._field_delimiter.join(output_values))
  self._output_writer.Write(output_line)
289,214
Retrieves a specific serialized attribute container from the list. Args: index (int): attribute container index. Returns: bytes: serialized attribute container data or None if not available. Raises: IndexError: if the index is less than zero.
def GetAttributeContainerByIndex(self, index):
  if index < 0:
    raise IndexError(
        'Unsupported negative index value: {0:d}.'.format(index))

  if index < len(self._list):
    return self._list[index]

  return None
289,217
Pushes a serialized attribute container onto the list. Args: serialized_data (bytes): serialized attribute container data.
def PushAttributeContainer(self, serialized_data):
  self._list.append(serialized_data)
  self.data_size += len(serialized_data)
  self.next_sequence_number += 1
289,219
Deserializes an attribute container. Args: container_type (str): attribute container type. serialized_data (bytes): serialized attribute container data. Returns: AttributeContainer: attribute container or None. Raises: IOError: if the serialized data cannot be decoded. OSError: if the serialized data cannot be decoded.
def _DeserializeAttributeContainer(self, container_type, serialized_data):
  if not serialized_data:
    return None

  if self._serializers_profiler:
    self._serializers_profiler.StartTiming(container_type)

  try:
    serialized_string = serialized_data.decode('utf-8')
  except UnicodeDecodeError as exception:
    raise IOError('Unable to decode serialized data: {0!s}'.format(
        exception))

  attribute_container = self._serializer.ReadSerialized(serialized_string)

  if self._serializers_profiler:
    self._serializers_profiler.StopTiming(container_type)

  return attribute_container
289,222
Retrieves a specific serialized attribute container. Args: container_type (str): attribute container type. index (int): attribute container index. Returns: bytes: serialized attribute container data or None if not available.
def _GetSerializedAttributeContainerByIndex(self, container_type, index):
  container_list = self._GetSerializedAttributeContainerList(container_type)
  return container_list.GetAttributeContainerByIndex(index)
289,223
Retrieves a serialized attribute container list. Args: container_type (str): attribute container type. Returns: SerializedAttributeContainerList: serialized attribute container list.
def _GetSerializedAttributeContainerList(self, container_type):
  container_list = self._serialized_attribute_containers.get(
      container_type, None)
  if not container_list:
    container_list = SerializedAttributeContainerList()
    self._serialized_attribute_containers[container_type] = container_list

  return container_list
289,224
Serializes an attribute container. Args: attribute_container (AttributeContainer): attribute container. Returns: bytes: serialized attribute container. Raises: IOError: if the attribute container cannot be serialized. OSError: if the attribute container cannot be serialized.
def _SerializeAttributeContainer(self, attribute_container):
  if self._serializers_profiler:
    self._serializers_profiler.StartTiming(
        attribute_container.CONTAINER_TYPE)

  try:
    attribute_container_data = self._serializer.WriteSerialized(
        attribute_container)
    if not attribute_container_data:
      raise IOError(
          'Unable to serialize attribute container: {0:s}.'.format(
              attribute_container.CONTAINER_TYPE))

    attribute_container_data = attribute_container_data.encode('utf-8')

  finally:
    if self._serializers_profiler:
      self._serializers_profiler.StopTiming(
          attribute_container.CONTAINER_TYPE)

  return attribute_container_data
289,225
Initializes a storage merge reader. Args: storage_writer (StorageWriter): storage writer.
def __init__(self, storage_writer):
  super(StorageMergeReader, self).__init__()
  self._storage_writer = storage_writer
289,226
Initializes a storage merge reader. Args: storage_writer (StorageWriter): storage writer.
def __init__(self, storage_writer):
  super(StorageFileMergeReader, self).__init__(storage_writer)
  self._serializer = json_serializer.JSONAttributeContainerSerializer
  self._serializers_profiler = None
289,227
Initializes a storage reader. Args: path (str): path to the input file.
def __init__(self, path):
  super(StorageFileReader, self).__init__()
  self._path = path
  self._storage_file = None
289,228
Initializes a storage writer. Args: session (Session): session the storage changes are part of. storage_type (Optional[str]): storage type. task (Optional[Task]): task.
def __init__(
    self, session, storage_type=definitions.STORAGE_TYPE_SESSION, task=None):
  super(StorageWriter, self).__init__()
  self._first_written_event_source_index = 0
  self._serializers_profiler = None
  self._session = session
  self._storage_profiler = None
  self._storage_type = storage_type
  self._task = task
  self._written_event_source_index = 0
  self.number_of_analysis_reports = 0
  self.number_of_event_sources = 0
  self.number_of_event_tags = 0
  self.number_of_events = 0
  self.number_of_warnings = 0
289,229
Initializes a storage writer. Args: session (Session): session the storage changes are part of. output_file (str): path to the output file. storage_type (Optional[str]): storage type. task (Optional[Task]): task.
def __init__(
    self, session, output_file,
    storage_type=definitions.STORAGE_TYPE_SESSION, task=None):
  super(StorageFileWriter, self).__init__(
      session, storage_type=storage_type, task=task)
  self._merge_task_storage_path = ''
  self._output_file = output_file
  self._processed_task_storage_path = ''
  self._storage_file = None
  self._task_storage_path = None
289,230
Retrieves the path of a task storage file in the merge directory. Args: task (Task): task. Returns: str: path of a task storage file in the merge directory.
def _GetMergeTaskStorageFilePath(self, task):
  filename = '{0:s}.plaso'.format(task.identifier)
  return os.path.join(self._merge_task_storage_path, filename)
289,231
Retrieves the path of a task storage file in the processed directory. Args: task (Task): task. Returns: str: path of a task storage file in the processed directory.
def _GetProcessedStorageFilePath(self, task):
  filename = '{0:s}.plaso'.format(task.identifier)
  return os.path.join(self._processed_task_storage_path, filename)
289,232
Retrieves the path of a task storage file in the temporary directory. Args: task (Task): task. Returns: str: path of a task storage file in the temporary directory.
def _GetTaskStorageFilePath(self, task):
  filename = '{0:s}.plaso'.format(task.identifier)
  return os.path.join(self._task_storage_path, filename)
289,233
Updates the counters. Args: event (EventObject): event.
def _UpdateCounters(self, event):
  self._session.parsers_counter['total'] += 1

  # Here we want the name of the parser or plugin not the parser chain.
  parser_name = getattr(event, 'parser', '')
  _, _, parser_name = parser_name.rpartition('/')
  if not parser_name:
    parser_name = 'N/A'

  self._session.parsers_counter[parser_name] += 1
289,234
Adds an analysis report. Args: analysis_report (AnalysisReport): analysis report. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
def AddAnalysisReport(self, analysis_report):
  self._RaiseIfNotWritable()

  self._storage_file.AddAnalysisReport(analysis_report)

  report_identifier = analysis_report.plugin_name
  self._session.analysis_reports_counter['total'] += 1
  self._session.analysis_reports_counter[report_identifier] += 1
  self.number_of_analysis_reports += 1
289,235
Adds a warning. Args: warning (ExtractionWarning): an extraction warning. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
def AddWarning(self, warning):
  self._RaiseIfNotWritable()

  self._storage_file.AddWarning(warning)
  self.number_of_warnings += 1
289,236
Adds an event. Args: event (EventObject): an event. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
def AddEvent(self, event):
  self._RaiseIfNotWritable()

  self._storage_file.AddEvent(event)
  self.number_of_events += 1

  self._UpdateCounters(event)
289,237
Adds an event source. Args: event_source (EventSource): an event source. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
def AddEventSource(self, event_source):
  self._RaiseIfNotWritable()

  self._storage_file.AddEventSource(event_source)
  self.number_of_event_sources += 1
289,238
Adds an event tag. Args: event_tag (EventTag): an event tag. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
def AddEventTag(self, event_tag):
  self._RaiseIfNotWritable()

  self._storage_file.AddEventTag(event_tag)

  self._session.event_labels_counter['total'] += 1
  for label in event_tag.labels:
    self._session.event_labels_counter[label] += 1
  self.number_of_event_tags += 1
289,239
Checks if a task is ready for merging with this session storage. If the task is ready to be merged, this method also sets the task's storage file size. Args: task (Task): task. Returns: bool: True if the task is ready to be merged. Raises: IOError: if the storage type is not supported or if the temporary path for the task storage does not exist. OSError: if the storage type is not supported or if the temporary path for the task storage does not exist.
def CheckTaskReadyForMerge(self, task):
  if self._storage_type != definitions.STORAGE_TYPE_SESSION:
    raise IOError('Unsupported storage type.')

  if not self._processed_task_storage_path:
    raise IOError('Missing processed task storage path.')

  processed_storage_file_path = self._GetProcessedStorageFilePath(task)

  try:
    stat_info = os.stat(processed_storage_file_path)
  except (IOError, OSError):
    return False

  task.storage_file_size = stat_info.st_size
  return True
289,240
Creates a task storage. The task storage is used to store attributes created by the task. Args: task (Task): task. Returns: StorageWriter: storage writer. Raises: IOError: if the storage type is not supported. OSError: if the storage type is not supported.
def CreateTaskStorage(self, task):
  if self._storage_type != definitions.STORAGE_TYPE_SESSION:
    raise IOError('Unsupported storage type.')

  storage_file_path = self._GetTaskStorageFilePath(task)
  return self._CreateTaskStorageWriter(storage_file_path, task)
289,241
Retrieves the events in increasing chronological order. This includes all events written to the storage including those pending being flushed (written) to the storage. Args: time_range (Optional[TimeRange]): time range used to filter events that fall in a specific period. Returns: generator(EventObject): event generator. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
def GetSortedEvents(self, time_range=None):
  if not self._storage_file:
    raise IOError('Unable to read from closed storage writer.')

  return self._storage_file.GetSortedEvents(time_range=time_range)
289,245
Finalizes a processed task storage. Moves the task storage file from its temporary directory to the processed directory. Args: task (Task): task. Raises: IOError: if the storage type is not supported or if the storage file cannot be renamed. OSError: if the storage type is not supported or if the storage file cannot be renamed.
def FinalizeTaskStorage(self, task):
  if self._storage_type != definitions.STORAGE_TYPE_SESSION:
    raise IOError('Unsupported storage type.')

  storage_file_path = self._GetTaskStorageFilePath(task)
  processed_storage_file_path = self._GetProcessedStorageFilePath(task)

  try:
    os.rename(storage_file_path, processed_storage_file_path)
  except OSError as exception:
    raise IOError((
        'Unable to rename task storage file: {0:s} with error: '
        '{1!s}').format(storage_file_path, exception))
289,246
Prepares a task storage for merging. Moves the task storage file from the processed directory to the merge directory. Args: task (Task): task. Raises: IOError: if the storage type is not supported or if the storage file cannot be renamed. OSError: if the storage type is not supported or if the storage file cannot be renamed.
def PrepareMergeTaskStorage(self, task):
  if self._storage_type != definitions.STORAGE_TYPE_SESSION:
    raise IOError('Unsupported storage type.')

  merge_storage_file_path = self._GetMergeTaskStorageFilePath(task)
  processed_storage_file_path = self._GetProcessedStorageFilePath(task)

  task.storage_file_size = os.path.getsize(processed_storage_file_path)

  try:
    os.rename(processed_storage_file_path, merge_storage_file_path)
  except OSError as exception:
    raise IOError((
        'Unable to rename task storage file: {0:s} with error: '
        '{1!s}').format(processed_storage_file_path, exception))
289,248
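Read together with _GetTaskStorageFilePath and _GetProcessedStorageFilePath above, the renames trace one "<task identifier>.plaso" file through three sibling directories: temporary, then processed, then merge. A toy sketch of that lifecycle (the directory names here are illustrative, not the module's actual layout):

import os
import tempfile

base_path = tempfile.mkdtemp()
for name in ('task_storage', 'processed', 'merge'):
  os.mkdir(os.path.join(base_path, name))

filename = 'abc123.plaso'  # hypothetical task identifier
task_path = os.path.join(base_path, 'task_storage', filename)
processed_path = os.path.join(base_path, 'processed', filename)
merge_path = os.path.join(base_path, 'merge', filename)

with open(task_path, 'w') as file_object:
  file_object.write('task results')

os.rename(task_path, processed_path)   # as in FinalizeTaskStorage
os.rename(processed_path, merge_path)  # as in PrepareMergeTaskStorage
assert os.path.exists(merge_path)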
Reads preprocessing information. The preprocessing information contains the system configuration which contains information about various system specific configuration data, for example the user accounts. Args: knowledge_base (KnowledgeBase): is used to store the preprocessing information. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
def ReadPreprocessingInformation(self, knowledge_base):
  if not self._storage_file:
    raise IOError('Unable to read from closed storage writer.')

  self._storage_file.ReadPreprocessingInformation(knowledge_base)
289,249
Removes a processed task storage. Args: task (Task): task. Raises: IOError: if the storage type is not supported or if the storage file cannot be removed. OSError: if the storage type is not supported or if the storage file cannot be removed.
def RemoveProcessedTaskStorage(self, task):
  if self._storage_type != definitions.STORAGE_TYPE_SESSION:
    raise IOError('Unsupported storage type.')

  processed_storage_file_path = self._GetProcessedStorageFilePath(task)

  try:
    os.remove(processed_storage_file_path)
  except OSError as exception:
    raise IOError((
        'Unable to remove task storage file: {0:s} with error: '
        '{1!s}').format(processed_storage_file_path, exception))
289,250
Sets the serializers profiler. Args: serializers_profiler (SerializersProfiler): serializers profiler.
def SetSerializersProfiler(self, serializers_profiler):
  self._serializers_profiler = serializers_profiler
  if self._storage_file:
    self._storage_file.SetSerializersProfiler(serializers_profiler)
289,251
Sets the storage profiler. Args: storage_profiler (StorageProfiler): storage profiler.
def SetStorageProfiler(self, storage_profiler):
  self._storage_profiler = storage_profiler
  if self._storage_file:
    self._storage_file.SetStorageProfiler(storage_profiler)
289,252
Removes the temporary path for the task storage. The results of tasks will be lost on abort. Args: abort (bool): True to indicate the stop is issued on abort. Raises: IOError: if the storage type is not supported. OSError: if the storage type is not supported.
def StopTaskStorage(self, abort=False):
  if self._storage_type != definitions.STORAGE_TYPE_SESSION:
    raise IOError('Unsupported storage type.')

  if os.path.isdir(self._merge_task_storage_path):
    if abort:
      shutil.rmtree(self._merge_task_storage_path)
    else:
      os.rmdir(self._merge_task_storage_path)

  if os.path.isdir(self._processed_task_storage_path):
    if abort:
      shutil.rmtree(self._processed_task_storage_path)
    else:
      os.rmdir(self._processed_task_storage_path)

  if os.path.isdir(self._task_storage_path):
    if abort:
      shutil.rmtree(self._task_storage_path)
    else:
      os.rmdir(self._task_storage_path)

  self._merge_task_storage_path = None
  self._processed_task_storage_path = None
  self._task_storage_path = None
289,255
Writes session completion information. Args: aborted (Optional[bool]): True if the session was aborted. Raises: IOError: if the storage type is not supported or when the storage writer is closed. OSError: if the storage type is not supported or when the storage writer is closed.
def WriteSessionCompletion(self, aborted=False):
  self._RaiseIfNotWritable()

  if self._storage_type != definitions.STORAGE_TYPE_SESSION:
    raise IOError('Unsupported storage type.')

  self._session.aborted = aborted
  session_completion = self._session.CreateSessionCompletion()
  self._storage_file.WriteSessionCompletion(session_completion)
289,256
Writes task completion information. Args: aborted (Optional[bool]): True if the session was aborted. Raises: IOError: if the storage type is not supported or when the storage writer is closed. OSError: if the storage type is not supported or when the storage writer is closed.
def WriteTaskCompletion(self, aborted=False):
  self._RaiseIfNotWritable()

  if self._storage_type != definitions.STORAGE_TYPE_TASK:
    raise IOError('Unsupported storage type.')

  self._task.aborted = aborted
  task_completion = self._task.CreateTaskCompletion()
  self._storage_file.WriteTaskCompletion(task_completion)
289,258
Parses a line of the log file and produces events. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. row_offset (int): line number of the row. row (dict[str, str]): fields of a single row, as specified in COLUMNS.
def ParseRow(self, parser_mediator, row_offset, row):
  try:
    timestamp = self._ConvertToTimestamp(
        row['date'], row['time'], parser_mediator.timezone)
  except errors.TimestampError as exception:
    # Note: {2!s} rather than {2:s}, since an exception is not a string.
    parser_mediator.ProduceExtractionWarning(
        'Unable to parse time string: [{0:s} {1:s}] with error {2!s}'.format(
            repr(row['date']), repr(row['time']), exception))
    return

  if timestamp is None:
    return

  event_data = McafeeAVEventData()
  event_data.action = row['action']
  event_data.filename = row['filename']
  event_data.offset = row_offset
  event_data.rule = row['rule']
  event_data.status = row['status']
  event_data.trigger_location = row['trigger_location']
  event_data.username = row['username']

  event = time_events.TimestampEvent(
      timestamp, definitions.TIME_DESCRIPTION_WRITTEN)
  parser_mediator.ProduceEventWithEventData(event, event_data)
289,262
Verifies if a line of the file is in the expected format. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. row (dict[str, str]): fields of a single row, as specified in COLUMNS. Returns: bool: True if this is the correct parser, False otherwise.
def VerifyRow(self, parser_mediator, row):
  if len(row) != 8:
    return False

  # This file can have a UTF-8 byte-order-marker at the beginning of
  # the first row.
  # TODO: Find out all the code pages this can have. Asked McAfee 10/31.
  row_bytes = codecs.encode(row['date'], parser_mediator.codepage)
  if row_bytes.startswith(b'\xef\xbb\xbf'):
    row['date'] = row['date'][3:]
    self._encoding = 'utf-8'

  # Check the date format!
  # If it doesn't parse, then this isn't a McAfee AV Access Protection Log.
  try:
    timestamp = self._ConvertToTimestamp(
        row['date'], row['time'], parser_mediator.timezone)
  except errors.TimestampError:
    return False

  if timestamp is None:
    return False

  # Use the presence of these strings as a backup or in case of partial file.
  if (not 'Access Protection' in row['status'] and
      not 'Would be blocked' in row['status']):
    return False

  return True
289,263
Compiles an analysis report. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. Returns: AnalysisReport: analysis report.
def CompileReport(self, mediator):
  report_text = [
      'Sessionize plugin identified {0:d} sessions and '
      'applied {1:d} tags.'.format(
          len(self._events_per_session), self._number_of_event_tags)]
  for session, event_count in enumerate(self._events_per_session):
    report_text.append('\tSession {0:d}: {1:d} events'.format(
        session, event_count))
  report_text = '\n'.join(report_text)
  return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)
289,265
Analyzes an EventObject and tags it as part of a session. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event to examine.
def ExamineEvent(self, mediator, event):
  if self._session_end_timestamp is None:
    self._session_end_timestamp = (
        event.timestamp + self._maximum_pause_microseconds)
    self._events_per_session.append(0)

  if event.timestamp > self._session_end_timestamp:
    self._session_counter += 1
    self._events_per_session.append(0)
    self._session_end_timestamp = (
        event.timestamp + self._maximum_pause_microseconds)

  # The counter for the current session is always the last item in the list.
  self._events_per_session[-1] += 1

  label = 'session_{0:d}'.format(self._session_counter)

  event_tag = self._CreateEventTag(event, self._EVENT_TAG_COMMENT, [label])
  mediator.ProduceEventTag(event_tag)

  self._number_of_event_tags += 1
289,266
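The windowing rule above is that an event later than the current session's end timestamp starts a new session, and only a session's first event fixes its window; events inside the window do not extend it. A pared-down sketch of the same bucketing over plain integers:

MAXIMUM_PAUSE = 10

def sessionize(timestamps):
  sessions = []
  session_end = None
  for timestamp in sorted(timestamps):
    if session_end is None or timestamp > session_end:
      # The first event of a new session fixes the window for that session.
      sessions.append([])
      session_end = timestamp + MAXIMUM_PAUSE
    sessions[-1].append(timestamp)
  return sessions

assert sessionize([0, 4, 9, 25, 31]) == [[0, 4, 9], [25, 31]]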
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
def GetMessages(self, formatter_mediator, event):
  if self.DATA_TYPE != event.data_type:
    raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
        event.data_type))

  event_values = event.CopyToDict()

  file_entry_type = event_values.get('file_entry_type', None)
  if file_entry_type is not None:
    event_values['file_entry_type'] = self._FILE_ENTRY_TYPES.get(
        file_entry_type, 'UNKNOWN')

  # The usage of allocated is deprecated in favor of is_allocated but
  # is kept here to be backwards compatible.
  if (not event_values.get('allocated', False) and
      not event_values.get('is_allocated', False)):
    event_values['unallocated'] = 'unallocated'

  return self._ConditionalFormatMessages(event_values)
289,267
Determines the short and long source for an event object. Args: event (EventObject): event. Returns: tuple(str, str): short and long source string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
def GetSources(self, event):
  if self.DATA_TYPE != event.data_type:
    raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
        event.data_type))

  file_system_type = getattr(event, 'file_system_type', 'UNKNOWN')
  timestamp_desc = getattr(event, 'timestamp_desc', 'Time')
  source_long = '{0:s} {1:s}'.format(file_system_type, timestamp_desc)

  return self.SOURCE_SHORT, source_long
289,268
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
def GetMessages(self, formatter_mediator, event):
  if self.DATA_TYPE != event.data_type:
    raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
        event.data_type))

  event_values = event.CopyToDict()

  attribute_type = event_values.get('attribute_type', 0)
  event_values['attribute_name'] = self._ATTRIBUTE_NAMES.get(
      attribute_type, 'UNKNOWN')

  file_reference = event_values.get('file_reference', None)
  if file_reference:
    event_values['file_reference'] = '{0:d}-{1:d}'.format(
        file_reference & 0xffffffffffff, file_reference >> 48)

  parent_file_reference = event_values.get('parent_file_reference', None)
  if parent_file_reference:
    event_values['parent_file_reference'] = '{0:d}-{1:d}'.format(
        parent_file_reference & 0xffffffffffff, parent_file_reference >> 48)

  if not event_values.get('is_allocated', False):
    event_values['unallocated'] = 'unallocated'

  return self._ConditionalFormatMessages(event_values)
289,269
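The paired `& 0xffffffffffff` and `>> 48` operations in these MFT formatters split a 64-bit NTFS file reference into its MFT entry number (lower 48 bits) and sequence number (upper 16 bits). A worked example:

file_reference = (2 << 48) | 41  # sequence number 2, MFT entry 41

mft_entry = file_reference & 0xffffffffffff
sequence_number = file_reference >> 48

assert '{0:d}-{1:d}'.format(mft_entry, sequence_number) == '41-2'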
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
def GetMessages(self, formatter_mediator, event):
  if self.DATA_TYPE != event.data_type:
    raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
        event.data_type))

  event_values = event.CopyToDict()

  file_reference = event_values.get('file_reference', None)
  if file_reference:
    event_values['file_reference'] = '{0:d}-{1:d}'.format(
        file_reference & 0xffffffffffff, file_reference >> 48)

  parent_file_reference = event_values.get('parent_file_reference', None)
  if parent_file_reference:
    event_values['parent_file_reference'] = '{0:d}-{1:d}'.format(
        parent_file_reference & 0xffffffffffff, parent_file_reference >> 48)

  update_reason_flags = event_values.get('update_reason_flags', 0)
  update_reasons = []
  for bitmask, description in sorted(self._USN_REASON_FLAGS.items()):
    if bitmask & update_reason_flags:
      update_reasons.append(description)

  event_values['update_reason'] = ', '.join(update_reasons)

  update_source_flags = event_values.get('update_source_flags', 0)
  update_sources = []
  for bitmask, description in sorted(self._USN_SOURCE_FLAGS.items()):
    if bitmask & update_source_flags:
      update_sources.append(description)

  event_values['update_source'] = ', '.join(update_sources)

  return self._ConditionalFormatMessages(event_values)
289,270
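Both flag loops above turn a bitmask into a sorted, comma-separated list of descriptions. A self-contained sketch of the same pattern; the flag values below follow the documented USN_REASON constants but serve only as illustration:

_EXAMPLE_REASON_FLAGS = {
    0x00000001: 'USN_REASON_DATA_OVERWRITE',
    0x00000002: 'USN_REASON_DATA_EXTEND',
    0x00000100: 'USN_REASON_FILE_CREATE'}

def _DecodeFlags(flag_descriptions, flags):
  """Decodes a flags bitmask into a comma-separated string of descriptions."""
  return ', '.join(
      description for bitmask, description in sorted(flag_descriptions.items())
      if bitmask & flags)

print(_DecodeFlags(_EXAMPLE_REASON_FLAGS, 0x00000101))
# prints: USN_REASON_DATA_OVERWRITE, USN_REASON_FILE_CREATE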
Initializes a multi-processing queue. Args: maximum_number_of_queued_items (Optional[int]): maximum number of queued items, where 0 represents no limit. timeout (Optional[float]): number of seconds for a get operation to time out, where None will block until a new item is put onto the queue.
def __init__(self, maximum_number_of_queued_items=0, timeout=None): super(MultiProcessingQueue, self).__init__() self._timeout = timeout # maxsize contains the maximum number of items allowed to be queued, # where 0 represents unlimited. # We need to check that we aren't asking for a bigger queue than the # platform supports, which requires access to this internal # multiprocessing value. # pylint: disable=no-member,protected-access queue_max_length = _multiprocessing.SemLock.SEM_VALUE_MAX # pylint: enable=no-member,protected-access if maximum_number_of_queued_items > queue_max_length: logger.warning(( 'Requested maximum queue size: {0:d} is larger than the maximum ' 'size supported by the system. Defaulting to: {1:d}').format( maximum_number_of_queued_items, queue_max_length)) maximum_number_of_queued_items = queue_max_length # This queue appears not to be FIFO. self._queue = multiprocessing.Queue(maxsize=maximum_number_of_queued_items)
289,271
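The semaphore check exists because multiprocessing queues are backed by a counted semaphore whose maximum value is platform dependent. A standalone sketch of the clamping logic, assuming the same private CPython attribute is available; _multiprocessing.SemLock is an implementation detail and may differ per platform and Python version:

# pylint: disable=no-member,protected-access
import _multiprocessing

def _ClampQueueSize(requested_size):
  """Clamps a requested queue size to the platform semaphore maximum."""
  maximum_size = _multiprocessing.SemLock.SEM_VALUE_MAX
  if requested_size > maximum_size:
    return maximum_size
  return requested_size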
Closes the queue. This needs to be called from any process or thread putting items onto the queue. Args: abort (Optional[bool]): True if the close was issued on abort.
def Close(self, abort=False): if abort: # Prevent join_thread() from blocking. self._queue.cancel_join_thread() self._queue.close() self._queue.join_thread()
289,272
Pushes an item onto the queue. Args: item (object): item to add. block (Optional[bool]): True to block the process when the queue is full. Raises: QueueFull: if the item could not be pushed onto the queue because it is full.
def PushItem(self, item, block=True): try: self._queue.put(item, block=block) except Queue.Full as exception: raise errors.QueueFull(exception)
289,273
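For reference, the standard library behavior being wrapped looks like this: a bounded queue raises Full when a non-blocking put cannot complete. This sketch uses the Python 3 module name queue; the code above uses the older Queue spelling.

import queue

bounded_queue = queue.Queue(maxsize=1)
bounded_queue.put('first')
try:
  bounded_queue.put('second', block=False)
except queue.Full:
  # PushItem re-raises this as errors.QueueFull.
  print('queue is full')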
Initializes a storage writer object. Args: session (Session): session the storage changes are part of. storage_type (Optional[str]): storage type. task (Optional[Task]): task.
def __init__( self, session, storage_type=definitions.STORAGE_TYPE_SESSION, task=None): super(FakeStorageWriter, self).__init__( session, storage_type=storage_type, task=task) self._event_data = {} self._event_sources = [] self._event_tags = [] self._events = [] self._warnings = [] self._is_open = False self._task_storage_writers = {} self.analysis_reports = [] self.session_completion = None self.session_start = None self.task_completion = None self.task_start = None
289,275
Prepares an attribute container for storage. Args: attribute_container (AttributeContainer): attribute container. Returns: AttributeContainer: copy of the attribute container to store in the fake storage.
def _PrepareAttributeContainer(self, attribute_container): attribute_values_hash = hash(attribute_container.GetAttributeValuesString()) identifier = identifiers.FakeIdentifier(attribute_values_hash) attribute_container.SetIdentifier(identifier) # Make sure the fake storage preserves the state of the attribute container. return copy.deepcopy(attribute_container)
289,276
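The deep copy is what lets the fake storage mimic real storage semantics: once a container is "written", later mutations of the caller's object must not change the stored copy. A minimal illustration of that property:

import copy

original = {'display_name': 'event'}
stored = copy.deepcopy(original)  # what _PrepareAttributeContainer returns
original['display_name'] = 'mutated after storing'
print(stored['display_name'])  # still prints: event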
Reads the event data into the event. This function is intended to offer backwards-compatible event behavior. Args: event (EventObject): event.
def _ReadEventDataIntoEvent(self, event): if self._storage_type != definitions.STORAGE_TYPE_SESSION: return event_data_identifier = event.GetEventDataIdentifier() if event_data_identifier: lookup_key = event_data_identifier.CopyToString() event_data = self._event_data[lookup_key] for attribute_name, attribute_value in event_data.GetAttributes(): setattr(event, attribute_name, attribute_value)
289,277
Adds an analysis report. Args: analysis_report (AnalysisReport): analysis report. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
def AddAnalysisReport(self, analysis_report): self._RaiseIfNotWritable() analysis_report = self._PrepareAttributeContainer(analysis_report) self.analysis_reports.append(analysis_report)
289,278
Adds an event. Args: event (EventObject): event. Raises: IOError: when the storage writer is closed or if the event data identifier type is not supported. OSError: when the storage writer is closed or if the event data identifier type is not supported.
def AddEvent(self, event): self._RaiseIfNotWritable() # TODO: change to no longer allow event_data_identifier to be None after refactoring every parser to generate event data. event_data_identifier = event.GetEventDataIdentifier() if event_data_identifier: if not isinstance(event_data_identifier, identifiers.FakeIdentifier): raise IOError('Unsupported event data identifier type: {0!s}'.format( type(event_data_identifier))) event = self._PrepareAttributeContainer(event) self._events.append(event) self.number_of_events += 1
289,279
Adds event data. Args: event_data (EventData): event data. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
def AddEventData(self, event_data): self._RaiseIfNotWritable() event_data = self._PrepareAttributeContainer(event_data) identifier = event_data.GetIdentifier() lookup_key = identifier.CopyToString() self._event_data[lookup_key] = event_data
289,280
Adds an event source. Args: event_source (EventSource): event source. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
def AddEventSource(self, event_source): self._RaiseIfNotWritable() event_source = self._PrepareAttributeContainer(event_source) self._event_sources.append(event_source) self.number_of_event_sources += 1
289,281
Adds an event tag. Args: event_tag (EventTag): event tag. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
def AddEventTag(self, event_tag): self._RaiseIfNotWritable() event_identifier = event_tag.GetEventIdentifier() if not isinstance(event_identifier, identifiers.FakeIdentifier): raise IOError('Unsupported event identifier type: {0!s}'.format( type(event_identifier))) event_tag = self._PrepareAttributeContainer(event_tag) self._event_tags.append(event_tag) self.number_of_event_tags += 1
289,282
Adds a warning. Args: warning (ExtractionWarning): warning. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
def AddWarning(self, warning): self._RaiseIfNotWritable() warning = self._PrepareAttributeContainer(warning) self._warnings.append(warning) self.number_of_warnings += 1
289,283
Creates a task storage. Args: task (Task): task. Returns: FakeStorageWriter: storage writer. Raises: IOError: if the task storage already exists. OSError: if the task storage already exists.
def CreateTaskStorage(self, task): if task.identifier in self._task_storage_writers: raise IOError('Storage writer for task: {0:s} already exists.'.format( task.identifier)) storage_writer = FakeStorageWriter( self._session, storage_type=definitions.STORAGE_TYPE_TASK, task=task) self._task_storage_writers[task.identifier] = storage_writer return storage_writer
289,284
Retrieves the events in increasing chronological order. Args: time_range (Optional[TimeRange]): time range used to filter events that fall in a specific period. Returns: generator(EventObject): event generator. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
def GetSortedEvents(self, time_range=None): if not self._is_open: raise IOError('Unable to read from closed storage writer.') event_heap = event_heaps.EventHeap() for event in self._events: if (time_range and ( event.timestamp < time_range.start_timestamp or event.timestamp > time_range.end_timestamp)): continue # Make a copy of the event before adding the event data. event = copy.deepcopy(event) # TODO: refactor this into psort. self._ReadEventDataIntoEvent(event) event_heap.PushEvent(event) return iter(event_heap.PopEvents())
289,287
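Conceptually, GetSortedEvents filters on the time range and then drains a heap keyed on timestamp. A simplified sketch using heapq directly; plaso's EventHeap adds event-specific tie-breaking that is omitted here:

import heapq

def sorted_events(events, start_timestamp=None, end_timestamp=None):
  """Yields (timestamp, event) tuples in increasing chronological order."""
  heap = []
  for timestamp, event in events:
    if start_timestamp is not None and timestamp < start_timestamp:
      continue
    if end_timestamp is not None and timestamp > end_timestamp:
      continue
    heapq.heappush(heap, (timestamp, event))
  while heap:
    yield heapq.heappop(heap)

for entry in sorted_events([(3, 'c'), (1, 'a'), (2, 'b')], start_timestamp=2):
  print(entry)  # prints: (2, 'b') then (3, 'c')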
Finalizes a processed task storage. Args: task (Task): task. Raises: IOError: if the task storage does not exist. OSError: if the task storage does not exist.
def FinalizeTaskStorage(self, task): if task.identifier not in self._task_storage_writers: raise IOError('Storage writer for task: {0:s} does not exist.'.format( task.identifier))
289,288
Prepares a task storage for merging. Args: task (Task): task. Raises: IOError: if the task storage does not exist. OSError: if the task storage does not exist.
def PrepareMergeTaskStorage(self, task): if task.identifier not in self._task_storage_writers: raise IOError('Storage writer for task: {0:s} does not exist.'.format( task.identifier))
289,290
Removes a processed task storage. Args: task (Task): task. Raises: IOError: if the task storage does not exist. OSError: if the task storage does not exist.
def RemoveProcessedTaskStorage(self, task): if task.identifier not in self._task_storage_writers: raise IOError('Storage writer for task: {0:s} does not exist.'.format( task.identifier)) del self._task_storage_writers[task.identifier]
289,292
Writes preprocessing information. Args: knowledge_base (KnowledgeBase): used to store the preprocessing information. Raises: IOError: if the storage type does not support writing preprocessing information or when the storage writer is closed. OSError: if the storage type does not support writing preprocessing information or when the storage writer is closed.
def WritePreprocessingInformation(self, knowledge_base): self._RaiseIfNotWritable() if self._storage_type != definitions.STORAGE_TYPE_SESSION: raise IOError('Preprocessing information not supported by storage type.')
289,293
Retrieves a value from a parsed log line, removing empty results. Args: structure (pyparsing.ParseResults): parsed log line. key (str): results key to retrieve from the parsed log line. Returns: object or None: the value of the named key in the parsed log line, or None if the value is a ParseResults object.
def _GetStructureValue(self, structure, key): value = structure.get(key) return value if not isinstance(value, pyparsing.ParseResults) else None
289,296
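The isinstance check matters because pyparsing hands back nested ParseResults objects, not plain scalars, for grouped results names; the helper filters those out so only token values reach the event data. A small demonstration of the two cases:

import pyparsing

scalar = pyparsing.Word(pyparsing.nums)('port')
grouped = pyparsing.Group(pyparsing.Word(pyparsing.nums))('port')

print(type(scalar.parseString('80').get('port')))
# prints: <class 'str'>
print(type(grouped.parseString('80').get('port')))
# prints a ParseResults type (exact module path varies by pyparsing version)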
Parses a comment and stores the appropriate attributes. Args: structure (pyparsing.ParseResults): parsed log line.
def _ParseCommentRecord(self, structure): comment = structure[1] if comment.startswith('Version'): _, _, self._version = comment.partition(':') elif comment.startswith('Software'): _, _, self._software = comment.partition(':') elif comment.startswith('Time'): _, _, time_format = comment.partition(':') if 'local' in time_format.lower(): self._use_local_timezone = True
289,297
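Note that str.partition splits on the first separator only and keeps everything after it verbatim, including any leading space, so values captured this way may still need stripping:

_, _, version = 'Version: 1.5'.partition(':')
print(repr(version))  # prints: ' 1.5'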
Parses a single log line and produces an event object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file.
def _ParseLogLine(self, parser_mediator, structure): try: date_time = dfdatetime_time_elements.TimeElements( time_elements_tuple=structure.date_time) date_time.is_local_time = True except ValueError: parser_mediator.ProduceExtractionWarning( 'invalid date time value: {0!s}'.format(structure.date_time)) return event_data = WinFirewallEventData() event_data.action = self._GetStructureValue(structure, 'action') event_data.dest_ip = self._GetStructureValue(structure, 'dest_ip') event_data.dest_port = self._GetStructureValue(structure, 'dest_port') event_data.flags = self._GetStructureValue(structure, 'flags') event_data.icmp_code = self._GetStructureValue(structure, 'icmp_code') event_data.icmp_type = self._GetStructureValue(structure, 'icmp_type') event_data.info = self._GetStructureValue(structure, 'info') event_data.path = self._GetStructureValue(structure, 'path') event_data.protocol = self._GetStructureValue(structure, 'protocol') event_data.size = self._GetStructureValue(structure, 'size') event_data.source_ip = self._GetStructureValue(structure, 'source_ip') event_data.source_port = self._GetStructureValue(structure, 'source_port') event_data.tcp_ack = self._GetStructureValue(structure, 'tcp_ack') event_data.tcp_seq = self._GetStructureValue(structure, 'tcp_seq') event_data.tcp_win = self._GetStructureValue(structure, 'tcp_win') if self._use_local_timezone: time_zone = parser_mediator.timezone else: time_zone = pytz.UTC event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN, time_zone=time_zone) parser_mediator.ProduceEventWithEventData(event, event_data)
289,298
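The time zone selection at the end reflects that Windows Firewall logs may be written in local time (signaled by the Time comment handled earlier); in that case the mediator's configured zone is used instead of UTC. A sketch with a hypothetical local zone:

import pytz

use_local_timezone = True
mediator_timezone = pytz.timezone('Europe/Amsterdam')  # hypothetical value

time_zone = mediator_timezone if use_local_timezone else pytz.UTC
print(time_zone)  # prints: Europe/Amsterdam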
Creates a service object from an event. Args: service_event (EventObject): event to create a new service object from. Returns: WindowsService: service.
def FromEvent(cls, service_event): _, _, name = service_event.key_path.rpartition( WindowsService._REGISTRY_KEY_PATH_SEPARATOR) service_type = service_event.regvalue.get('Type', '') image_path = service_event.regvalue.get('ImagePath', '') start_type = service_event.regvalue.get('Start', '') service_dll = service_event.regvalue.get('ServiceDll', '') object_name = service_event.regvalue.get('ObjectName', '') if service_event.pathspec: source = (service_event.pathspec.location, service_event.key_path) else: source = ('Unknown', 'Unknown') return cls( name=name, service_type=service_type, image_path=image_path, start_type=start_type, object_name=object_name, source=source, service_dll=service_dll)
289,301
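The service name is simply the last segment of the Registry key path, recovered with rpartition. For example, with a hypothetical Services key path:

_REGISTRY_KEY_PATH_SEPARATOR = '\\'

key_path = 'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Services\\Tcpip'
_, _, name = key_path.rpartition(_REGISTRY_KEY_PATH_SEPARATOR)
print(name)  # prints: Tcpip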