Columns: docstring (string, lengths 52-499), function (string, lengths 67-35.2k), __index_level_0__ (int64, range 52.6k-1.16M)
Initializes a storage merge reader. Args: storage_writer (StorageWriter): storage writer. path (str): path to the input file. Raises: IOError: if the input file cannot be opened. RuntimeError: if an add container method is missing.
def __init__(self, storage_writer, path): super(SQLiteStorageMergeReader, self).__init__(storage_writer) self._active_container_type = None self._active_cursor = None self._add_active_container_method = None self._add_container_type_methods = {} self._compression_format = definitions.COMPRESSION_FORMAT_NONE self._connection = None self._container_types = None self._cursor = None self._event_data_identifier_mappings = {} self._path = path # Create a runtime lookup table for the add container type method. This # prevents having to create a series of if-else checks for container types. # The table is generated at runtime as there are no forward function # declarations in Python. for container_type, method_name in self._ADD_CONTAINER_TYPE_METHODS.items(): method = getattr(self, method_name, None) if not method: raise RuntimeError( 'Add method missing for container type: {0:s}'.format( container_type)) self._add_container_type_methods[container_type] = method
288,986
Adds an event. Args: event (EventObject): event.
def _AddEvent(self, event): if hasattr(event, 'event_data_row_identifier'): event_data_identifier = identifiers.SQLTableIdentifier( self._CONTAINER_TYPE_EVENT_DATA, event.event_data_row_identifier) lookup_key = event_data_identifier.CopyToString() event_data_identifier = self._event_data_identifier_mappings[lookup_key] event.SetEventDataIdentifier(event_data_identifier) # TODO: add event identifier mappings for event tags. self._storage_writer.AddEvent(event)
288,987
Adds event data. Args: event_data (EventData): event data.
def _AddEventData(self, event_data): identifier = event_data.GetIdentifier() lookup_key = identifier.CopyToString() self._storage_writer.AddEventData(event_data) identifier = event_data.GetIdentifier() self._event_data_identifier_mappings[lookup_key] = identifier
288,988
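The two add methods above rely on a simple identifier-mapping pattern during a storage merge: the row-based identifier the event data had in the task store is turned into a string lookup key, and once the event data has been written to the destination store its new identifier is recorded under that key so that events can be re-pointed at it. Below is a minimal, self-contained sketch of that pattern; the Identifier class and the two helper functions are hypothetical stand-ins, not plaso's attribute container API.

class Identifier(object):
  """Hypothetical identifier with a string lookup key."""

  def __init__(self, container_type, row_identifier):
    self.container_type = container_type
    self.row_identifier = row_identifier

  def CopyToString(self):
    return '{0:s}.{1:d}'.format(self.container_type, self.row_identifier)


identifier_mappings = {}

def RecordEventData(event_data_row_identifier, new_identifier):
  """Records the destination-store identifier under the task-store key."""
  old_identifier = Identifier('event_data', event_data_row_identifier)
  identifier_mappings[old_identifier.CopyToString()] = new_identifier

def ResolveEventDataIdentifier(event_data_row_identifier):
  """Looks up the destination-store identifier for an event's old reference."""
  old_identifier = Identifier('event_data', event_data_row_identifier)
  return identifier_mappings[old_identifier.CopyToString()]

RecordEventData(5, Identifier('event_data', 42))
print(ResolveEventDataIdentifier(5).CopyToString())  # event_data.42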
Parses well known actions for easier reading. Args: action (str): the function or action called by the agent. text (str): Mac Wifi log text. Returns: str: a formatted string representing the known (or common) action. If the action is not known, the original log text is returned.
def _GetAction(self, action, text): # TODO: replace "x in y" checks by startswith if possible. if 'airportdProcessDLILEvent' in action: interface = text.split()[0] return 'Interface {0:s} turn up.'.format(interface) if 'doAutoJoin' in action: match = self._CONNECTED_RE.match(text) if match: ssid = match.group(1)[1:-1] else: ssid = 'Unknown' return 'Wifi connected to SSID {0:s}'.format(ssid) if 'processSystemPSKAssoc' in action: wifi_parameters = self._WIFI_PARAMETERS_RE.search(text) if wifi_parameters: ssid = wifi_parameters.group(1) bssid = wifi_parameters.group(2) security = wifi_parameters.group(3) if not ssid: ssid = 'Unknown' if not bssid: bssid = 'Unknown' if not security: security = 'Unknown' return ( 'New wifi configured. BSSID: {0:s}, SSID: {1:s}, ' 'Security: {2:s}.').format(bssid, ssid, security) return text
288,996
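As an illustration of the doAutoJoin branch above, the sketch below extracts an SSID from a sample log line with a regular expression. The pattern and the sample line are assumptions for demonstration only; plaso's actual _CONNECTED_RE may differ.

import re

# Illustrative pattern, not necessarily plaso's _CONNECTED_RE.
CONNECTED_RE = re.compile(r'Already\sassociated\sto\s(.*)\.\sBailing')

def get_auto_join_action(text):
  """Returns a readable action string for a doAutoJoin log line."""
  match = CONNECTED_RE.match(text)
  # The captured SSID is quoted in the log text, hence the [1:-1] strip.
  ssid = match.group(1)[1:-1] if match else 'Unknown'
  return 'Wifi connected to SSID {0:s}'.format(ssid)

print(get_auto_join_action(
    'Already associated to "HomeNetwork". Bailing on auto-join.'))
# Wifi connected to SSID HomeNetwork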
Parse a single log line and produce an event object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): name of the parsed structure. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file.
def _ParseLogLine(self, parser_mediator, key, structure): time_elements_tuple = self._GetTimeElementsTuple(key, structure) try: date_time = dfdatetime_time_elements.TimeElementsInMilliseconds( time_elements_tuple=time_elements_tuple) except ValueError: parser_mediator.ProduceExtractionWarning( 'invalid date time value: {0!s}'.format(structure.date_time)) return self._last_month = time_elements_tuple[1] event_data = MacWifiLogEventData() event_data.agent = structure.agent # Due to the use of CharsNotIn pyparsing structure contains whitespaces # that need to be removed. event_data.function = structure.function.strip() event_data.text = structure.text if key == 'known_function_logline': event_data.action = self._GetAction( event_data.function, event_data.text) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_ADDED) parser_mediator.ProduceEventWithEventData(event, event_data)
288,998
Parses a log record structure and produces events. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): name of the parsed structure. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file. Raises: ParseError: when the structure type is unknown.
def ParseRecord(self, parser_mediator, key, structure): if key not in self._SUPPORTED_KEYS: raise errors.ParseError( 'Unable to parse record, unknown structure: {0:s}'.format(key)) self._ParseLogLine(parser_mediator, key, structure)
288,999
Verify that this file is a Mac Wifi log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. line (str): line from a text file. Returns: bool: True if the line is in the expected format, False if not.
def VerifyStructure(self, parser_mediator, line): self._last_month = 0 self._year_use = parser_mediator.GetEstimatedYear() key = 'header' try: structure = self._MAC_WIFI_HEADER.parseString(line) except pyparsing.ParseException: structure = None if not structure: key = 'turned_over_header' try: structure = self._MAC_WIFI_TURNED_OVER_HEADER.parseString(line) except pyparsing.ParseException: structure = None if not structure: logger.debug('Not a Mac Wifi log file') return False time_elements_tuple = self._GetTimeElementsTuple(key, structure) try: dfdatetime_time_elements.TimeElementsInMilliseconds( time_elements_tuple=time_elements_tuple) except ValueError: logger.debug( 'Not a Mac Wifi log file, invalid date and time: {0!s}'.format( structure.date_time)) return False self._last_month = time_elements_tuple[1] return True
289,000
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
def GetMessages(self, formatter_mediator, event): if self.DATA_TYPE != event.data_type: raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format( event.data_type)) event_values = event.CopyToDict() trigger_type = event_values.get('trigger_type', None) if trigger_type is not None: event_values['trigger_type'] = self._TRIGGER_TYPES.get( trigger_type, '0x{0:04x}'.format(trigger_type)) return self._ConditionalFormatMessages(event_values)
289,001
Extracts data from a Distributed Tracking identifier. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. uuid_object (uuid.UUID): UUID of the Distributed Tracking identifier. origin (str): origin of the event (event source). Returns: str: UUID string of the Distributed Tracking identifier.
def _ParseDistributedTrackingIdentifier( self, parser_mediator, uuid_object, origin): if uuid_object.version == 1: event_data = windows_events.WindowsDistributedLinkTrackingEventData( uuid_object, origin) date_time = dfdatetime_uuid_time.UUIDTime(timestamp=uuid_object.time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) return '{{{0!s}}}'.format(uuid_object)
289,003
Parses the DestList OLECF item. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. olecf_item (pyolecf.item): OLECF item. Raises: UnableToParseFile: if the DestList cannot be parsed.
def ParseDestList(self, parser_mediator, olecf_item): header_map = self._GetDataTypeMap('dest_list_header') try: header, entry_offset = self._ReadStructureFromFileObject( olecf_item, 0, header_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile( 'Unable to parse DestList header with error: {0!s}'.format( exception)) if header.format_version == 1: entry_map = self._GetDataTypeMap('dest_list_entry_v1') elif header.format_version in (3, 4): entry_map = self._GetDataTypeMap('dest_list_entry_v3') else: parser_mediator.ProduceExtractionWarning( 'unsupported format version: {0:d}.'.format(header.format_version)) return while entry_offset < olecf_item.size: try: entry, entry_data_size = self._ReadStructureFromFileObject( olecf_item, entry_offset, entry_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile( 'Unable to parse DestList entry with error: {0!s}'.format( exception)) display_name = 'DestList entry at offset: 0x{0:08x}'.format(entry_offset) try: droid_volume_identifier = self._ParseDistributedTrackingIdentifier( parser_mediator, entry.droid_volume_identifier, display_name) except (TypeError, ValueError) as exception: droid_volume_identifier = '' parser_mediator.ProduceExtractionWarning( 'unable to read droid volume identifier with error: {0!s}'.format( exception)) try: droid_file_identifier = self._ParseDistributedTrackingIdentifier( parser_mediator, entry.droid_file_identifier, display_name) except (TypeError, ValueError) as exception: droid_file_identifier = '' parser_mediator.ProduceExtractionWarning( 'unable to read droid file identifier with error: {0!s}'.format( exception)) try: birth_droid_volume_identifier = ( self._ParseDistributedTrackingIdentifier( parser_mediator, entry.birth_droid_volume_identifier, display_name)) except (TypeError, ValueError) as exception: birth_droid_volume_identifier = '' parser_mediator.ProduceExtractionWarning(( 'unable to read birth droid volume identifier with error: ' '{0:s}').format( exception)) try: birth_droid_file_identifier = self._ParseDistributedTrackingIdentifier( parser_mediator, entry.birth_droid_file_identifier, display_name) except (TypeError, ValueError) as exception: birth_droid_file_identifier = '' parser_mediator.ProduceExtractionWarning(( 'unable to read birth droid file identifier with error: ' '{0:s}').format( exception)) if entry.last_modification_time == 0: date_time = dfdatetime_semantic_time.SemanticTime('Not set') else: date_time = dfdatetime_filetime.Filetime( timestamp=entry.last_modification_time) event_data = AutomaticDestinationsDestListEntryEventData() event_data.birth_droid_file_identifier = birth_droid_file_identifier event_data.birth_droid_volume_identifier = birth_droid_volume_identifier event_data.droid_file_identifier = droid_file_identifier event_data.droid_volume_identifier = droid_volume_identifier event_data.entry_number = entry.entry_number event_data.hostname = entry.hostname.rstrip('\x00') event_data.offset = entry_offset event_data.path = entry.path.rstrip('\x00') event_data.pin_status = entry.pin_status event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data) entry_offset += entry_data_size
289,004
Parses an OLECF file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. root_item (Optional[pyolecf.item]): root item of the OLECF file. Raises: ValueError: If the root_item is not set.
def Process(self, parser_mediator, root_item=None, **kwargs): # This will raise if unhandled keyword arguments are passed. super(AutomaticDestinationsOLECFPlugin, self).Process( parser_mediator, **kwargs) if not root_item: raise ValueError('Root item not set.') for item in root_item.sub_items: if item.name == 'DestList': self.ParseDestList(parser_mediator, item) elif self._RE_LNK_ITEM_NAME.match(item.name): display_name = parser_mediator.GetDisplayName() if display_name: display_name = '{0:s} # {1:s}'.format(display_name, item.name) else: display_name = '# {0:s}'.format(item.name) parser_mediator.AppendToParserChain(self._WINLNK_PARSER) try: item.seek(0, os.SEEK_SET) self._WINLNK_PARSER.ParseFileLNKFile( parser_mediator, item, display_name) finally: parser_mediator.PopFromParserChain()
289,005
Retrieves the inode from the inode value. Args: inode_value (int|str): inode, such as 1 or '27-128-1'. Returns: int: inode or -1 if the inode value cannot be converted to an integer.
def _GetInode(self, inode_value): if isinstance(inode_value, py2to3.INTEGER_TYPES): return inode_value if isinstance(inode_value, float): return int(inode_value) if not isinstance(inode_value, py2to3.STRING_TYPES): return -1 if b'-' in inode_value: inode_value, _, _ = inode_value.partition(b'-') try: return int(inode_value, 10) except ValueError: return -1
289,008
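A self-contained Python 3 restatement of the inode conversion above, with the py2to3 type checks replaced by built-in int/str checks; this is a sketch of the same behavior, not plaso's code.

def get_inode(inode_value):
  """Returns the inode as an int, or -1 if it cannot be converted."""
  if isinstance(inode_value, int):
    return inode_value
  if isinstance(inode_value, float):
    return int(inode_value)
  if not isinstance(inode_value, str):
    return -1
  # Values such as '27-128-1' contain more than the inode number;
  # only the leading component is converted.
  if '-' in inode_value:
    inode_value, _, _ = inode_value.partition('-')
  try:
    return int(inode_value, 10)
  except ValueError:
    return -1

print(get_inode('27-128-1'))  # 27
print(get_inode('invalid'))   # -1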
Adds an attribute that will be set on all events produced. Setting attributes using this method will cause events produced via this mediator to have an attribute with the provided name set with the provided value. Args: attribute_name (str): name of the attribute to add. attribute_value (str): value of the attribute to add. Raises: KeyError: if the event attribute is already set.
def AddEventAttribute(self, attribute_name, attribute_value): if attribute_name in self._extra_event_attributes: raise KeyError('Event attribute {0:s} already set'.format( attribute_name)) self._extra_event_attributes[attribute_name] = attribute_value
289,010
Retrieves the display name for a file entry. Args: file_entry (Optional[dfvfs.FileEntry]): file entry object, where None will return the display name of self._file_entry. Returns: str: human readable string that describes the path to the file entry. Raises: ValueError: if the file entry is missing.
def GetDisplayName(self, file_entry=None): if file_entry is None: file_entry = self._file_entry if file_entry is None: raise ValueError('Missing file entry') path_spec = getattr(file_entry, 'path_spec', None) relative_path = path_helper.PathHelper.GetRelativePathForPathSpec( path_spec, mount_path=self._mount_path) if not relative_path: return file_entry.name return self.GetDisplayNameForPathSpec(path_spec)
289,011
Produces an event source. Args: event_source (EventSource): an event source. Raises: RuntimeError: when storage writer is not set.
def ProduceEventSource(self, event_source): if not self._storage_writer: raise RuntimeError('Storage writer not set.') self._storage_writer.AddEventSource(event_source) self._number_of_event_sources += 1 self.last_activity_timestamp = time.time()
289,016
Produces an event. Args: event (EventObject): event. event_data (EventData): event data. Raises: InvalidEvent: if the event timestamp value is not set or out of bounds.
def ProduceEventWithEventData(self, event, event_data): if event.timestamp is None: raise errors.InvalidEvent('Event timestamp value not set.') if event.timestamp < self._INT64_MIN or event.timestamp > self._INT64_MAX: raise errors.InvalidEvent('Event timestamp value out of bounds.') event_data_hash = event_data.GetAttributeValuesHash() if event_data_hash != self._last_event_data_hash: # Make a copy of the event data before adding additional values. event_data = copy.deepcopy(event_data) # TODO: refactor to ProcessEventData. self.ProcessEvent( event_data, parser_chain=self.GetParserChain(), file_entry=self._file_entry) self._storage_writer.AddEventData(event_data) self._last_event_data_hash = event_data_hash self._last_event_data_identifier = event_data.GetIdentifier() if self._last_event_data_identifier: event.SetEventDataIdentifier(self._last_event_data_identifier) # TODO: remove this after structural fix is in place # https://github.com/log2timeline/plaso/issues/1691 event.parser = self.GetParserChain() self._storage_writer.AddEvent(event) self._number_of_events += 1 self.last_activity_timestamp = time.time()
289,017
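ProduceEventWithEventData above avoids writing identical event data twice in a row by comparing an attribute-values hash against the hash of the last event data written, and reuses the previous identifier when they match. The sketch below shows only that caching pattern, with event data reduced to plain dicts and a list standing in for the storage writer; it is not plaso's storage API.

class LastValueCache(object):
  """Writes event data only when it differs from the previous write."""

  def __init__(self):
    self._last_hash = None
    self._last_identifier = None
    self.writes = []

  def Produce(self, event_data):
    """Returns the identifier of the stored event data."""
    data_hash = hash(frozenset(event_data.items()))
    if data_hash != self._last_hash:
      self.writes.append(event_data)
      self._last_hash = data_hash
      self._last_identifier = len(self.writes) - 1
    return self._last_identifier


cache = LastValueCache()
print(cache.Produce({'path': '/tmp/a', 'size': 1}))  # 0 (written)
print(cache.Produce({'path': '/tmp/a', 'size': 1}))  # 0 (reused)
print(cache.Produce({'path': '/tmp/b', 'size': 2}))  # 1 (written)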
Produces an extraction warning. Args: message (str): message of the warning. path_spec (Optional[dfvfs.PathSpec]): path specification, where None will use the path specification of current file entry set in the mediator. Raises: RuntimeError: when storage writer is not set.
def ProduceExtractionWarning(self, message, path_spec=None): if not self._storage_writer: raise RuntimeError('Storage writer not set.') if not path_spec and self._file_entry: path_spec = self._file_entry.path_spec parser_chain = self.GetParserChain() warning = warnings.ExtractionWarning( message=message, parser_chain=parser_chain, path_spec=path_spec) self._storage_writer.AddWarning(warning) self._number_of_warnings += 1 self.last_activity_timestamp = time.time()
289,018
Removes an attribute from being set on all events produced. Args: attribute_name (str): name of the attribute to remove. Raises: KeyError: if the event attribute is not set.
def RemoveEventAttribute(self, attribute_name): if attribute_name not in self._extra_event_attributes: raise KeyError('Event attribute: {0:s} not set'.format(attribute_name)) del self._extra_event_attributes[attribute_name]
289,019
Takes a sample of the memory usage for profiling. Args: parser_name (str): name of the parser.
def SampleMemoryUsage(self, parser_name): if self._memory_profiler: used_memory = self._process_information.GetUsedMemory() or 0 self._memory_profiler.Sample(parser_name, used_memory)
289,020
Sets the input source configuration settings. Args: configuration (InputSourceConfiguration): input source configuration.
def SetInputSourceConfiguration(self, configuration): mount_path = configuration.mount_path # Remove a trailing path separator from the mount path so the relative # paths will start with a path separator. if mount_path and mount_path.endswith(os.sep): mount_path = mount_path[:-1] self._mount_path = mount_path
289,021
Sets the storage writer. Args: storage_writer (StorageWriter): storage writer.
def SetStorageWriter(self, storage_writer): self._storage_writer = storage_writer # Reset the last event data information. Each storage file should # contain event data for their events. self._last_event_data_hash = None self._last_event_data_identifier = None
289,022
Starts profiling. Args: configuration (ProfilingConfiguration): profiling configuration. identifier (str): identifier of the profiling session used to create the sample filename. process_information (ProcessInfo): process information.
def StartProfiling(self, configuration, identifier, process_information): if not configuration: return if configuration.HaveProfileParsers(): identifier = '{0:s}-parsers'.format(identifier) self._cpu_time_profiler = profilers.CPUTimeProfiler( identifier, configuration) self._cpu_time_profiler.Start() self._memory_profiler = profilers.MemoryProfiler( identifier, configuration) self._memory_profiler.Start() self._process_information = process_information
289,023
Compiles the filter expression. The filter expression is an object filter expression. Args: filter_expression (str): filter expression. Raises: ParseError: if the filter expression cannot be parsed.
def CompileFilter(self, filter_expression): filter_parser = pfilter.BaseParser(filter_expression).Parse() matcher = filter_parser.Compile(pfilter.PlasoAttributeFilterImplementation) self._filter_expression = filter_expression self._matcher = matcher
289,025
Determines if an event matches the filter. Args: event (EventObject): an event. Returns: bool: True if the event matches the filter.
def Match(self, event): if not self._matcher: return True self._decision = self._matcher.Matches(event) return self._decision
289,026
Parses a structure of tokens derived from a line of a text file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): name of the parsed structure. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file. Raises: ParseError: when the structure type is unknown.
def ParseRecord(self, parser_mediator, key, structure): if key != 'line': raise errors.ParseError( 'Unable to parse record, unknown structure: {0:s}'.format(key)) msg_value = structure.get('msg') if not msg_value: parser_mediator.ProduceExtractionWarning( 'missing msg value: {0!s}'.format(structure)) return try: seconds = int(msg_value[0], 10) except ValueError: parser_mediator.ProduceExtractionWarning( 'unsupported number of seconds in msg value: {0!s}'.format( structure)) return try: milliseconds = int(msg_value[1], 10) except ValueError: parser_mediator.ProduceExtractionWarning( 'unsupported number of milliseconds in msg value: {0!s}'.format( structure)) return timestamp = ((seconds * 1000) + milliseconds) * 1000 body_text = structure[2][0] try: # Try to parse the body text as key value pairs. Note that not # all log lines will be properly formatted key value pairs. key_value_dict = self._SELINUX_KEY_VALUE_DICT.parseString(body_text) except pyparsing.ParseException: key_value_dict = {} event_data = SELinuxLogEventData() event_data.audit_type = structure.get('type', None) event_data.body = body_text event_data.pid = key_value_dict.get('pid', None) # TODO: pass line number to offset or remove. event_data.offset = 0 event = time_events.TimestampEvent( timestamp, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
289,028
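The SELinux parser above builds its timestamp as ((seconds * 1000) + milliseconds) * 1000, that is microseconds since the POSIX epoch, from the audit msg value. A short worked example with made-up sample values:

import datetime

# A msg value such as audit(1363728833.863:24) yields the two components below.
seconds = 1363728833
milliseconds = 863

timestamp = ((seconds * 1000) + milliseconds) * 1000  # microseconds
date_time = datetime.datetime(1970, 1, 1) + datetime.timedelta(
    microseconds=timestamp)
print(timestamp)  # 1363728833863000
print(date_time)  # 2013-03-19 21:33:53.863000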
Initializes a status view. Args: output_writer (OutputWriter): output writer. tool_name (str): name of the tool.
def __init__(self, output_writer, tool_name): super(StatusView, self).__init__() self._artifact_filters = None self._filter_file = None self._have_ansi_support = not win32console self._mode = self.MODE_WINDOW self._output_writer = output_writer self._source_path = None self._source_type = None self._stdout_output_writer = isinstance( output_writer, tools.StdoutOutputWriter) self._storage_file_path = None self._tool_name = tool_name if win32console: kernel32 = ctypes.windll.kernel32 stdout_handle = kernel32.GetStdHandle(self._WINAPI_STD_OUTPUT_HANDLE) result = kernel32.SetConsoleMode( stdout_handle, self._WINAPI_ANSI_CONSOLE_MODE) self._have_ansi_support = result != 0
289,029
Adds an analysis process status table row. Args: process_status (ProcessStatus): processing status. table_view (CLITabularTableView): table view.
def _AddsAnalysisProcessStatusTableRow(self, process_status, table_view): used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory) events = '' if (process_status.number_of_consumed_events is not None and process_status.number_of_consumed_events_delta is not None): events = '{0:d} ({1:d})'.format( process_status.number_of_consumed_events, process_status.number_of_consumed_events_delta) event_tags = '' if (process_status.number_of_produced_event_tags is not None and process_status.number_of_produced_event_tags_delta is not None): event_tags = '{0:d} ({1:d})'.format( process_status.number_of_produced_event_tags, process_status.number_of_produced_event_tags_delta) reports = '' if (process_status.number_of_produced_reports is not None and process_status.number_of_produced_reports_delta is not None): reports = '{0:d} ({1:d})'.format( process_status.number_of_produced_reports, process_status.number_of_produced_reports_delta) table_view.AddRow([ process_status.identifier, process_status.pid, process_status.status, used_memory, events, event_tags, reports])
289,030
Adds an extraction process status table row. Args: process_status (ProcessStatus): processing status. table_view (CLITabularTableView): table view.
def _AddExtractionProcessStatusTableRow(self, process_status, table_view): used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory) sources = '' if (process_status.number_of_produced_sources is not None and process_status.number_of_produced_sources_delta is not None): sources = '{0:d} ({1:d})'.format( process_status.number_of_produced_sources, process_status.number_of_produced_sources_delta) events = '' if (process_status.number_of_produced_events is not None and process_status.number_of_produced_events_delta is not None): events = '{0:d} ({1:d})'.format( process_status.number_of_produced_events, process_status.number_of_produced_events_delta) # TODO: shorten display name to fit in 80 chars and show the filename. table_view.AddRow([ process_status.identifier, process_status.pid, process_status.status, used_memory, sources, events, process_status.display_name])
289,031
Formats a number of bytes as a human readable string in units of 1024. Args: size (int): size in bytes. Returns: str: human readable string of the size.
def _FormatSizeInUnitsOf1024(self, size): magnitude_1024 = 0 used_memory_1024 = float(size) while used_memory_1024 >= 1024: used_memory_1024 /= 1024 magnitude_1024 += 1 if 0 < magnitude_1024 <= 7: return '{0:.1f} {1:s}'.format( used_memory_1024, self._UNITS_1024[magnitude_1024]) return '{0:d} B'.format(size)
289,033
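The size formatter above divides by 1024 until the value drops below 1024 and then picks a unit by magnitude. A standalone sketch; the unit list is an assumption for the class constant _UNITS_1024.

UNITS_1024 = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB']

def format_size_in_units_of_1024(size):
  """Formats a byte count as a human readable string."""
  magnitude = 0
  value = float(size)
  while value >= 1024:
    value /= 1024
    magnitude += 1
  if 0 < magnitude <= 7:
    return '{0:.1f} {1:s}'.format(value, UNITS_1024[magnitude])
  return '{0:d} B'.format(size)

print(format_size_in_units_of_1024(512))          # 512 B
print(format_size_in_units_of_1024(2048))         # 2.0 KiB
print(format_size_in_units_of_1024(3 * 1024**3))  # 3.0 GiB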
Prints the analysis status header. Args: processing_status (ProcessingStatus): processing status.
def _PrintAnalysisStatusHeader(self, processing_status): self._output_writer.Write( 'Storage file\t\t: {0:s}\n'.format(self._storage_file_path)) self._PrintProcessingTime(processing_status) if processing_status and processing_status.events_status: self._PrintEventsStatus(processing_status.events_status) self._output_writer.Write('\n')
289,034
Prints an analysis status update in linear mode. Args: processing_status (ProcessingStatus): processing status.
def _PrintAnalysisStatusUpdateLinear(self, processing_status): for worker_status in processing_status.workers_status: status_line = ( '{0:s} (PID: {1:d}) - events consumed: {2:d} - running: ' '{3!s}\n').format( worker_status.identifier, worker_status.pid, worker_status.number_of_consumed_events, worker_status.status not in definitions.ERROR_STATUS_INDICATORS) self._output_writer.Write(status_line)
289,035
Prints an analysis status update in window mode. Args: processing_status (ProcessingStatus): processing status.
def _PrintAnalysisStatusUpdateWindow(self, processing_status): if self._stdout_output_writer: self._ClearScreen() output_text = 'plaso - {0:s} version {1:s}\n\n'.format( self._tool_name, plaso.__version__) self._output_writer.Write(output_text) self._PrintAnalysisStatusHeader(processing_status) table_view = views.CLITabularTableView(column_names=[ 'Identifier', 'PID', 'Status', 'Memory', 'Events', 'Tags', 'Reports'], column_sizes=[23, 7, 15, 15, 15, 15, 0]) self._AddsAnalysisProcessStatusTableRow( processing_status.foreman_status, table_view) for worker_status in processing_status.workers_status: self._AddsAnalysisProcessStatusTableRow(worker_status, table_view) table_view.Write(self._output_writer) self._output_writer.Write('\n') if processing_status.aborted: self._output_writer.Write( 'Processing aborted - waiting for clean up.\n\n') if self._stdout_output_writer: # We need to explicitly flush stdout to prevent partial status updates. sys.stdout.flush()
289,036
Prints an extraction status update in linear mode. Args: processing_status (ProcessingStatus): processing status.
def _PrintExtractionStatusUpdateLinear(self, processing_status): for worker_status in processing_status.workers_status: status_line = ( '{0:s} (PID: {1:d}) - events produced: {2:d} - file: {3:s} ' '- running: {4!s}\n').format( worker_status.identifier, worker_status.pid, worker_status.number_of_produced_events, worker_status.display_name, worker_status.status not in definitions.ERROR_STATUS_INDICATORS) self._output_writer.Write(status_line)
289,037
Prints an extraction status update in window mode. Args: processing_status (ProcessingStatus): processing status.
def _PrintExtractionStatusUpdateWindow(self, processing_status): if self._stdout_output_writer: self._ClearScreen() output_text = 'plaso - {0:s} version {1:s}\n\n'.format( self._tool_name, plaso.__version__) self._output_writer.Write(output_text) self.PrintExtractionStatusHeader(processing_status) table_view = views.CLITabularTableView(column_names=[ 'Identifier', 'PID', 'Status', 'Memory', 'Sources', 'Events', 'File'], column_sizes=[15, 7, 15, 15, 15, 15, 0]) self._AddExtractionProcessStatusTableRow( processing_status.foreman_status, table_view) for worker_status in processing_status.workers_status: self._AddExtractionProcessStatusTableRow(worker_status, table_view) table_view.Write(self._output_writer) self._output_writer.Write('\n') if processing_status.aborted: self._output_writer.Write( 'Processing aborted - waiting for clean up.\n\n') # TODO: remove update flicker. For win32console we could set the cursor # top left, write the table, clean the remainder of the screen buffer # and set the cursor at the end of the table. if self._stdout_output_writer: # We need to explicitly flush stdout to prevent partial status updates. sys.stdout.flush()
289,038
Prints the status of the events. Args: events_status (EventsStatus): events status.
def _PrintEventsStatus(self, events_status): if events_status: table_view = views.CLITabularTableView( column_names=['Events:', 'Filtered', 'In time slice', 'Duplicates', 'MACB grouped', 'Total'], column_sizes=[15, 15, 15, 15, 15, 0]) table_view.AddRow([ '', events_status.number_of_filtered_events, events_status.number_of_events_from_time_slice, events_status.number_of_duplicate_events, events_status.number_of_macb_grouped_events, events_status.total_number_of_events]) self._output_writer.Write('\n') table_view.Write(self._output_writer)
289,039
Prints the processing time. Args: processing_status (ProcessingStatus): processing status.
def _PrintProcessingTime(self, processing_status): if not processing_status: processing_time = '00:00:00' else: processing_time = time.time() - processing_status.start_time time_struct = time.gmtime(processing_time) processing_time = time.strftime('%H:%M:%S', time_struct) self._output_writer.Write( 'Processing time\t\t: {0:s}\n'.format(processing_time))
289,040
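The processing time above is rendered by treating the elapsed seconds as a time value and formatting it with time.gmtime and time.strftime. A small sketch of that conversion; note that the '%H:%M:%S' form wraps around once the elapsed time exceeds 24 hours.

import time

def format_elapsed(start_time, now=None):
  """Formats the elapsed seconds since start_time as HH:MM:SS."""
  if now is None:
    now = time.time()
  return time.strftime('%H:%M:%S', time.gmtime(now - start_time))

start = time.time() - 3725  # pretend processing started 1h 2m 5s ago
print(format_elapsed(start))  # 01:02:05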
Prints the status of the tasks. Args: processing_status (ProcessingStatus): processing status.
def _PrintTasksStatus(self, processing_status): if processing_status and processing_status.tasks_status: tasks_status = processing_status.tasks_status table_view = views.CLITabularTableView( column_names=['Tasks:', 'Queued', 'Processing', 'Merging', 'Abandoned', 'Total'], column_sizes=[15, 7, 15, 15, 15, 0]) table_view.AddRow([ '', tasks_status.number_of_queued_tasks, tasks_status.number_of_tasks_processing, tasks_status.number_of_tasks_pending_merge, tasks_status.number_of_abandoned_tasks, tasks_status.total_number_of_tasks]) self._output_writer.Write('\n') table_view.Write(self._output_writer)
289,041
Prints the extraction status header. Args: processing_status (ProcessingStatus): processing status.
def PrintExtractionStatusHeader(self, processing_status): self._output_writer.Write( 'Source path\t\t: {0:s}\n'.format(self._source_path)) self._output_writer.Write( 'Source type\t\t: {0:s}\n'.format(self._source_type)) if self._artifact_filters: artifacts_string = ', '.join(self._artifact_filters) self._output_writer.Write('Artifact filters\t: {0:s}\n'.format( artifacts_string)) if self._filter_file: self._output_writer.Write('Filter file\t\t: {0:s}\n'.format( self._filter_file)) self._PrintProcessingTime(processing_status) self._PrintTasksStatus(processing_status) self._output_writer.Write('\n')
289,044
Prints a summary of the extraction. Args: processing_status (ProcessingStatus): processing status.
def PrintExtractionSummary(self, processing_status): if not processing_status: self._output_writer.Write( 'WARNING: missing processing status information.\n') elif not processing_status.aborted: if processing_status.error_path_specs: self._output_writer.Write('Processing completed with errors.\n') else: self._output_writer.Write('Processing completed.\n') number_of_warnings = ( processing_status.foreman_status.number_of_produced_warnings) if number_of_warnings: output_text = '\n'.join([ '', ('Number of warnings generated while extracting events: ' '{0:d}.').format(number_of_warnings), '', 'Use pinfo to inspect warnings in more detail.', '']) self._output_writer.Write(output_text) if processing_status.error_path_specs: output_text = '\n'.join([ '', 'Path specifications that could not be processed:', '']) self._output_writer.Write(output_text) for path_spec in processing_status.error_path_specs: self._output_writer.Write(path_spec.comparable) self._output_writer.Write('\n') self._output_writer.Write('\n')
289,045
Sets the source information. Args: source_path (str): path of the source. source_type (str): source type. artifact_filters (Optional[list[str]]): names of artifact definitions to use as filters. filter_file (Optional[str]): filter file.
def SetSourceInformation( self, source_path, source_type, artifact_filters=None, filter_file=None): self._artifact_filters = artifact_filters self._filter_file = filter_file self._source_path = source_path self._source_type = self._SOURCE_TYPES.get(source_type, 'UNKNOWN')
289,046
Parses a cookie row. Args: parser_mediator (ParserMediator): parser mediator. query (str): query that created the row. row (sqlite3.Row): row resulting from the query.
def ParseCookieRow(self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) cookie_name = self._GetRowValue(query_hash, row, 'name') cookie_data = self._GetRowValue(query_hash, row, 'value') hostname = self._GetRowValue(query_hash, row, 'host_key') if hostname.startswith('.'): hostname = hostname[1:] httponly = self._GetRowValue(query_hash, row, 'httponly') path = self._GetRowValue(query_hash, row, 'path') persistent = self._GetRowValue(query_hash, row, 'persistent') secure = self._GetRowValue(query_hash, row, 'secure') if secure: scheme = 'https' else: scheme = 'http' url = '{0:s}://{1:s}{2:s}'.format(scheme, hostname, path) event_data = ChromeCookieEventData() event_data.cookie_name = cookie_name event_data.data = cookie_data event_data.host = hostname event_data.httponly = bool(httponly) event_data.path = path event_data.persistent = bool(persistent) event_data.query = query event_data.secure = bool(secure) event_data.url = url timestamp = self._GetRowValue(query_hash, row, 'creation_utc') date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'last_access_utc') date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'expires_utc') if timestamp: date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_EXPIRATION) parser_mediator.ProduceEventWithEventData(event, event_data) for plugin in self._cookie_plugins: if cookie_name != plugin.COOKIE_NAME: continue try: plugin.UpdateChainAndProcess( parser_mediator, cookie_data=cookie_data, cookie_name=cookie_name, url=url) except Exception as exception: # pylint: disable=broad-except parser_mediator.ProduceExtractionWarning( 'plugin: {0:s} unable to parse cookie with error: {1!s}'.format( plugin.NAME, exception))
289,049
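The creation_utc, last_access_utc and expires_utc values parsed above are WebKit timestamps, i.e. microseconds since 1601-01-01; plaso delegates the conversion to dfdatetime. As a standalone illustration of what such a value represents (the sample value is hypothetical):

import datetime

WEBKIT_EPOCH = datetime.datetime(1601, 1, 1)

def webkit_time_to_datetime(timestamp):
  """Converts a WebKit timestamp in microseconds since 1601-01-01."""
  return WEBKIT_EPOCH + datetime.timedelta(microseconds=timestamp)

print(webkit_time_to_datetime(13119755000000000))  # 2016-10-01 00:23:20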
Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
def AddArguments(cls, argument_group): default_fields = ','.join(cls._DEFAULT_FIELDS) argument_group.add_argument( '--fields', dest='fields', type=str, action='store', default=default_fields, help=( 'Defines which fields should be included in the output.')) default_fields = ', '.join(cls._DEFAULT_FIELDS) argument_group.add_argument( '--additional_fields', dest='additional_fields', type=str, action='store', default='', help=( 'Defines extra fields to be included in the output, in addition to' ' the default fields, which are {0:s}.'.format(default_fields)))
289,050
Parses and validates options. Args: options (argparse.Namespace): parser options. output_module (OutputModule): output module to configure. Raises: BadConfigObject: when the output module object is of the wrong type. BadConfigOption: when the output filename was not provided.
def ParseOptions(cls, options, output_module): # pylint: disable=arguments-differ if not isinstance(output_module, dynamic.DynamicOutputModule): raise errors.BadConfigObject( 'Output module is not an instance of DynamicOutputModule') default_fields = ','.join(cls._DEFAULT_FIELDS) fields = cls._ParseStringOption( options, 'fields', default_value=default_fields) additional_fields = cls._ParseStringOption( options, 'additional_fields') if additional_fields: fields = '{0:s},{1:s}'.format(fields, additional_fields) output_module.SetFields([ field_name.strip() for field_name in fields.split(',')])
289,051
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): for subkey in registry_key.GetSubkeys(): drive_letter = subkey.name if not drive_letter: continue values_dict = { 'DriveLetter': drive_letter, 'Type': 'Mapped Drive'} # Get the remote path if it exists. remote_path_value = subkey.GetValueByName('RemotePath') if remote_path_value: remote_path = remote_path_value.GetDataAsObject() if remote_path.startswith('\\\\'): server_name, _, share_name = remote_path[2:].partition('\\') values_dict['RemoteServer'] = server_name values_dict['ShareName'] = '\\{0:s}'.format( share_name.replace('#', '\\')) event_data = windows_events.WindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = subkey.offset event_data.regvalue = values_dict event_data.source_append = self._SOURCE_APPEND event_data.urls = self.URLS event = time_events.DateTimeValuesEvent( subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
289,052
Builds the event tag index. Args: storage_file (BaseStorageFile): storage file.
def _Build(self, storage_file): self._index = {} for event_tag in storage_file.GetEventTags(): self.SetEventTag(event_tag)
289,054
Retrieves the most recently updated event tag for an event. Args: storage_file (BaseStorageFile): storage file. event_identifier (AttributeContainerIdentifier): event attribute container identifier. Returns: EventTag: event tag or None if the event has no event tag.
def GetEventTagByIdentifier(self, storage_file, event_identifier): if not self._index: self._Build(storage_file) lookup_key = event_identifier.CopyToString() event_tag_identifier = self._index.get(lookup_key, None) if not event_tag_identifier: return None return storage_file.GetEventTagByIdentifier(event_tag_identifier)
289,055
Sets an event tag in the index. Args: event_tag (EventTag): event tag.
def SetEventTag(self, event_tag): event_identifier = event_tag.GetEventIdentifier() lookup_key = event_identifier.CopyToString() self._index[lookup_key] = event_tag.GetIdentifier()
289,056
Expands a path to contain all users home or profile directories. Expands the artifacts path variable "%%users.homedir%%" or "%%users.userprofile%%". Args: path_segments (list[str]): path segments. path_separator (str): path segment separator. user_accounts (list[UserAccountArtifact]): user accounts. Returns: list[str]: expanded paths, without a leading Windows drive indicator.
def _ExpandUsersHomeDirectoryPathSegments( cls, path_segments, path_separator, user_accounts): if not path_segments: return [] user_paths = [] first_path_segment = path_segments[0].lower() if first_path_segment not in ('%%users.homedir%%', '%%users.userprofile%%'): if cls._IsWindowsDrivePathSegment(path_segments[0]): path_segments[0] = '' user_path = path_separator.join(path_segments) user_paths.append(user_path) else: for user_account in user_accounts: user_path_segments = user_account.GetUserDirectoryPathSegments() if not user_path_segments: continue if cls._IsWindowsDrivePathSegment(user_path_segments[0]): user_path_segments[0] = '' # Prevent concatenating two consecutive path segment separators. if not user_path_segments[-1]: user_path_segments.pop() user_path_segments.extend(path_segments[1:]) user_path = path_separator.join(user_path_segments) user_paths.append(user_path) return user_paths
289,057
Expands path segments with a users variable, e.g. %%users.homedir%%. Args: path_segments (list[str]): path segments. path_separator (str): path segment separator. user_accounts (list[UserAccountArtifact]): user accounts. Returns: list[str]: paths for which the users variables have been expanded.
def _ExpandUsersVariablePathSegments( cls, path_segments, path_separator, user_accounts): if not path_segments: return [] path_segments_lower = [ path_segment.lower() for path_segment in path_segments] if path_segments_lower[0] in ('%%users.homedir%%', '%%users.userprofile%%'): return cls._ExpandUsersHomeDirectoryPathSegments( path_segments, path_separator, user_accounts) path_expansions = cls._PATH_EXPANSIONS_PER_USERS_VARIABLE.get( path_segments[0], None) if path_expansions: expanded_paths = [] for path_expansion in path_expansions: expanded_path_segments = list(path_expansion) expanded_path_segments.extend(path_segments[1:]) paths = cls._ExpandUsersVariablePathSegments( expanded_path_segments, path_separator, user_accounts) expanded_paths.extend(paths) return expanded_paths if cls._IsWindowsDrivePathSegment(path_segments[0]): path_segments[0] = '' # TODO: add support for %%users.username%% path = path_separator.join(path_segments) return [path]
289,058
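A simplified, self-contained illustration of the users-variable expansion performed by the two methods above, with user accounts reduced to plain home-directory strings; plaso's UserAccountArtifact objects and the Windows drive handling are left out.

def expand_users_homedir(path_segments, path_separator, user_home_dirs):
  """Expands a leading %%users.homedir%% against each user's home directory."""
  if not path_segments:
    return []
  if path_segments[0].lower() not in (
      '%%users.homedir%%', '%%users.userprofile%%'):
    return [path_separator.join(path_segments)]
  paths = []
  for home_dir in user_home_dirs:
    user_segments = home_dir.split(path_separator) + path_segments[1:]
    paths.append(path_separator.join(user_segments))
  return paths

print(expand_users_homedir(
    ['%%users.homedir%%', '.bash_history'], '/', ['/home/alice', '/root']))
# ['/home/alice/.bash_history', '/root/.bash_history']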
Determines if the path segment contains a Windows Drive indicator. A drive indicator can be a drive letter or %SystemDrive%. Args: path_segment (str): path segment. Returns: bool: True if the path segment contains a Windows Drive indicator.
def _IsWindowsDrivePathSegment(cls, path_segment): if (len(path_segment) == 2 and path_segment[1] == ':' and path_segment[0].isalpha()): return True path_segment = path_segment.upper() return path_segment in ('%%ENVIRON_SYSTEMDRIVE%%', '%SYSTEMDRIVE%')
289,059
Expands a path with a users variable, e.g. %%users.homedir%%. Args: path (str): path with users variable. path_separator (str): path segment separator. user_accounts (list[UserAccountArtifact]): user accounts. Returns: list[str]: paths for which the users variables have been expanded.
def ExpandUsersVariablePath(cls, path, path_separator, user_accounts): path_segments = path.split(path_separator) return cls._ExpandUsersVariablePathSegments( path_segments, path_separator, user_accounts)
289,062
Expands a Windows path containing environment variables. Args: path (str): Windows path with environment variables. environment_variables (list[EnvironmentVariableArtifact]): environment variables. Returns: str: expanded Windows path.
def ExpandWindowsPath(cls, path, environment_variables): if environment_variables is None: environment_variables = [] lookup_table = {} if environment_variables: for environment_variable in environment_variables: attribute_name = environment_variable.name.upper() attribute_value = environment_variable.value if not isinstance(attribute_value, py2to3.STRING_TYPES): continue lookup_table[attribute_name] = attribute_value path_segments = path.split('\\') # Make a copy of path_segments since this loop can change it. for index, path_segment in enumerate(list(path_segments)): if (len(path_segment) <= 2 or not path_segment.startswith('%') or not path_segment.endswith('%')): continue path_segment_upper_case = path_segment.upper() if path_segment_upper_case.startswith('%%ENVIRON_'): lookup_key = path_segment_upper_case[10:-2] else: lookup_key = path_segment_upper_case[1:-1] path_segment = lookup_table.get(lookup_key, path_segment) path_segment = path_segment.split('\\') expanded_path_segments = list(path_segments[:index]) expanded_path_segments.extend(path_segment) expanded_path_segments.extend(path_segments[index + 1:]) path_segments = expanded_path_segments if cls._IsWindowsDrivePathSegment(path_segments[0]): path_segments[0] = '' return '\\'.join(path_segments)
289,063
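A reduced sketch of the Windows path expansion above, with the environment variables passed as a plain dict; the %%environ_...%% artifact form, the drive-segment handling and the EnvironmentVariableArtifact type are omitted.

def expand_windows_path(path, environment_variables):
  """Expands %VARIABLE% style path segments using a name-to-value mapping."""
  lookup_table = {
      name.upper(): value for name, value in environment_variables.items()}
  expanded_segments = []
  for segment in path.split('\\'):
    if len(segment) > 2 and segment.startswith('%') and segment.endswith('%'):
      value = lookup_table.get(segment[1:-1].upper(), segment)
      expanded_segments.extend(value.split('\\'))
    else:
      expanded_segments.append(segment)
  return '\\'.join(expanded_segments)

print(expand_windows_path(
    '%SystemRoot%\\System32\\config\\SAM', {'SystemRoot': 'C:\\Windows'}))
# C:\Windows\System32\config\SAM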
Attempts to convert the argument to a Unicode string. Args: value (list|int|bytes|str): value to convert. Returns: str: string representation of the argument.
def GetUnicodeString(value): if isinstance(value, list): value = [GetUnicodeString(item) for item in value] return ''.join(value) if isinstance(value, py2to3.INTEGER_TYPES): value = '{0:d}'.format(value) if not isinstance(value, py2to3.UNICODE_TYPE): return codecs.decode(value, 'utf8', 'ignore') return value
289,066
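GetUnicodeString above normalizes lists, integers and byte strings to text. A Python 3 sketch of the same behavior, with the py2to3 checks collapsed to the built-in int, str and bytes types (assumed equivalent, not taken from plaso):

import codecs

def get_unicode_string(value):
  """Converts lists, integers and byte strings to a text string."""
  if isinstance(value, list):
    return ''.join(get_unicode_string(item) for item in value)
  if isinstance(value, int):
    value = '{0:d}'.format(value)
  if not isinstance(value, str):
    return codecs.decode(value, 'utf8', 'ignore')
  return value

print(get_unicode_string([b'event', 42, 'tags']))      # event42tags
print(get_unicode_string(b'\xc3\xa9v\xc3\xa9nement'))  # événement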
Initializes a filter. Args: arguments: arguments to the filter. value_expander: a callable that will be used to expand values for the objects passed to this filter. Expander implementations are provided by subclassing ValueExpander. Raises: ValueError: if the given value_expander is not a subclass of ValueExpander.
def __init__(self, arguments=None, value_expander=None): self.value_expander = None self.value_expander_cls = value_expander if self.value_expander_cls: if not issubclass(self.value_expander_cls, ValueExpander): raise ValueError('{0:s} is not a valid value expander'.format( self.value_expander_cls)) self.value_expander = self.value_expander_cls() self.args = arguments or [] logging.debug('Adding {0!s}'.format(arguments))
289,067
Escapes backslashes found inside a quoted string. Backslashes followed by anything other than [\'"rnbt.ws] will raise an error. Args: string: the string that matched. match: the match object (instance of re.MatchObject), where match.group(1) contains the escaped code. Raises: ParseError: when the escaped character is not one of [\'"rnbt.ws].
def StringEscape(self, string, match, **unused_kwargs): if match.group(1) in '\\\'"rnbt\\.ws': self.string += codecs.decode(string, 'unicode_escape') else: raise errors.ParseError('Invalid escape character {0:s}.'.format(string))
289,092
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
def GetMessages(self, formatter_mediator, event): if self.DATA_TYPE != event.data_type: raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format( event.data_type)) event_values = event.CopyToDict() page_transition_type = event_values.get('page_transition_type', None) if page_transition_type is not None: page_transition, page_transition_long = self._PAGE_TRANSITIONS.get( page_transition_type, self._UNKNOWN_PAGE_TRANSITION) if page_transition_long: event_values['page_transition'] = '{0:s} - {1:s}'.format( page_transition, page_transition_long) else: event_values['page_transition'] = page_transition visit_source = event_values.get('visit_source', None) if visit_source is not None: event_values['visit_source'] = self._VISIT_SOURCE.get( visit_source, 'UNKNOWN') extras = [] url_hidden = event_values.get('url_hidden', False) if url_hidden: extras.append('(url hidden)') typed_count = event_values.get('typed_count', 0) if typed_count == 0: extras.append('(URL not typed directly - no typed count)') elif typed_count == 1: extras.append('(type count {0:d} time)'.format(typed_count)) else: extras.append('(type count {0:d} times)'.format(typed_count)) event_values['extra'] = ' '.join(extras) return self._ConditionalFormatMessages(event_values)
289,097
Extracts data from an MSIE Cache File (MSIECF) leak item. Every item is stored as an event object, one for each timestamp. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. cache_directories (list[str]): cache directory names. msiecf_item (pymsiecf.leak): MSIECF leak item. recovered (Optional[bool]): True if the item was recovered.
def _ParseLeak( self, parser_mediator, cache_directories, msiecf_item, recovered=False): # TODO: add support for possible last cache synchronization date and time. date_time = dfdatetime_semantic_time.SemanticTime('Not set') event_data = MSIECFLeakEventData() event_data.cached_filename = msiecf_item.filename event_data.cached_file_size = msiecf_item.cached_file_size event_data.cache_directory_index = msiecf_item.cache_directory_index event_data.offset = msiecf_item.offset event_data.recovered = recovered if (event_data.cache_directory_index >= 0 and event_data.cache_directory_index < len(cache_directories)): event_data.cache_directory_name = ( cache_directories[event_data.cache_directory_index]) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME) parser_mediator.ProduceEventWithEventData(event, event_data)
289,101
Parses MSIE Cache File (MSIECF) items. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. msiecf_file (pymsiecf.file): MSIECF file.
def _ParseItems(self, parser_mediator, msiecf_file): format_version = msiecf_file.format_version decode_error = False cache_directories = [] for cache_directory_name in iter(msiecf_file.cache_directories): try: cache_directory_name = cache_directory_name.decode('ascii') except UnicodeDecodeError: decode_error = True cache_directory_name = cache_directory_name.decode( 'ascii', errors='replace') cache_directories.append(cache_directory_name) if decode_error: parser_mediator.ProduceExtractionWarning(( 'unable to decode cache directory names. Characters that cannot ' 'be decoded will be replaced with "?" or "\\ufffd".')) for item_index in range(0, msiecf_file.number_of_items): try: msiecf_item = msiecf_file.get_item(item_index) if isinstance(msiecf_item, pymsiecf.leak): self._ParseLeak(parser_mediator, cache_directories, msiecf_item) elif isinstance(msiecf_item, pymsiecf.redirected): self._ParseRedirected(parser_mediator, msiecf_item) elif isinstance(msiecf_item, pymsiecf.url): self._ParseUrl( parser_mediator, format_version, cache_directories, msiecf_item) except IOError as exception: parser_mediator.ProduceExtractionWarning( 'Unable to parse item: {0:d} with error: {1!s}'.format( item_index, exception)) for item_index in range(0, msiecf_file.number_of_recovered_items): try: msiecf_item = msiecf_file.get_recovered_item(item_index) if isinstance(msiecf_item, pymsiecf.leak): self._ParseLeak( parser_mediator, cache_directories, msiecf_item, recovered=True) elif isinstance(msiecf_item, pymsiecf.redirected): self._ParseRedirected(parser_mediator, msiecf_item, recovered=True) elif isinstance(msiecf_item, pymsiecf.url): self._ParseUrl( parser_mediator, format_version, cache_directories, msiecf_item, recovered=True) except IOError as exception: parser_mediator.ProduceExtractionWarning( 'Unable to parse recovered item: {0:d} with error: {1!s}'.format( item_index, exception))
289,102
Extracts data from an MSIE Cache File (MSIECF) redirected item. Every item is stored as an event object, one for each timestamp. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. msiecf_item (pymsiecf.redirected): MSIECF redirected item. recovered (Optional[bool]): True if the item was recovered.
def _ParseRedirected( self, parser_mediator, msiecf_item, recovered=False): date_time = dfdatetime_semantic_time.SemanticTime('Not set') event_data = MSIECFRedirectedEventData() event_data.offset = msiecf_item.offset event_data.recovered = recovered event_data.url = msiecf_item.location event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME) parser_mediator.ProduceEventWithEventData(event, event_data)
289,103
Parses a MSIE Cache File (MSIECF) file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object.
def ParseFileObject(self, parser_mediator, file_object): msiecf_file = pymsiecf.file() msiecf_file.set_ascii_codepage(parser_mediator.codepage) try: msiecf_file.open_file_object(file_object) except IOError as exception: parser_mediator.ProduceExtractionWarning( 'unable to open file with error: {0!s}'.format(exception)) return try: self._ParseItems(parser_mediator, msiecf_file) finally: msiecf_file.close()
289,105
Extracts relevant Spotlight entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
def GetEntries(self, parser_mediator, match=None, **unused_kwargs): shortcuts = match.get('UserShortcuts', {}) for search_text, data in iter(shortcuts.items()): datetime_value = data.get('LAST_USED', None) if not datetime_value: continue display_name = data.get('DISPLAY_NAME', '<DISPLAY_NAME>') path = data.get('PATH', '<PATH>') event_data = plist_event.PlistTimeEventData() event_data.desc = ( 'Spotlight term searched "{0:s}" associate to {1:s} ({2:s})').format( search_text, display_name, path) event_data.key = search_text event_data.root = '/UserShortcuts' event = time_events.PythonDatetimeEvent( datetime_value, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
289,106
Retrieves a value from the row. Args: query_hash (int): hash of the query that uniquely identifies the query that produced the row. row (sqlite3.Row): row. value_name (str): name of the value. Returns: object: value.
def _GetRowValue(self, query_hash, row, value_name): keys_name_to_index_map = self._keys_per_query.get(query_hash, None) if not keys_name_to_index_map: keys_name_to_index_map = { name: index for index, name in enumerate(row.keys())} self._keys_per_query[query_hash] = keys_name_to_index_map value_index = keys_name_to_index_map.get(value_name) # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". return row[value_index]
289,107
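The name-to-index map above is built once per query so that values can be fetched by column name even on sqlite bindings that reject Unicode keys in row['name']. A standalone sketch with Python's sqlite3 module; the table, query and data are hypothetical.

import sqlite3

connection = sqlite3.connect(':memory:')
connection.row_factory = sqlite3.Row
connection.execute('CREATE TABLE cookies (name TEXT, value TEXT)')
connection.execute('INSERT INTO cookies VALUES (?, ?)', ('sid', 'abc123'))

keys_per_query = {}

def get_row_value(query_hash, row, value_name):
  """Returns a row value by name using a cached name-to-index map."""
  name_to_index = keys_per_query.get(query_hash)
  if name_to_index is None:
    name_to_index = {name: index for index, name in enumerate(row.keys())}
    keys_per_query[query_hash] = name_to_index
  return row[name_to_index[value_name]]

query = 'SELECT name, value FROM cookies'
for row in connection.execute(query):
  print(get_row_value(hash(query), row, 'value'))  # abc123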
Hashes the given row. Args: row (sqlite3.Row): row. Returns: int: hash value of the given row.
def _HashRow(cls, row): values = [] for value in row: try: value = '{0!s}'.format(value) except UnicodeDecodeError: # In Python 2, blobs are "read-write buffer" and will cause a # UnicodeDecodeError exception if we try format it as a string. # Since Python 3 does not support the buffer type we cannot check # the type of value. value = repr(value) values.append(value) return hash(' '.join(values))
289,108
Queries a database and parses the results. Args: parser_mediator (ParserMediator): parser mediator. database (SQLiteDatabase): database. query (str): query. callback (function): function to invoke to parse an individual row. cache (SQLiteCache): cache.
def _ParseQuery(self, parser_mediator, database, query, callback, cache): row_cache = cache.GetRowCache(query) try: rows = database.Query(query) except sqlite3.DatabaseError as exception: parser_mediator.ProduceExtractionWarning( 'unable to run query: {0:s} on database with error: {1!s}'.format( query, exception)) return for index, row in enumerate(rows): if parser_mediator.abort: break row_hash = self._HashRow(row) if row_hash in row_cache: continue try: callback(parser_mediator, query, row, cache=cache, database=database) except Exception as exception: # pylint: disable=broad-except parser_mediator.ProduceExtractionWarning(( 'unable to parse row: {0:d} with callback: {1:s} on database ' 'with error: {2!s}').format( index, callback.__name__, exception)) # TODO: consider removing return. return row_cache.add(row_hash)
289,109
Checks the schema of a database with that defined in the plugin. Args: database (SQLiteDatabase): database. Returns: bool: True if the schema of the database matches that defined by the plugin, or False if the schemas do not match or no schema is defined by the plugin.
def CheckSchema(self, database): schema_match = False if self.SCHEMAS: for schema in self.SCHEMAS: if database and database.schema == schema: schema_match = True return schema_match
289,110
Parses a row from the database.

Args:
  parser_mediator (ParserMediator): mediates interactions between parsers
      and other components, such as storage and dfvfs.
  query (str): query that created the row.
  row (sqlite3.Row): row.

def ParseCookieRow(self, parser_mediator, query, row, **unused_kwargs):
  query_hash = hash(query)

  cookie_name = self._GetRowValue(query_hash, row, 'name')
  cookie_value = self._GetRowValue(query_hash, row, 'value')
  path = self._GetRowValue(query_hash, row, 'path')

  hostname = self._GetRowValue(query_hash, row, 'domain')
  if hostname.startswith('.'):
    hostname = hostname[1:]

  secure = self._GetRowValue(query_hash, row, 'secure')
  # The WebView database stores the secure flag as an integer type,
  # but we represent it as a boolean.
  secure = secure != 0

  if secure:
    scheme = 'https'
  else:
    scheme = 'http'

  url = '{0:s}://{1:s}{2:s}'.format(scheme, hostname, path)

  event_data = WebViewCookieEventData()
  event_data.cookie_name = cookie_name
  event_data.data = cookie_value
  event_data.host = hostname
  event_data.offset = self._GetRowValue(query_hash, row, '_id')
  event_data.path = path
  event_data.query = query
  event_data.secure = secure
  event_data.url = url

  timestamp = self._GetRowValue(query_hash, row, 'expires')
  if timestamp:
    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
  else:
    date_time = dfdatetime_semantic_time.SemanticTime('Infinity')

  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_EXPIRATION)
  parser_mediator.ProduceEventWithEventData(event, event_data)

  # Go through all cookie plugins to see if there is any specific parsing
  # needed.
  for cookie_plugin in self._cookie_plugins:
    try:
      cookie_plugin.UpdateChainAndProcess(
          parser_mediator, cookie_name=cookie_name,
          cookie_data=cookie_value, url=url)
    except errors.WrongPlugin:
      pass
289,114
Parses a Windows Restore Point (rp.log) log file-like object.

Args:
  parser_mediator (ParserMediator): mediates interactions between parsers
      and other components, such as storage and dfvfs.
  file_object (dfvfs.FileIO): file-like object.

Raises:
  UnableToParseFile: when the file cannot be parsed.

def ParseFileObject(self, parser_mediator, file_object):
  file_size = file_object.get_size()

  file_header_map = self._GetDataTypeMap('rp_log_file_header')

  try:
    file_header, _ = self._ReadStructureFromFileObject(
        file_object, 0, file_header_map)
  except (ValueError, errors.ParseError) as exception:
    raise errors.UnableToParseFile(
        'Unable to parse file header with error: {0!s}'.format(
            exception))

  file_footer_map = self._GetDataTypeMap('rp_log_file_footer')

  file_footer_offset = file_size - file_footer_map.GetByteSize()

  try:
    file_footer, _ = self._ReadStructureFromFileObject(
        file_object, file_footer_offset, file_footer_map)
  except (ValueError, errors.ParseError) as exception:
    parser_mediator.ProduceExtractionWarning(
        'unable to parse file footer with error: {0!s}'.format(exception))
    return

  # The description in the file header includes the end-of-string character
  # that we need to strip off.
  description = file_header.description.rstrip('\0')

  if file_footer.creation_time == 0:
    date_time = dfdatetime_semantic_time.SemanticTime('Not set')
  else:
    date_time = dfdatetime_filetime.Filetime(
        timestamp=file_footer.creation_time)

  event_data = RestorePointEventData()
  event_data.description = description
  event_data.restore_point_event_type = file_header.event_type
  event_data.restore_point_type = file_header.restore_point_type
  event_data.sequence_number = file_header.sequence_number

  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_CREATION)
  parser_mediator.ProduceEventWithEventData(event, event_data)
289,116
Initializes a store.

Args:
  maximum_buffer_size (Optional[int]): maximum size of a single storage
      stream. A value of 0 indicates the limit is _MAXIMUM_BUFFER_SIZE.
  storage_type (Optional[str]): storage type.

Raises:
  ValueError: if the maximum buffer size value is out of bounds.

def __init__(
    self, maximum_buffer_size=0,
    storage_type=definitions.STORAGE_TYPE_SESSION):
  if (maximum_buffer_size < 0 or
      maximum_buffer_size > self._MAXIMUM_BUFFER_SIZE):
    raise ValueError('Maximum buffer size value out of bounds.')

  if not maximum_buffer_size:
    maximum_buffer_size = self._MAXIMUM_BUFFER_SIZE

  super(SQLiteStorageFile, self).__init__()
  self._connection = None
  self._cursor = None
  self._last_session = 0
  self._maximum_buffer_size = maximum_buffer_size
  self._serialized_event_heap = event_heaps.SerializedEventHeap()

  if storage_type == definitions.STORAGE_TYPE_SESSION:
    self.compression_format = definitions.COMPRESSION_FORMAT_ZLIB
  else:
    self.compression_format = definitions.COMPRESSION_FORMAT_NONE

  self.format_version = self._FORMAT_VERSION
  self.serialization_format = definitions.SERIALIZER_FORMAT_JSON
  self.storage_type = storage_type
289,117
Adds an attribute container.

Args:
  container_type (str): attribute container type.
  attribute_container (AttributeContainer): attribute container.

Raises:
  IOError: if the attribute container cannot be serialized.
  OSError: if the attribute container cannot be serialized.

def _AddAttributeContainer(self, container_type, attribute_container):
  container_list = self._GetSerializedAttributeContainerList(container_type)

  identifier = identifiers.SQLTableIdentifier(
      container_type, container_list.next_sequence_number + 1)
  attribute_container.SetIdentifier(identifier)

  serialized_data = self._SerializeAttributeContainer(attribute_container)
  container_list.PushAttributeContainer(serialized_data)

  if container_list.data_size > self._maximum_buffer_size:
    self._WriteSerializedAttributeContainerList(container_type)
289,118
Adds a serialized event.

Args:
  event (EventObject): event.

Raises:
  IOError: if the event cannot be serialized.
  OSError: if the event cannot be serialized.

def _AddSerializedEvent(self, event):
  identifier = identifiers.SQLTableIdentifier(
      self._CONTAINER_TYPE_EVENT,
      self._serialized_event_heap.number_of_events + 1)
  event.SetIdentifier(identifier)

  serialized_data = self._SerializeAttributeContainer(event)
  self._serialized_event_heap.PushEvent(event.timestamp, serialized_data)

  if self._serialized_event_heap.data_size > self._maximum_buffer_size:
    self._WriteSerializedAttributeContainerList(self._CONTAINER_TYPE_EVENT)
289,119
Checks the storage metadata.

Args:
  metadata_values (dict[str, str]): metadata values per key.
  check_readable_only (Optional[bool]): whether the store should only be
      checked to see if it can be read. If False, the store will be checked
      to see if it can be read and written to.

Raises:
  IOError: if the format version or the serializer format is not supported.
  OSError: if the format version or the serializer format is not supported.

def _CheckStorageMetadata(cls, metadata_values, check_readable_only=False):
  format_version = metadata_values.get('format_version', None)

  if not format_version:
    raise IOError('Missing format version.')

  try:
    format_version = int(format_version, 10)
  except (TypeError, ValueError):
    raise IOError('Invalid format version: {0!s}.'.format(format_version))

  if not check_readable_only and format_version != cls._FORMAT_VERSION:
    raise IOError('Format version: {0:d} is not supported.'.format(
        format_version))

  if format_version < cls._COMPATIBLE_FORMAT_VERSION:
    raise IOError(
        'Format version: {0:d} is too old and no longer supported.'.format(
            format_version))

  if format_version > cls._FORMAT_VERSION:
    raise IOError(
        'Format version: {0:d} is too new and not yet supported.'.format(
            format_version))

  metadata_values['format_version'] = format_version

  compression_format = metadata_values.get('compression_format', None)
  if compression_format not in definitions.COMPRESSION_FORMATS:
    raise IOError('Unsupported compression format: {0:s}'.format(
        compression_format))

  serialization_format = metadata_values.get('serialization_format', None)
  if serialization_format != definitions.SERIALIZER_FORMAT_JSON:
    raise IOError('Unsupported serialization format: {0:s}'.format(
        serialization_format))

  storage_type = metadata_values.get('storage_type', None)
  if storage_type not in definitions.STORAGE_TYPES:
    raise IOError('Unsupported storage type: {0:s}'.format(
        storage_type))
289,120
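The checks above operate on the key/value pairs read from the metadata table. A minimal sketch of such a dictionary and of the explicit version conversion; the concrete values are assumptions for illustration, not the actual plaso format version or constant names:

metadata_values = {
    'format_version': '20181013',      # assumed example value
    'compression_format': 'zlib',      # assumed example value
    'serialization_format': 'json',    # assumed example value
    'storage_type': 'session',         # assumed example value
}

# The version is stored as text in the table and converted to an integer
# before it is compared against the supported version range.
format_version = int(metadata_values['format_version'], 10)
metadata_values['format_version'] = format_version
print(type(format_version).__name__)  # int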
Counts the number of attribute containers of the given type.

Args:
  container_type (str): attribute container type.

Returns:
  int: number of attribute containers of the given type.

Raises:
  ValueError: if an unsupported container_type is provided.

def _CountStoredAttributeContainers(self, container_type):
  if container_type not in self._CONTAINER_TYPES:
    raise ValueError(
        'Attribute container type {0:s} is not supported'.format(
            container_type))

  if not self._HasTable(container_type):
    return 0

  # Note that this is SQLite specific, and will give inaccurate results if
  # there are DELETE commands run on the table. The Plaso SQLite storage
  # implementation does not run any DELETE commands.
  query = 'SELECT MAX(_ROWID_) FROM {0:s} LIMIT 1'.format(container_type)
  self._cursor.execute(query)

  row = self._cursor.fetchone()
  if not row:
    return 0

  return row[0] or 0
289,121
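The MAX(_ROWID_) trick used above is cheaper than COUNT(*) but only stays accurate while no rows are deleted, which the comment in the method relies on. A minimal sketch against an in-memory table (table and payloads are invented):

import sqlite3

connection = sqlite3.connect(':memory:')
connection.execute('CREATE TABLE event (_data TEXT)')
connection.executemany(
    'INSERT INTO event VALUES (?)', [('a', ), ('b', ), ('c', )])

print(connection.execute(
    'SELECT MAX(_ROWID_) FROM event').fetchone()[0])  # 3

# After a DELETE the two values diverge, which is why the method notes that
# the plaso SQLite storage never issues DELETE commands.
connection.execute('DELETE FROM event WHERE _data = ?', ('b', ))
print(connection.execute(
    'SELECT MAX(_ROWID_) FROM event').fetchone()[0])  # still 3
print(connection.execute(
    'SELECT COUNT(*) FROM event').fetchone()[0])      # 2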
Retrieves a specific attribute container.

Args:
  container_type (str): attribute container type.
  index (int): attribute container index.

Returns:
  AttributeContainer: attribute container or None if not available.

Raises:
  IOError: when there is an error querying the storage file.
  OSError: when there is an error querying the storage file.

def _GetAttributeContainerByIndex(self, container_type, index):
  sequence_number = index + 1
  query = 'SELECT _data FROM {0:s} WHERE rowid = {1:d}'.format(
      container_type, sequence_number)

  try:
    self._cursor.execute(query)
  except sqlite3.OperationalError as exception:
    raise IOError('Unable to query storage file with error: {0!s}'.format(
        exception))

  row = self._cursor.fetchone()
  if row:
    identifier = identifiers.SQLTableIdentifier(
        container_type, sequence_number)

    if self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB:
      serialized_data = zlib.decompress(row[0])
    else:
      serialized_data = row[0]

    if self._storage_profiler:
      self._storage_profiler.Sample(
          'read', container_type, len(serialized_data), len(row[0]))

    attribute_container = self._DeserializeAttributeContainer(
        container_type, serialized_data)
    attribute_container.SetIdentifier(identifier)
    return attribute_container

  count = self._CountStoredAttributeContainers(container_type)
  index -= count

  serialized_data = self._GetSerializedAttributeContainerByIndex(
      container_type, index)
  attribute_container = self._DeserializeAttributeContainer(
      container_type, serialized_data)
  if attribute_container:
    identifier = identifiers.SQLTableIdentifier(
        container_type, sequence_number)
    attribute_container.SetIdentifier(identifier)

  return attribute_container
289,122
Retrieves a specific type of stored attribute containers.

Args:
  container_type (str): attribute container type.
  filter_expression (Optional[str]): expression to filter results by.
  order_by (Optional[str]): name of a column to order the results by.

Yields:
  AttributeContainer: attribute container.

Raises:
  IOError: when there is an error querying the storage file.
  OSError: when there is an error querying the storage file.

def _GetAttributeContainers(
    self, container_type, filter_expression=None, order_by=None):
  query = 'SELECT _identifier, _data FROM {0:s}'.format(container_type)
  if filter_expression:
    query = '{0:s} WHERE {1:s}'.format(query, filter_expression)
  if order_by:
    query = '{0:s} ORDER BY {1:s}'.format(query, order_by)

  # Use a local cursor to prevent another query interrupting the generator.
  cursor = self._connection.cursor()

  try:
    cursor.execute(query)
  except sqlite3.OperationalError as exception:
    raise IOError('Unable to query storage file with error: {0!s}'.format(
        exception))

  row = cursor.fetchone()
  while row:
    identifier = identifiers.SQLTableIdentifier(container_type, row[0])

    if self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB:
      serialized_data = zlib.decompress(row[1])
    else:
      serialized_data = row[1]

    if self._storage_profiler:
      self._storage_profiler.Sample(
          'read', container_type, len(serialized_data), len(row[1]))

    attribute_container = self._DeserializeAttributeContainer(
        container_type, serialized_data)
    attribute_container.SetIdentifier(identifier)
    yield attribute_container

    row = cursor.fetchone()
289,123
Determines if a specific table exists.

Args:
  table_name (str): name of the table.

Returns:
  bool: True if the table exists, false otherwise.

def _HasTable(self, table_name):
  query = self._HAS_TABLE_QUERY.format(table_name)

  self._cursor.execute(query)
  return bool(self._cursor.fetchone())
289,124
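The _HAS_TABLE_QUERY constant referenced above is not part of this excerpt; a query against sqlite_master along the following lines is assumed. A minimal sketch with an in-memory database:

import sqlite3

connection = sqlite3.connect(':memory:')
connection.execute('CREATE TABLE metadata (key TEXT, value TEXT)')

# Assumed shape of the table-existence query; adjust to the real constant.
query = (
    "SELECT name FROM sqlite_master "
    "WHERE type = 'table' AND name = '{0:s}'").format('metadata')
print(bool(connection.execute(query).fetchone()))  # True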
Reads storage metadata and checks that the values are valid.

Args:
  check_readable_only (Optional[bool]): whether the store should only be
      checked to see if it can be read. If False, the store will be checked
      to see if it can be read and written to.

def _ReadAndCheckStorageMetadata(self, check_readable_only=False):
  query = 'SELECT key, value FROM metadata'
  self._cursor.execute(query)

  metadata_values = {row[0]: row[1] for row in self._cursor.fetchall()}

  self._CheckStorageMetadata(
      metadata_values, check_readable_only=check_readable_only)

  self.format_version = metadata_values['format_version']
  self.compression_format = metadata_values['compression_format']
  self.serialization_format = metadata_values['serialization_format']
  self.storage_type = metadata_values['storage_type']
289,125
Writes an attribute container.

The table for the container type must exist.

Args:
  attribute_container (AttributeContainer): attribute container.

def _WriteAttributeContainer(self, attribute_container):
  if attribute_container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT:
    timestamp, serialized_data = self._serialized_event_heap.PopEvent()
  else:
    serialized_data = self._SerializeAttributeContainer(attribute_container)

  if self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB:
    compressed_data = zlib.compress(serialized_data)
    serialized_data = sqlite3.Binary(compressed_data)
  else:
    compressed_data = ''

  if self._storage_profiler:
    self._storage_profiler.Sample(
        'write', attribute_container.CONTAINER_TYPE, len(serialized_data),
        len(compressed_data))

  if attribute_container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT:
    query = 'INSERT INTO event (_timestamp, _data) VALUES (?, ?)'
    self._cursor.execute(query, (timestamp, serialized_data))
  else:
    query = 'INSERT INTO {0:s} (_data) VALUES (?)'.format(
        attribute_container.CONTAINER_TYPE)
    self._cursor.execute(query, (serialized_data, ))

  identifier = identifiers.SQLTableIdentifier(
      attribute_container.CONTAINER_TYPE, self._cursor.lastrowid)
  attribute_container.SetIdentifier(identifier)
289,126
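The compress-then-insert path above can be reproduced in isolation. A minimal sketch with an invented payload and table, showing that the stored blob round-trips through zlib:

import sqlite3
import zlib

connection = sqlite3.connect(':memory:')
connection.execute('CREATE TABLE event_data (_data BLOB)')

serialized_data = b'{"data_type": "example"}'  # assumed serialized container
compressed_data = zlib.compress(serialized_data)

# sqlite3.Binary marks the value as a BLOB parameter.
connection.execute(
    'INSERT INTO event_data (_data) VALUES (?)',
    (sqlite3.Binary(compressed_data), ))

stored = connection.execute('SELECT _data FROM event_data').fetchone()[0]
print(zlib.decompress(stored))  # b'{"data_type": "example"}'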
Writes a serialized attribute container list.

Args:
  container_type (str): attribute container type.

def _WriteSerializedAttributeContainerList(self, container_type):
  if container_type == self._CONTAINER_TYPE_EVENT:
    if not self._serialized_event_heap.data_size:
      return
    number_of_attribute_containers = (
        self._serialized_event_heap.number_of_events)

  else:
    container_list = self._GetSerializedAttributeContainerList(
        container_type)
    if not container_list.data_size:
      return
    number_of_attribute_containers = (
        container_list.number_of_attribute_containers)

  if self._serializers_profiler:
    self._serializers_profiler.StartTiming('write')

  if container_type == self._CONTAINER_TYPE_EVENT:
    query = 'INSERT INTO event (_timestamp, _data) VALUES (?, ?)'
  else:
    query = 'INSERT INTO {0:s} (_data) VALUES (?)'.format(container_type)

  # TODO: directly use container_list instead of values_tuple_list.
  values_tuple_list = []
  for _ in range(number_of_attribute_containers):
    if container_type == self._CONTAINER_TYPE_EVENT:
      timestamp, serialized_data = self._serialized_event_heap.PopEvent()
    else:
      serialized_data = container_list.PopAttributeContainer()

    if self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB:
      compressed_data = zlib.compress(serialized_data)
      serialized_data = sqlite3.Binary(compressed_data)
    else:
      compressed_data = ''

    if self._storage_profiler:
      self._storage_profiler.Sample(
          'write', container_type, len(serialized_data),
          len(compressed_data))

    if container_type == self._CONTAINER_TYPE_EVENT:
      values_tuple_list.append((timestamp, serialized_data))
    else:
      values_tuple_list.append((serialized_data, ))

  self._cursor.executemany(query, values_tuple_list)

  if self._serializers_profiler:
    self._serializers_profiler.StopTiming('write')

  if container_type == self._CONTAINER_TYPE_EVENT:
    self._serialized_event_heap.Empty()
  else:
    container_list.Empty()
289,127
Adds a warning.

Args:
  warning (ExtractionWarning): warning.

Raises:
  IOError: when the storage file is closed or read-only.
  OSError: when the storage file is closed or read-only.

def AddWarning(self, warning):
  self._RaiseIfNotWritable()

  self._AddAttributeContainer(
      self._CONTAINER_TYPE_EXTRACTION_WARNING, warning)
289,129
Adds an event.

Args:
  event (EventObject): event.

Raises:
  IOError: when the storage file is closed or read-only or if the event
      data identifier type is not supported.
  OSError: when the storage file is closed or read-only or if the event
      data identifier type is not supported.

def AddEvent(self, event):
  self._RaiseIfNotWritable()

  # TODO: change to no longer allow event_data_identifier is None
  # after refactoring every parser to generate event data.
  event_data_identifier = event.GetEventDataIdentifier()
  if event_data_identifier:
    if not isinstance(event_data_identifier, identifiers.SQLTableIdentifier):
      raise IOError('Unsupported event data identifier type: {0:s}'.format(
          type(event_data_identifier)))

    event.event_data_row_identifier = event_data_identifier.row_identifier

  self._AddSerializedEvent(event)
289,130
Adds event data.

Args:
  event_data (EventData): event data.

Raises:
  IOError: when the storage file is closed or read-only.
  OSError: when the storage file is closed or read-only.

def AddEventData(self, event_data):
  self._RaiseIfNotWritable()

  self._AddAttributeContainer(self._CONTAINER_TYPE_EVENT_DATA, event_data)
289,131
Adds an event source.

Args:
  event_source (EventSource): event source.

Raises:
  IOError: when the storage file is closed or read-only.
  OSError: when the storage file is closed or read-only.

def AddEventSource(self, event_source):
  self._RaiseIfNotWritable()

  self._AddAttributeContainer(
      self._CONTAINER_TYPE_EVENT_SOURCE, event_source)
289,132
Adds an event tag.

Args:
  event_tag (EventTag): event tag.

Raises:
  IOError: when the storage file is closed or read-only or if the event
      identifier type is not supported.
  OSError: when the storage file is closed or read-only or if the event
      identifier type is not supported.

def AddEventTag(self, event_tag):
  self._RaiseIfNotWritable()

  event_identifier = event_tag.GetEventIdentifier()
  if not isinstance(event_identifier, identifiers.SQLTableIdentifier):
    raise IOError('Unsupported event identifier type: {0:s}'.format(
        type(event_identifier)))

  event_tag.event_row_identifier = event_identifier.row_identifier

  self._AddAttributeContainer(self._CONTAINER_TYPE_EVENT_TAG, event_tag)
289,133
Adds event tags.

Args:
  event_tags (list[EventTag]): event tags.

Raises:
  IOError: when the storage file is closed or read-only or if the event
      tags cannot be serialized.
  OSError: when the storage file is closed or read-only or if the event
      tags cannot be serialized.

def AddEventTags(self, event_tags):
  self._RaiseIfNotWritable()

  for event_tag in event_tags:
    self.AddEventTag(event_tag)
289,134
Checks if the storage file format is supported.

Args:
  path (str): path to the storage file.
  check_readable_only (Optional[bool]): whether the store should only be
      checked to see if it can be read. If False, the store will be checked
      to see if it can be read and written to.

Returns:
  bool: True if the format is supported.

def CheckSupportedFormat(cls, path, check_readable_only=False):
  try:
    connection = sqlite3.connect(
        path, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)

    cursor = connection.cursor()

    query = 'SELECT * FROM metadata'
    cursor.execute(query)

    metadata_values = {row[0]: row[1] for row in cursor.fetchall()}

    cls._CheckStorageMetadata(
        metadata_values, check_readable_only=check_readable_only)

    connection.close()
    result = True

  except (IOError, sqlite3.DatabaseError):
    result = False

  return result
289,135
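A minimal usage sketch for CheckSupportedFormat above, probing a file before opening it. The import path and the file path are assumptions for illustration; adjust them to the actual plaso module layout:

from plaso.storage.sqlite import sqlite_file  # assumed module path

path = '/tmp/test.plaso'  # hypothetical storage file
if sqlite_file.SQLiteStorageFile.CheckSupportedFormat(
    path, check_readable_only=True):
  print('Supported storage file: {0:s}'.format(path))
else:
  print('Unsupported storage file: {0:s}'.format(path))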
Retrieves a specific event tag.

Args:
  identifier (SQLTableIdentifier): event tag identifier.

Returns:
  EventTag: event tag or None if not available.

def GetEventTagByIdentifier(self, identifier):
  event_tag = self._GetAttributeContainerByIndex(
      self._CONTAINER_TYPE_EVENT_TAG, identifier.row_identifier - 1)
  if event_tag:
    event_identifier = identifiers.SQLTableIdentifier(
        self._CONTAINER_TYPE_EVENT, event_tag.event_row_identifier)
    event_tag.SetEventIdentifier(event_identifier)

    del event_tag.event_row_identifier

  return event_tag
289,140
Retrieves the events in increasing chronological order.

Args:
  time_range (Optional[TimeRange]): time range used to filter events that
      fall in a specific period.

Yields:
  EventObject: event.

def GetSortedEvents(self, time_range=None):
  filter_expression = None
  if time_range:
    filter_expression = []

    if time_range.start_timestamp:
      filter_expression.append(
          '_timestamp >= {0:d}'.format(time_range.start_timestamp))

    if time_range.end_timestamp:
      filter_expression.append(
          '_timestamp <= {0:d}'.format(time_range.end_timestamp))

    filter_expression = ' AND '.join(filter_expression)

  event_generator = self._GetAttributeContainers(
      self._CONTAINER_TYPE_EVENT, filter_expression=filter_expression,
      order_by='_timestamp')

  for event in event_generator:
    if hasattr(event, 'event_data_row_identifier'):
      event_data_identifier = identifiers.SQLTableIdentifier(
          'event_data', event.event_data_row_identifier)
      event.SetEventDataIdentifier(event_data_identifier)

      del event.event_data_row_identifier

    yield event
289,144
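A minimal sketch of the _timestamp filter expression that GetSortedEvents above builds for a time range. TimeRange here is a stand-in namedtuple and the timestamps are arbitrary microsecond values, not the plaso class or real data:

import collections

TimeRange = collections.namedtuple(
    'TimeRange', ['start_timestamp', 'end_timestamp'])

time_range = TimeRange(
    start_timestamp=1514764800000000, end_timestamp=1546300799000000)

filter_expression = []
if time_range.start_timestamp:
  filter_expression.append(
      '_timestamp >= {0:d}'.format(time_range.start_timestamp))
if time_range.end_timestamp:
  filter_expression.append(
      '_timestamp <= {0:d}'.format(time_range.end_timestamp))

# The joined expression is passed straight into the SQL WHERE clause.
print(' AND '.join(filter_expression))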
Opens the storage.

Args:
  path (Optional[str]): path to the storage file.
  read_only (Optional[bool]): True if the file should be opened in
      read-only mode.

Raises:
  IOError: if the storage file is already opened or if the database
      cannot be connected.
  OSError: if the storage file is already opened or if the database
      cannot be connected.
  ValueError: if path is missing.

def Open(self, path=None, read_only=True, **unused_kwargs):
  if self._is_open:
    raise IOError('Storage file already opened.')

  if not path:
    raise ValueError('Missing path.')

  path = os.path.abspath(path)

  connection = sqlite3.connect(
      path, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)

  cursor = connection.cursor()
  if not cursor:
    return

  self._connection = connection
  self._cursor = cursor
  self._is_open = True
  self._read_only = read_only

  if read_only:
    self._ReadAndCheckStorageMetadata(check_readable_only=True)
  else:
    # self._cursor.execute('PRAGMA journal_mode=MEMORY')

    # Turn off insert transaction integrity since we want to do bulk insert.
    self._cursor.execute('PRAGMA synchronous=OFF')

    if not self._HasTable('metadata'):
      self._WriteStorageMetadata()
    else:
      self._ReadAndCheckStorageMetadata()

    if self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB:
      data_column_type = 'BLOB'
    else:
      data_column_type = 'TEXT'

    for container_type in self._CONTAINER_TYPES:
      if not self._HasTable(container_type):
        if container_type == self._CONTAINER_TYPE_EVENT:
          query = self._CREATE_EVENT_TABLE_QUERY.format(
              container_type, data_column_type)
        else:
          query = self._CREATE_TABLE_QUERY.format(
              container_type, data_column_type)
        self._cursor.execute(query)

    self._connection.commit()

  last_session_start = self._CountStoredAttributeContainers(
      self._CONTAINER_TYPE_SESSION_START)

  last_session_completion = self._CountStoredAttributeContainers(
      self._CONTAINER_TYPE_SESSION_COMPLETION)

  # Initialize next_sequence_number based on the file contents so that
  # SQLTableIdentifier points to the correct attribute container.
  for container_type in self._REFERENCED_CONTAINER_TYPES:
    container_list = self._GetSerializedAttributeContainerList(
        container_type)
    container_list.next_sequence_number = (
        self._CountStoredAttributeContainers(container_type))

  # TODO: handle open sessions.
  if last_session_start != last_session_completion:
    logger.warning('Detected unclosed session.')

  self._last_session = last_session_completion
289,146
Reads preprocessing information.

The preprocessing information contains the system configuration, which
contains information about various system specific configuration data,
for example the user accounts.

Args:
  knowledge_base (KnowledgeBase): used to store the preprocessing
      information.

def ReadPreprocessingInformation(self, knowledge_base):
  generator = self._GetAttributeContainers(
      self._CONTAINER_TYPE_SYSTEM_CONFIGURATION)

  for stream_number, system_configuration in enumerate(generator):
    # TODO: replace stream_number by session_identifier.
    knowledge_base.ReadSystemConfigurationArtifact(
        system_configuration, session_identifier=stream_number)
289,147
Writes preprocessing information.

Args:
  knowledge_base (KnowledgeBase): contains the preprocessing information.

Raises:
  IOError: if the storage type does not support writing preprocess
      information or the storage file is closed or read-only.
  OSError: if the storage type does not support writing preprocess
      information or the storage file is closed or read-only.

def WritePreprocessingInformation(self, knowledge_base):
  self._RaiseIfNotWritable()

  if self.storage_type != definitions.STORAGE_TYPE_SESSION:
    raise IOError('Preprocess information not supported by storage type.')

  system_configuration = knowledge_base.GetSystemConfigurationArtifact()

  self._WriteAttributeContainer(system_configuration)
289,148
Adds command line arguments the helper supports to an argument group.

This function takes an argument parser or an argument group object and
adds to it all the command line arguments this helper supports.

Args:
  argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
      argparse group.

def AddArguments(cls, argument_group):
  argument_group.add_argument(
      '--viper-hash', '--viper_hash', dest='viper_hash', type=str,
      action='store', choices=viper.ViperAnalyzer.SUPPORTED_HASHES,
      default=cls._DEFAULT_HASH, metavar='HASH', help=(
          'Type of hash to use to query the Viper server, the default is: '
          '{0:s}. Supported options: {1:s}').format(
              cls._DEFAULT_HASH, ', '.join(
                  viper.ViperAnalyzer.SUPPORTED_HASHES)))

  argument_group.add_argument(
      '--viper-host', '--viper_host', dest='viper_host', type=str,
      action='store', default=cls._DEFAULT_HOST, metavar='HOST', help=(
          'Hostname of the Viper server to query, the default is: '
          '{0:s}'.format(cls._DEFAULT_HOST)))

  argument_group.add_argument(
      '--viper-port', '--viper_port', dest='viper_port', type=int,
      action='store', default=cls._DEFAULT_PORT, metavar='PORT', help=(
          'Port of the Viper server to query, the default is: {0:d}.'.format(
              cls._DEFAULT_PORT)))

  argument_group.add_argument(
      '--viper-protocol', '--viper_protocol', dest='viper_protocol',
      type=str, choices=viper.ViperAnalyzer.SUPPORTED_PROTOCOLS,
      action='store', default=cls._DEFAULT_PROTOCOL, metavar='PROTOCOL',
      help=(
          'Protocol to use to query Viper, the default is: {0:s}. '
          'Supported options: {1:s}').format(
              cls._DEFAULT_PROTOCOL, ', '.join(
                  viper.ViperAnalyzer.SUPPORTED_PROTOCOLS)))
289,149
Parses and validates options.

Args:
  options (argparse.Namespace): parser options.
  analysis_plugin (ViperAnalysisPlugin): analysis plugin to configure.

Raises:
  BadConfigObject: when the output module object is of the wrong type.
  BadConfigOption: when unable to connect to Viper instance.

def ParseOptions(cls, options, analysis_plugin):
  if not isinstance(analysis_plugin, viper.ViperAnalysisPlugin):
    raise errors.BadConfigObject(
        'Analysis plugin is not an instance of ViperAnalysisPlugin')

  lookup_hash = cls._ParseStringOption(
      options, 'viper_hash', default_value=cls._DEFAULT_HASH)
  analysis_plugin.SetLookupHash(lookup_hash)

  host = cls._ParseStringOption(
      options, 'viper_host', default_value=cls._DEFAULT_HOST)
  analysis_plugin.SetHost(host)

  port = cls._ParseNumericOption(
      options, 'viper_port', default_value=cls._DEFAULT_PORT)
  analysis_plugin.SetPort(port)

  protocol = cls._ParseStringOption(
      options, 'viper_protocol', default_value=cls._DEFAULT_PROTOCOL)
  protocol = protocol.lower().strip()
  analysis_plugin.SetProtocol(protocol)

  if not analysis_plugin.TestConnection():
    raise errors.BadConfigOption(
        'Unable to connect to Viper {0:s}:{1:d}'.format(host, port))
289,150
Parses a bookmark annotation row.

Args:
  parser_mediator (ParserMediator): mediates interactions between parsers
      and other components, such as storage and dfvfs.
  query (str): query that created the row.
  row (sqlite3.Row): row.

def ParseBookmarkAnnotationRow(
    self, parser_mediator, query, row, **unused_kwargs):
  query_hash = hash(query)

  event_data = FirefoxPlacesBookmarkAnnotationEventData()
  event_data.content = self._GetRowValue(query_hash, row, 'content')
  event_data.offset = self._GetRowValue(query_hash, row, 'id')
  event_data.query = query
  event_data.title = self._GetRowValue(query_hash, row, 'title')
  event_data.url = self._GetRowValue(query_hash, row, 'url')

  timestamp = self._GetRowValue(query_hash, row, 'dateAdded')
  if timestamp:
    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
        timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_ADDED)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  timestamp = self._GetRowValue(query_hash, row, 'lastModified')
  if timestamp:
    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
        timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
289,156
Parses a bookmark folder row.

Args:
  parser_mediator (ParserMediator): mediates interactions between parsers
      and other components, such as storage and dfvfs.
  query (str): query that created the row.
  row (sqlite3.Row): row.

def ParseBookmarkFolderRow(
    self, parser_mediator, query, row, **unused_kwargs):
  query_hash = hash(query)

  title = self._GetRowValue(query_hash, row, 'title')

  event_data = FirefoxPlacesBookmarkFolderEventData()
  event_data.offset = self._GetRowValue(query_hash, row, 'id')
  event_data.query = query
  event_data.title = title or 'N/A'

  timestamp = self._GetRowValue(query_hash, row, 'dateAdded')
  if timestamp:
    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
        timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_ADDED)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  timestamp = self._GetRowValue(query_hash, row, 'lastModified')
  if timestamp:
    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
        timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
289,157
Parses a bookmark row.

Args:
  parser_mediator (ParserMediator): mediates interactions between parsers
      and other components, such as storage and dfvfs.
  query (str): query that created the row.
  row (sqlite3.Row): row.

def ParseBookmarkRow(
    self, parser_mediator, query, row, **unused_kwargs):
  query_hash = hash(query)

  rev_host = self._GetRowValue(query_hash, row, 'rev_host')
  bookmark_type = self._GetRowValue(query_hash, row, 'type')

  event_data = FirefoxPlacesBookmarkEventData()
  event_data.host = rev_host or 'N/A'
  event_data.offset = self._GetRowValue(query_hash, row, 'id')
  event_data.places_title = self._GetRowValue(
      query_hash, row, 'places_title')
  event_data.query = query
  event_data.title = self._GetRowValue(query_hash, row, 'bookmark_title')
  event_data.type = self._BOOKMARK_TYPES.get(bookmark_type, 'N/A')
  event_data.url = self._GetRowValue(query_hash, row, 'url')
  event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count')

  timestamp = self._GetRowValue(query_hash, row, 'dateAdded')
  if timestamp:
    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
        timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_ADDED)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  timestamp = self._GetRowValue(query_hash, row, 'lastModified')
  if timestamp:
    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
        timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
289,158
Parses a page visited row.

Args:
  parser_mediator (ParserMediator): mediates interactions between parsers
      and other components, such as storage and dfvfs.
  query (str): query that created the row.
  row (sqlite3.Row): row.
  cache (Optional[SQLiteCache]): cache.
  database (Optional[SQLiteDatabase]): database.

def ParsePageVisitedRow(
    self, parser_mediator, query, row, cache=None, database=None,
    **unused_kwargs):
  query_hash = hash(query)

  from_visit = self._GetRowValue(query_hash, row, 'from_visit')
  hidden = self._GetRowValue(query_hash, row, 'hidden')
  rev_host = self._GetRowValue(query_hash, row, 'rev_host')
  typed = self._GetRowValue(query_hash, row, 'typed')

  # TODO: make extra conditional formatting.
  extras = []
  if from_visit:
    extras.append('visited from: {0:s}'.format(
        self._GetUrl(from_visit, cache, database)))

  if hidden == '1':
    extras.append('(url hidden)')

  if typed == '1':
    extras.append('(directly typed)')
  else:
    extras.append('(URL not typed directly)')

  event_data = FirefoxPlacesPageVisitedEventData()
  event_data.host = self._ReverseHostname(rev_host)
  event_data.offset = self._GetRowValue(query_hash, row, 'id')
  event_data.query = query
  event_data.title = self._GetRowValue(query_hash, row, 'title')
  event_data.url = self._GetRowValue(query_hash, row, 'url')
  event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count')
  event_data.visit_type = self._GetRowValue(query_hash, row, 'visit_type')

  if extras:
    event_data.extra = extras

  timestamp = self._GetRowValue(query_hash, row, 'visit_date')
  if timestamp:
    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
        timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
289,159
Reverses the hostname and strips the leading dot.

The hostname entry is reversed:
  moc.elgoog.www.
Should be:
  www.google.com

Args:
  hostname (str): reversed hostname.

Returns:
  str: hostname without a leading dot.

def _ReverseHostname(self, hostname):
  if not hostname:
    return ''

  if len(hostname) <= 1:
    return hostname

  if hostname[-1] == '.':
    return hostname[::-1][1:]

  return hostname[::-1][0:]
289,160
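The reversal performed by _ReverseHostname above, applied to the example from its own docstring, can be checked with a standalone copy of the logic:

def reverse_hostname(hostname):
  # Same logic as _ReverseHostname: reverse the string and drop the dot
  # that ends up at the front when the stored value has a trailing dot.
  if not hostname:
    return ''
  if len(hostname) <= 1:
    return hostname
  if hostname[-1] == '.':
    return hostname[::-1][1:]
  return hostname[::-1][0:]

print(reverse_hostname('moc.elgoog.www.'))  # www.google.com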
Retrieves a URL from a reference to an entry in the from_visit table.

Args:
  url_id (str): identifier of the visited URL.
  cache (SQLiteCache): cache.
  database (SQLiteDatabase): database.

Returns:
  str: URL and hostname.

def _GetUrl(self, url_id, cache, database):
  url_cache_results = cache.GetResults('url')
  if not url_cache_results:
    result_set = database.Query(self.URL_CACHE_QUERY)

    cache.CacheQueryResults(
        result_set, 'url', 'id', ('url', 'rev_host'))
    url_cache_results = cache.GetResults('url')

  url, reverse_host = url_cache_results.get(url_id, ['', ''])

  if not url:
    return ''

  hostname = self._ReverseHostname(reverse_host)
  return '{0:s} ({1:s})'.format(url, hostname)
289,161
Parses a downloads row.

Args:
  parser_mediator (ParserMediator): mediates interactions between parsers
      and other components, such as storage and dfvfs.
  query (str): query that created the row.
  row (sqlite3.Row): row.

def ParseDownloadsRow(
    self, parser_mediator, query, row, **unused_kwargs):
  query_hash = hash(query)

  event_data = FirefoxDownloadEventData()
  event_data.full_path = self._GetRowValue(query_hash, row, 'target')
  event_data.mime_type = self._GetRowValue(query_hash, row, 'mimeType')
  event_data.name = self._GetRowValue(query_hash, row, 'name')
  event_data.offset = self._GetRowValue(query_hash, row, 'id')
  event_data.query = query
  event_data.received_bytes = self._GetRowValue(query_hash, row, 'currBytes')
  event_data.referrer = self._GetRowValue(query_hash, row, 'referrer')
  event_data.temporary_location = self._GetRowValue(
      query_hash, row, 'tempPath')
  event_data.total_bytes = self._GetRowValue(query_hash, row, 'maxBytes')
  event_data.url = self._GetRowValue(query_hash, row, 'source')

  timestamp = self._GetRowValue(query_hash, row, 'startTime')
  if timestamp:
    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
        timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_START)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  timestamp = self._GetRowValue(query_hash, row, 'endTime')
  if timestamp:
    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
        timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_END)
    parser_mediator.ProduceEventWithEventData(event, event_data)
289,162
Initializes a process.

Args:
  processing_configuration (ProcessingConfiguration): processing
      configuration.
  enable_sigsegv_handler (Optional[bool]): True if the SIGSEGV handler
      should be enabled.
  kwargs (dict[str,object]): keyword arguments to pass to
      multiprocessing.Process.

def __init__(
    self, processing_configuration, enable_sigsegv_handler=False, **kwargs):
  super(MultiProcessBaseProcess, self).__init__(**kwargs)
  self._debug_output = False
  self._enable_sigsegv_handler = enable_sigsegv_handler
  self._guppy_memory_profiler = None
  self._log_filename = None
  self._memory_profiler = None
  self._original_sigsegv_handler = None
  # TODO: check if this can be replaced by self.pid or does this only apply
  # to the parent process?
  self._pid = None
  self._processing_configuration = processing_configuration
  self._process_information = None
  self._processing_profiler = None
  self._quiet_mode = False
  self._rpc_server = None
  self._serializers_profiler = None
  self._status_is_running = False
  self._storage_profiler = None
  self._tasks_profiler = None

  if self._processing_configuration:
    self._debug_output = self._processing_configuration.debug_output

    if processing_configuration.log_filename:
      log_path = os.path.dirname(
          self._processing_configuration.log_filename)
      log_filename = os.path.basename(
          self._processing_configuration.log_filename)
      log_filename = '{0:s}_{1:s}'.format(self._name, log_filename)
      self._log_filename = os.path.join(log_path, log_filename)

  # We need to share the RPC port number with the engine process.
  self.rpc_port = multiprocessing.Value('I', 0)
289,163
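The rpc_port attribute above is a multiprocessing.Value so the child process can publish its RPC port back to the engine. A minimal sketch of that sharing pattern, with an arbitrary example port rather than a real plaso RPC server:

import multiprocessing


def _set_port(port_value):
  # The child publishes the port it bound; 8080 is an arbitrary example.
  port_value.value = 8080


if __name__ == '__main__':
  rpc_port = multiprocessing.Value('I', 0)  # unsigned int, initially 0
  process = multiprocessing.Process(target=_set_port, args=(rpc_port, ))
  process.start()
  process.join()
  print(rpc_port.value)  # 8080, visible to the parent after the child exits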