Columns: docstring (string, 52 to 499 characters), function (string, 67 to 35.2k characters), __index_level_0__ (int64, 52.6k to 1.16M).
def _AnalyzeEvents(self, storage_writer, analysis_plugins, event_filter=None):
  """Analyzes events in a plaso storage.

  Args:
    storage_writer (StorageWriter): storage writer.
    analysis_plugins (dict[str, AnalysisPlugin]): analysis plugins that
        should be run and their names.
    event_filter (Optional[FilterObject]): event filter.

  Returns:
    collections.Counter: counter containing information about the events
        processed and filtered.

  Raises:
    RuntimeError: if a non-recoverable situation is encountered.
  """
  self._status = definitions.STATUS_INDICATOR_RUNNING
  self._number_of_consumed_events = 0
  self._number_of_consumed_reports = 0
  self._number_of_consumed_sources = 0
  self._number_of_consumed_warnings = 0
  self._number_of_produced_events = 0
  self._number_of_produced_reports = 0
  self._number_of_produced_sources = 0
  self._number_of_produced_warnings = 0

  number_of_filtered_events = 0

  logger.debug('Processing events.')

  filter_limit = getattr(event_filter, 'limit', None)

  for event in storage_writer.GetSortedEvents():
    event_data_identifier = event.GetEventDataIdentifier()
    if event_data_identifier:
      event_data = storage_writer.GetEventDataByIdentifier(
          event_data_identifier)
      if event_data:
        for attribute_name, attribute_value in event_data.GetAttributes():
          setattr(event, attribute_name, attribute_value)

    event_identifier = event.GetIdentifier()
    event.tag = self._event_tag_index.GetEventTagByIdentifier(
        storage_writer, event_identifier)

    if event_filter:
      filter_match = event_filter.Match(event)
    else:
      filter_match = None

    # pylint: disable=singleton-comparison
    if filter_match == False:
      number_of_filtered_events += 1
      continue

    for event_queue in self._event_queues.values():
      # TODO: Check for premature exit of analysis plugins.
      event_queue.PushItem(event)

    self._number_of_consumed_events += 1

    if (event_filter and filter_limit and
        filter_limit == self._number_of_consumed_events):
      break

  logger.debug('Finished pushing events to analysis plugins.')

  # Signal that we have finished adding events.
  for event_queue in self._event_queues.values():
    event_queue.PushItem(plaso_queue.QueueAbort(), block=False)

  logger.debug('Processing analysis plugin results.')

  # TODO: use a task based approach.
  plugin_names = list(analysis_plugins.keys())
  while plugin_names:
    for plugin_name in list(plugin_names):
      if self._abort:
        break

      # TODO: temporary solution.
      task = tasks.Task()
      task.identifier = plugin_name

      merge_ready = storage_writer.CheckTaskReadyForMerge(task)
      if merge_ready:
        storage_writer.PrepareMergeTaskStorage(task)
        self._status = definitions.STATUS_INDICATOR_MERGING

        event_queue = self._event_queues[plugin_name]
        del self._event_queues[plugin_name]

        event_queue.Close()

        storage_merge_reader = storage_writer.StartMergeTaskStorage(task)

        storage_merge_reader.MergeAttributeContainers(
            callback=self._MergeEventTag)
        # TODO: temporary solution.
        plugin_names.remove(plugin_name)

        self._status = definitions.STATUS_INDICATOR_RUNNING

        self._number_of_produced_event_tags = (
            storage_writer.number_of_event_tags)
        self._number_of_produced_reports = (
            storage_writer.number_of_analysis_reports)

  try:
    storage_writer.StopTaskStorage(abort=self._abort)
  except (IOError, OSError) as exception:
    logger.error('Unable to stop task storage with error: {0!s}'.format(
        exception))

  if self._abort:
    logger.debug('Processing aborted.')
  else:
    logger.debug('Processing completed.')

  events_counter = collections.Counter()
  events_counter['Events filtered'] = number_of_filtered_events
  events_counter['Events processed'] = self._number_of_consumed_events

  return events_counter
287,818
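The fan-out-and-sentinel pattern above (every event is pushed to each plugin's queue, then a QueueAbort sentinel marks the end of input) can be illustrated with the standard library. This is a minimal sketch, not the plaso implementation; the QueueAbort class and worker function here are illustrative stand-ins.

import queue
import threading

class QueueAbort(object):
  """Sentinel that signals a consumer to stop."""

def worker(event_queue, results):
  # Consume items until the abort sentinel arrives.
  while True:
    item = event_queue.get()
    if isinstance(item, QueueAbort):
      break
    results.append(item)

event_queues = {'tagging': queue.Queue(), 'browser_search': queue.Queue()}
results = {name: [] for name in event_queues}
threads = [
    threading.Thread(target=worker, args=(event_queue, results[name]))
    for name, event_queue in event_queues.items()]
for thread in threads:
  thread.start()

for event in range(3):  # stand-in for storage_writer.GetSortedEvents()
  for event_queue in event_queues.values():
    event_queue.put(event)

# Signal that no more events will arrive.
for event_queue in event_queues.values():
  event_queue.put(QueueAbort())
for thread in threads:
  thread.join()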
def _CheckStatusAnalysisProcess(self, pid):
  """Checks the status of an analysis process.

  Args:
    pid (int): process ID (PID) of a registered analysis process.

  Raises:
    KeyError: if the process is not registered with the engine.
  """
  # TODO: Refactor this method, simplify and separate concerns (monitoring
  # vs management).
  self._RaiseIfNotRegistered(pid)

  if pid in self._completed_analysis_processes:
    status_indicator = definitions.STATUS_INDICATOR_COMPLETED
    process_status = {
        'processing_status': status_indicator}
    used_memory = 0

  else:
    process = self._processes_per_pid[pid]

    process_status = self._QueryProcessStatus(process)
    if process_status is None:
      process_is_alive = False
    else:
      process_is_alive = True

    process_information = self._process_information_per_pid[pid]
    used_memory = process_information.GetUsedMemory() or 0

    if self._worker_memory_limit and used_memory > self._worker_memory_limit:
      logger.warning((
          'Process: {0:s} (PID: {1:d}) killed because it exceeded the '
          'memory limit: {2:d}.').format(
              process.name, pid, self._worker_memory_limit))
      self._KillProcess(pid)

    if isinstance(process_status, dict):
      self._rpc_errors_per_pid[pid] = 0
      status_indicator = process_status.get('processing_status', None)

      if status_indicator == definitions.STATUS_INDICATOR_COMPLETED:
        self._completed_analysis_processes.add(pid)

    else:
      rpc_errors = self._rpc_errors_per_pid.get(pid, 0) + 1
      self._rpc_errors_per_pid[pid] = rpc_errors

      if rpc_errors > self._MAXIMUM_RPC_ERRORS:
        process_is_alive = False

      if process_is_alive:
        rpc_port = process.rpc_port.value
        logger.warning((
            'Unable to retrieve process: {0:s} (PID: {1:d}) status via '
            'RPC socket: http://localhost:{2:d}').format(
                process.name, pid, rpc_port))

        processing_status_string = 'RPC error'
        status_indicator = definitions.STATUS_INDICATOR_RUNNING
      else:
        processing_status_string = 'killed'
        status_indicator = definitions.STATUS_INDICATOR_KILLED

      process_status = {
          'processing_status': processing_status_string}

  self._UpdateProcessingStatus(pid, process_status, used_memory)

  if status_indicator in definitions.ERROR_STATUS_INDICATORS:
    logger.error((
        'Process {0:s} (PID: {1:d}) is not functioning correctly. '
        'Status code: {2!s}.').format(process.name, pid, status_indicator))

    self._TerminateProcessByPid(pid)
287,819
def _ExportEvent(self, output_module, event, deduplicate_events=True):
  """Exports an event using an output module.

  Args:
    output_module (OutputModule): output module.
    event (EventObject): event.
    deduplicate_events (Optional[bool]): True if events should be
        deduplicated.
  """
  if event.timestamp != self._export_event_timestamp:
    self._FlushExportBuffer(
        output_module, deduplicate_events=deduplicate_events)
    self._export_event_timestamp = event.timestamp

  self._export_event_heap.PushEvent(event)
287,820
def _FlushExportBuffer(self, output_module, deduplicate_events=True):
  """Flushes buffered events and writes them to the output module.

  Args:
    output_module (OutputModule): output module.
    deduplicate_events (Optional[bool]): True if events should be
        deduplicated.
  """
  last_macb_group_identifier = None
  last_content_identifier = None
  macb_group = []

  generator = self._export_event_heap.PopEvents()

  for macb_group_identifier, content_identifier, event in generator:
    if deduplicate_events and last_content_identifier == content_identifier:
      self._events_status.number_of_duplicate_events += 1
      continue

    if macb_group_identifier is None:
      if macb_group:
        output_module.WriteEventMACBGroup(macb_group)
        macb_group = []

      output_module.WriteEvent(event)
    else:
      if (last_macb_group_identifier == macb_group_identifier or
          not macb_group):
        macb_group.append(event)
      else:
        output_module.WriteEventMACBGroup(macb_group)
        macb_group = [event]

      self._events_status.number_of_macb_grouped_events += 1

    last_macb_group_identifier = macb_group_identifier
    last_content_identifier = content_identifier

  if macb_group:
    output_module.WriteEventMACBGroup(macb_group)
287,822
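A simplified toy of the buffering logic above, assuming the heap yields events sorted so that duplicates share a content identifier and MACB group members are adjacent; the tuples and print calls are illustrative stand-ins for plaso's heap and output module.

events = [
    ('group-1', 'content-a', 'event 1'),
    ('group-1', 'content-a', 'event 1 (duplicate)'),
    ('group-1', 'content-b', 'event 2'),
    (None, 'content-c', 'event 3'),
]

macb_group = []
last_group_identifier = None
last_content_identifier = None
for group_identifier, content_identifier, event in events:
  if content_identifier == last_content_identifier:
    continue  # deduplicated

  if group_identifier is None:
    if macb_group:
      print('MACB group:', macb_group)
      macb_group = []
    print('single event:', event)
  elif group_identifier == last_group_identifier or not macb_group:
    macb_group.append(event)
  else:
    print('MACB group:', macb_group)
    macb_group = [event]

  last_group_identifier = group_identifier
  last_content_identifier = content_identifier

if macb_group:
  print('MACB group:', macb_group)

# Output:
# MACB group: ['event 1', 'event 2']
# single event: event 3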
def _MergeEventTag(self, storage_writer, attribute_container):
  """Merges an event tag with the last stored event tag.

  If there is an existing event tag, the provided event tag is updated with
  the contents of the existing one, after which the event tag index is
  updated.

  Args:
    storage_writer (StorageWriter): storage writer.
    attribute_container (AttributeContainer): container.
  """
  if attribute_container.CONTAINER_TYPE != 'event_tag':
    return

  event_identifier = attribute_container.GetEventIdentifier()
  if not event_identifier:
    return

  # Check if the event has already been tagged on a previous occasion;
  # if so, we need to append the event tag to the last stored one.
  stored_event_tag = self._event_tag_index.GetEventTagByIdentifier(
      storage_writer, event_identifier)
  if stored_event_tag:
    attribute_container.AddComment(stored_event_tag.comment)
    attribute_container.AddLabels(stored_event_tag.labels)

  self._event_tag_index.SetEventTag(attribute_container)
287,823
def _StartAnalysisProcesses(self, storage_writer, analysis_plugins):
  """Starts the analysis processes.

  Args:
    storage_writer (StorageWriter): storage writer.
    analysis_plugins (dict[str, AnalysisPlugin]): analysis plugins that
        should be run and their names.
  """
  logger.info('Starting analysis plugins.')

  for analysis_plugin in analysis_plugins.values():
    self._analysis_plugins[analysis_plugin.NAME] = analysis_plugin

    process = self._StartWorkerProcess(analysis_plugin.NAME, storage_writer)
    if not process:
      logger.error('Unable to create analysis process: {0:s}'.format(
          analysis_plugin.NAME))

  logger.info('Analysis plugins running')
287,824
def _StopAnalysisProcesses(self, abort=False):
  """Stops the analysis processes.

  Args:
    abort (bool): True to indicate the stop is issued on abort.
  """
  logger.debug('Stopping analysis processes.')
  self._StopMonitoringProcesses()

  # Note that multiprocessing.Queue is very sensitive regarding
  # blocking on either a get or a put. So we try to prevent using
  # any blocking behavior.

  if abort:
    # Signal all the processes to abort.
    self._AbortTerminate()

  if not self._use_zeromq:
    logger.debug('Emptying queues.')
    for event_queue in self._event_queues.values():
      event_queue.Empty()

  # Wake the processes to make sure that they are not blocking
  # while waiting for new items in the queue.
  for event_queue in self._event_queues.values():
    event_queue.PushItem(plaso_queue.QueueAbort(), block=False)

  # Try waiting for the processes to exit normally.
  self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)
  for event_queue in self._event_queues.values():
    event_queue.Close(abort=abort)

  if abort:
    # Kill any remaining processes.
    self._AbortKill()
  else:
    # Check if the processes are still alive and terminate them if necessary.
    self._AbortTerminate()
    self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)

    for event_queue in self._event_queues.values():
      event_queue.Close(abort=True)
287,826
def _UpdateProcessingStatus(self, pid, process_status, used_memory):
  """Updates the processing status.

  Args:
    pid (int): process identifier (PID) of the worker process.
    process_status (dict[str, object]): status values received from
        the worker process.
    used_memory (int): size of used memory in bytes.

  Raises:
    KeyError: if the process is not registered with the engine.
  """
  self._RaiseIfNotRegistered(pid)

  if not process_status:
    return

  process = self._processes_per_pid[pid]

  status_indicator = process_status.get('processing_status', None)

  self._RaiseIfNotMonitored(pid)

  display_name = process_status.get('display_name', '')

  number_of_consumed_event_tags = process_status.get(
      'number_of_consumed_event_tags', None)
  number_of_produced_event_tags = process_status.get(
      'number_of_produced_event_tags', None)

  number_of_consumed_events = process_status.get(
      'number_of_consumed_events', None)
  number_of_produced_events = process_status.get(
      'number_of_produced_events', None)

  number_of_consumed_reports = process_status.get(
      'number_of_consumed_reports', None)
  number_of_produced_reports = process_status.get(
      'number_of_produced_reports', None)

  number_of_consumed_sources = process_status.get(
      'number_of_consumed_sources', None)
  number_of_produced_sources = process_status.get(
      'number_of_produced_sources', None)

  number_of_consumed_warnings = process_status.get(
      'number_of_consumed_warnings', None)
  number_of_produced_warnings = process_status.get(
      'number_of_produced_warnings', None)

  if status_indicator != definitions.STATUS_INDICATOR_IDLE:
    last_activity_timestamp = process_status.get(
        'last_activity_timestamp', 0.0)

    if last_activity_timestamp:
      last_activity_timestamp += self._PROCESS_WORKER_TIMEOUT

      current_timestamp = time.time()
      if current_timestamp > last_activity_timestamp:
        logger.error((
            'Process {0:s} (PID: {1:d}) has not reported activity within '
            'the timeout period.').format(process.name, pid))
        status_indicator = definitions.STATUS_INDICATOR_NOT_RESPONDING

  self._processing_status.UpdateWorkerStatus(
      process.name, status_indicator, pid, used_memory, display_name,
      number_of_consumed_sources, number_of_produced_sources,
      number_of_consumed_events, number_of_produced_events,
      number_of_consumed_event_tags, number_of_produced_event_tags,
      number_of_consumed_reports, number_of_produced_reports,
      number_of_consumed_warnings, number_of_produced_warnings)
287,828
def _StartWorkerProcess(self, process_name, storage_writer):
  """Creates, starts, monitors and registers a worker process.

  Args:
    process_name (str): process name.
    storage_writer (StorageWriter): storage writer for a session storage
        used to create task storage.

  Returns:
    MultiProcessWorkerProcess: analysis worker process or None on error.
  """
  analysis_plugin = self._analysis_plugins.get(process_name, None)
  if not analysis_plugin:
    logger.error('Missing analysis plugin: {0:s}'.format(process_name))
    return None

  if self._use_zeromq:
    queue_name = '{0:s} output event queue'.format(process_name)
    output_event_queue = zeromq_queue.ZeroMQPushBindQueue(
        name=queue_name, timeout_seconds=self._QUEUE_TIMEOUT)
    # Open the queue so it can bind to a random port, and we can get the
    # port number to use in the input queue.
    output_event_queue.Open()

  else:
    output_event_queue = multi_process_queue.MultiProcessingQueue(
        timeout=self._QUEUE_TIMEOUT)

  self._event_queues[process_name] = output_event_queue

  if self._use_zeromq:
    queue_name = '{0:s} input event queue'.format(process_name)
    input_event_queue = zeromq_queue.ZeroMQPullConnectQueue(
        name=queue_name, delay_open=True, port=output_event_queue.port,
        timeout_seconds=self._QUEUE_TIMEOUT)

  else:
    input_event_queue = output_event_queue

  process = analysis_process.AnalysisProcess(
      input_event_queue, storage_writer, self._knowledge_base,
      analysis_plugin, self._processing_configuration,
      data_location=self._data_location,
      event_filter_expression=self._event_filter_expression,
      name=process_name)

  process.start()

  logger.info('Started analysis plugin: {0:s} (PID: {1:d}).'.format(
      process_name, process.pid))

  try:
    self._StartMonitoringProcess(process)
  except (IOError, KeyError) as exception:
    logger.error((
        'Unable to monitor analysis plugin: {0:s} (PID: {1:d}) '
        'with error: {2!s}').format(process_name, process.pid, exception))

    process.terminate()
    return None

  self._RegisterProcess(process)
  return process
287,829
def _GetLinkedPath(self, event):
  """Determines the linked path.

  Args:
    event (EventObject): event that contains a linked path.

  Returns:
    str: linked path or 'Unknown' if the event contains no path attributes.
  """
  if hasattr(event, 'local_path'):
    return event.local_path

  if hasattr(event, 'network_path'):
    return event.network_path

  if hasattr(event, 'relative_path'):
    paths = []
    if hasattr(event, 'working_directory'):
      paths.append(event.working_directory)
    paths.append(event.relative_path)
    return '\\'.join(paths)

  return 'Unknown'
287,832
def __init__(self, mount_path=None, path_specification=None):
  """Initializes a mount point.

  Args:
    mount_path (Optional[str]): path where the path specification is
        mounted, such as "/mnt/image" or "C:\\".
    path_specification (Optional[dfvfs.PathSpec]): path specification.
  """
  super(MountPoint, self).__init__()
  self.mount_path = mount_path
  self.path_specification = path_specification
287,834
def _DecodeURL(self, url):
  """Decodes the URL, replacing %XX escapes with their characters.

  Args:
    url (str): encoded URL.

  Returns:
    str: decoded URL.
  """
  if not url:
    return ''

  decoded_url = urlparse.unquote(url)
  if isinstance(decoded_url, py2to3.BYTES_TYPE):
    try:
      decoded_url = decoded_url.decode('utf-8')
    except UnicodeDecodeError as exception:
      decoded_url = decoded_url.decode('utf-8', errors='replace')
      logger.warning(
          'Unable to decode URL: {0:s} with error: {1!s}'.format(
              url, exception))

  return decoded_url
287,836
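For reference, the py2to3 byte handling above targets Python 2; on Python 3 the same percent-decoding is a single call. The sample URL is illustrative:

from urllib.parse import unquote

print(unquote('https://www.google.com/search?q=plaso%20super%20timeline'))
# https://www.google.com/search?q=plaso super timeline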
def _ExtractGMailSearchQuery(self, url):
  """Extracts a search query from a GMail search URL.

  GMail: https://mail.google.com/mail/u/0/#search/query[/?]

  Args:
    url (str): URL.

  Returns:
    str: search query or None if no query was found.
  """
  if 'search/' not in url:
    return None

  _, _, line = url.partition('search/')
  line, _, _ = line.partition('/')
  line, _, _ = line.partition('?')

  return line.replace('+', ' ')
287,837
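Walking an illustrative URL through the partition steps above:

url = 'https://mail.google.com/mail/u/0/#search/forensic+timeline/14ab3?compose=new'
_, _, line = url.partition('search/')  # 'forensic+timeline/14ab3?compose=new'
line, _, _ = line.partition('/')       # 'forensic+timeline'
line, _, _ = line.partition('?')       # unchanged, no '?' remains
print(line.replace('+', ' '))          # forensic timeline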
def _ExtractGoogleDocsSearchQuery(self, url):
  """Extracts a search query from a Google docs URL.

  Google Docs: https://docs.google.com/.*/u/0/?q=query

  Args:
    url (str): URL.

  Returns:
    str: search query or None if no query was found.
  """
  if 'q=' not in url:
    return None

  line = self._GetBetweenQEqualsAndAmpersand(url)
  if not line:
    return None

  return line.replace('+', ' ')
287,838
def _ExtractGoogleSearchQuery(self, url):
  """Extracts a search query from a Google URL.

  Google Drive: https://drive.google.com/drive/search?q=query
  Google Search: https://www.google.com/search?q=query
  Google Sites: https://sites.google.com/site/.*/system/app/pages/
      search?q=query

  Args:
    url (str): URL.

  Returns:
    str: search query or None if no query was found.
  """
  if 'search' not in url or 'q=' not in url:
    return None

  line = self._GetBetweenQEqualsAndAmpersand(url)
  if not line:
    return None

  return line.replace('+', ' ')
287,839
def _ExtractYahooSearchQuery(self, url):
  """Extracts a search query from a Yahoo search URL.

  Examples:
    https://search.yahoo.com/search?p=query
    https://search.yahoo.com/search;?p=query

  Args:
    url (str): URL.

  Returns:
    str: search query or None if no query was found.
  """
  if 'p=' not in url:
    return None

  _, _, line = url.partition('p=')
  before_and, _, _ = line.partition('&')
  if not before_and:
    return None

  yahoo_search_url = before_and.split()[0]
  return yahoo_search_url.replace('+', ' ')
287,840
def _ExtractYandexSearchQuery(self, url):
  """Extracts a search query from a Yandex search URL.

  Yandex: https://www.yandex.com/search/?text=query

  Args:
    url (str): URL.

  Returns:
    str: search query or None if no query was found.
  """
  if 'text=' not in url:
    return None

  _, _, line = url.partition('text=')
  before_and, _, _ = line.partition('&')
  if not before_and:
    return None

  yandex_search_url = before_and.split()[0]
  return yandex_search_url.replace('+', ' ')
287,841
def _GetBetweenQEqualsAndAmpersand(self, url):
  """Retrieves the substring between the substrings 'q=' and '&'.

  Args:
    url (str): URL.

  Returns:
    str: search query, the value between 'q=' and '&', or an empty string
        if no query was found.
  """
  # Make sure we're analyzing the query part of the URL.
  _, _, url = url.partition('?')
  # Look for a key value pair named 'q'.
  _, _, url = url.partition('q=')
  if not url:
    return ''

  # Strip additional key value pairs.
  url, _, _ = url.partition('&')
  return url
287,842
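The same steps applied to an illustrative Google search URL:

url = 'https://www.google.com/search?q=plaso+log2timeline&oq=plaso'
_, _, rest = url.partition('?')    # 'q=plaso+log2timeline&oq=plaso'
_, _, rest = rest.partition('q=')  # 'plaso+log2timeline&oq=plaso'
rest, _, _ = rest.partition('&')   # 'plaso+log2timeline'
print(rest.replace('+', ' '))      # plaso log2timeline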
def CompileReport(self, mediator):
  """Compiles an analysis report.

  Args:
    mediator (AnalysisMediator): mediates interactions between analysis
        plugins and other components, such as storage and dfvfs.

  Returns:
    AnalysisReport: analysis report.
  """
  results = {}
  for key, count in iter(self._counter.items()):
    search_engine, _, search_term = key.partition(':')
    results.setdefault(search_engine, {})
    results[search_engine][search_term] = count

  lines_of_text = []
  for search_engine, terms in sorted(results.items()):
    lines_of_text.append(' == ENGINE: {0:s} =='.format(search_engine))

    for search_term, count in sorted(
        terms.items(), key=lambda x: (x[1], x[0]), reverse=True):
      lines_of_text.append('{0:d} {1:s}'.format(count, search_term))

    # An empty string is added to have SetText create an empty line.
    lines_of_text.append('')

  lines_of_text.append('')
  report_text = '\n'.join(lines_of_text)
  analysis_report = reports.AnalysisReport(
      plugin_name=self.NAME, text=report_text)
  analysis_report.report_array = self._search_term_timeline
  analysis_report.report_dict = results
  return analysis_report
287,843
def ExamineEvent(self, mediator, event):
  """Analyzes an event.

  Args:
    mediator (AnalysisMediator): mediates interactions between analysis
        plugins and other components, such as storage and dfvfs.
    event (EventObject): event to examine.
  """
  # This event requires a URL attribute.
  url = getattr(event, 'url', None)
  if not url:
    return

  # TODO: refactor this, the source should be used in formatting only.
  # Check if we are dealing with a web history event.
  source, _ = formatters_manager.FormattersManager.GetSourceStrings(event)
  if source != 'WEBHIST':
    return

  for engine, url_expression, method_name in self._URL_FILTERS:
    callback_method = getattr(self, method_name, None)
    if not callback_method:
      # Log the method name; callback_method is None at this point.
      logger.warning('Missing method: {0:s}'.format(method_name))
      continue

    match = url_expression.search(url)
    if not match:
      continue

    search_query = callback_method(url)
    if not search_query:
      logger.warning('Missing search query for URL: {0:s}'.format(url))
      continue

    search_query = self._DecodeURL(search_query)
    if not search_query:
      continue

    event_tag = self._CreateEventTag(
        event, self._EVENT_TAG_COMMENT, self._EVENT_TAG_LABELS)
    mediator.ProduceEventTag(event_tag)

    self._counter['{0:s}:{1:s}'.format(engine, search_query)] += 1

    # Add the timeline format for each search term.
    timestamp = getattr(event, 'timestamp', 0)
    source = getattr(event, 'parser', 'N/A')
    source = getattr(event, 'plugin', source)
    self._search_term_timeline.append(
        SEARCH_OBJECT(timestamp, source, engine, search_query))
287,844
def ParseConversationRow(self, parser_mediator, query, row, **unused_kwargs):
  """Parses a conversation row from the database.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row resulting from query.
  """
  query_hash = hash(query)

  event_data = TangoAndroidConversationEventData()
  event_data.conversation_identifier = self._GetRowValue(
      query_hash, row, 'conv_id')

  # TODO: payload is a base64 encoded binary blob, we need to find the
  # structure to extract the relevant bits.
  # event_data.payload = self._GetRowValue(query_hash, row, 'payload')

  date_time = dfdatetime_semantic_time.NotSet()
  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
  parser_mediator.ProduceEventWithEventData(event, event_data)
287,848
def ParseMessageRow(self, parser_mediator, query, row, **unused_kwargs):
  """Parses a message row from the database.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row resulting from query.
  """
  query_hash = hash(query)

  event_data = TangoAndroidMessageEventData()
  event_data.message_identifier = self._GetRowValue(
      query_hash, row, 'msg_id')

  # TODO: payload is a base64 encoded binary blob, we need to find the
  # structure to extract the relevant bits.
  # event_data.payload = self._GetRowValue(query_hash, row, 'payload')

  event_data.direction = self._GetRowValue(query_hash, row, 'direction')

  timestamp = self._GetRowValue(query_hash, row, 'create_time')
  if timestamp:
    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  timestamp = self._GetRowValue(query_hash, row, 'send_time')
  if timestamp:
    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_SENT)
    parser_mediator.ProduceEventWithEventData(event, event_data)
287,849
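JavaTime timestamps are milliseconds since 1970-01-01 00:00:00 UTC. A quick standard-library sanity check of such a value (the timestamp is made up; plaso itself goes through dfdatetime):

from datetime import datetime, timezone

java_timestamp = 1331698658276  # milliseconds
print(datetime.fromtimestamp(java_timestamp / 1000, tz=timezone.utc))
# 2012-03-14 04:17:38.276000+00:00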
def ParseContactRow(self, parser_mediator, query, row, **unused_kwargs):
  """Parses a contact row from the database.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row resulting from query.
  """
  query_hash = hash(query)

  event_data = TangoAndroidContactEventData()

  first_name = self._GetRowValue(query_hash, row, 'first_name')
  try:
    decoded_text = base64_decode(first_name)
    event_data.first_name = codecs.decode(decoded_text, 'utf-8')
  except ValueError:
    event_data.first_name = first_name
    parser_mediator.ProduceExtractionWarning(
        'unable to parse first name: {0:s}'.format(first_name))

  last_name = self._GetRowValue(query_hash, row, 'last_name')
  try:
    decoded_text = base64_decode(last_name)
    event_data.last_name = codecs.decode(decoded_text, 'utf-8')
  except ValueError:
    event_data.last_name = last_name
    parser_mediator.ProduceExtractionWarning(
        'unable to parse last name: {0:s}'.format(last_name))

  event_data.birthday = self._GetRowValue(query_hash, row, 'birthday')
  event_data.gender = self._GetRowValue(query_hash, row, 'gender')

  status = self._GetRowValue(query_hash, row, 'status')
  try:
    decoded_text = base64_decode(status)
    event_data.status = codecs.decode(decoded_text, 'utf-8')
  except ValueError:
    event_data.status = status
    parser_mediator.ProduceExtractionWarning(
        'unable to parse status: {0:s}'.format(status))

  event_data.distance = self._GetRowValue(query_hash, row, 'distance')

  is_friend = self._GetRowValue(query_hash, row, 'friend')
  event_data.is_friend = bool(is_friend)

  event_data.friend_request_type = self._GetRowValue(
      query_hash, row, 'friend_request_type')

  friend_request_message = self._GetRowValue(
      query_hash, row, 'friend_request_message')
  try:
    decoded_text = base64_decode(friend_request_message)
    event_data.friend_request_message = codecs.decode(decoded_text, 'utf-8')
  except ValueError:
    event_data.friend_request_message = friend_request_message
    parser_mediator.ProduceExtractionWarning(
        'unable to parse friend request message: {0:s}'.format(
            friend_request_message))

  timestamp = self._GetRowValue(query_hash, row, 'last_active_time')
  if timestamp:
    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_LAST_ACTIVE)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  timestamp = self._GetRowValue(query_hash, row, 'last_access_time')
  if timestamp:
    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  timestamp = self._GetRowValue(query_hash, row, 'friend_request_time')
  if timestamp:
    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_SENT)
    parser_mediator.ProduceEventWithEventData(event, event_data)
287,850
def GetFormattedEventObject(cls, event):
  """Retrieves a string representation of the event.

  Args:
    event (EventObject): event.

  Returns:
    str: string representation of the event.
  """
  time_string = timelib.Timestamp.CopyToIsoFormat(event.timestamp)

  lines_of_text = [
      '+-' * 40,
      '[Timestamp]:',
      '  {0:s}'.format(time_string)]

  pathspec = getattr(event, 'pathspec', None)
  if pathspec:
    lines_of_text.append('[Pathspec]:')
    attribute_string = pathspec.comparable.replace('\n', '\n  ')
    attribute_string = '  {0:s}\n'.format(attribute_string)
    lines_of_text.append(attribute_string)

  # TODO: add support for event tag after event clean up.

  lines_of_text.append('[Reserved attributes]:')
  out_additional = ['[Additional attributes]:']

  for attribute_name, attribute_value in sorted(event.GetAttributes()):
    if attribute_name not in definitions.RESERVED_VARIABLE_NAMES:
      attribute_string = '  {{{0!s}}} {1!s}'.format(
          attribute_name, attribute_value)
      out_additional.append(attribute_string)

    elif attribute_name not in ('pathspec', 'tag'):
      attribute_string = '  {{{0!s}}} {1!s}'.format(
          attribute_name, attribute_value)
      lines_of_text.append(attribute_string)

  lines_of_text.append('')
  out_additional.append('')

  lines_of_text.extend(out_additional)
  return '\n'.join(lines_of_text)
287,851
def WriteEventBody(self, event):
  """Writes the body of an event to the output.

  Args:
    event (EventObject): event.
  """
  output_string = NativePythonFormatterHelper.GetFormattedEventObject(event)
  self._output_writer.Write(output_string)
287,852
def __init__(self, callback):
  """Initializes the RPC server object.

  Args:
    callback (function): callback to invoke on get status RPC request.
  """
  super(RPCServer, self).__init__()
  self._callback = callback
287,853
def PushTask(self, task):
  """Pushes a task onto the heap.

  Args:
    task (Task): task.

  Raises:
    ValueError: if the size of the storage file is not set in the task.
  """
  storage_file_size = getattr(task, 'storage_file_size', None)
  if not storage_file_size:
    raise ValueError('Task storage file size not set.')

  if task.file_entry_type == dfvfs_definitions.FILE_ENTRY_TYPE_DIRECTORY:
    weight = 1
  else:
    weight = storage_file_size

  task.merge_priority = weight

  heap_values = (weight, task)
  heapq.heappush(self._heap, heap_values)
  self._task_identifiers.add(task.identifier)
287,856
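heapq orders tuples lexicographically, so (weight, task) pops the smallest weight first; directory tasks get weight 1 and jump ahead of large file tasks. On equal weights Python falls back to comparing the second tuple element, so this generic sketch adds an insertion counter as a tie-breaker (plaso's Task objects may not be directly comparable; the task names here are made up):

import heapq
import itertools

heap = []
counter = itertools.count()  # tie-breaker for equal weights
for name, weight in [
    ('big-file-task', 150000), ('directory-task', 1), ('file-task', 7000)]:
  heapq.heappush(heap, (weight, next(counter), name))

while heap:
  weight, _, name = heapq.heappop(heap)
  print(weight, name)
# 1 directory-task
# 7000 file-task
# 150000 big-file-task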
def _UpdateLatestProcessingTime(self, task):
  """Updates the latest processing time of the task manager from the task.

  This method does not lock the manager and should be called by a method
  holding the manager lock.

  Args:
    task (Task): task to update the processing time of.
  """
  self._latest_task_processing_time = max(
      self._latest_task_processing_time, task.last_processing_time)
287,860
def CheckTaskToMerge(self, task):
  """Checks if the task should be merged.

  Args:
    task (Task): task.

  Returns:
    bool: True if the task should be merged.

  Raises:
    KeyError: if the task was not queued, processing or abandoned.
  """
  with self._lock:
    is_abandoned = task.identifier in self._tasks_abandoned
    is_processing = task.identifier in self._tasks_processing
    is_queued = task.identifier in self._tasks_queued

    if not is_queued and not is_processing and not is_abandoned:
      raise KeyError('Status of task {0:s} is unknown.'.format(
          task.identifier))

    return is_queued or is_processing or (
        is_abandoned and not task.has_retry)
287,861
def CreateTask(self, session_identifier):
  """Creates a task.

  Args:
    session_identifier (str): the identifier of the session the task is
        part of.

  Returns:
    Task: task attribute container.
  """
  task = tasks.Task(session_identifier)
  logger.debug('Created task: {0:s}.'.format(task.identifier))

  with self._lock:
    self._tasks_queued[task.identifier] = task
    self._total_number_of_tasks += 1

    self.SampleTaskStatus(task, 'created')

  return task
287,863
def CompleteTask(self, task):
  """Completes a task.

  The task is complete and can be removed from the task manager.

  Args:
    task (Task): task.

  Raises:
    KeyError: if the task was not merging.
  """
  with self._lock:
    if task.identifier not in self._tasks_merging:
      raise KeyError('Task {0:s} was not merging.'.format(task.identifier))

    self.SampleTaskStatus(task, 'completed')

    del self._tasks_merging[task.identifier]

    logger.debug('Completed task {0:s}.'.format(task.identifier))
287,864
def GetProcessedTaskByIdentifier(self, task_identifier):
  """Retrieves a task that has been processed.

  Args:
    task_identifier (str): unique identifier of the task.

  Returns:
    Task: a task that has been processed.

  Raises:
    KeyError: if the task was not processing, queued or abandoned.
  """
  with self._lock:
    task = self._tasks_processing.get(task_identifier, None)
    if not task:
      task = self._tasks_queued.get(task_identifier, None)
    if not task:
      task = self._tasks_abandoned.get(task_identifier, None)
    if not task:
      raise KeyError('Status of task {0:s} is unknown'.format(
          task_identifier))

  return task
287,866
def GetTaskPendingMerge(self, current_task):
  """Retrieves the first task that is pending merge or has a higher priority.

  This function will check if there is a task with a higher merge priority
  than the current_task being merged. If so, that task with the higher
  priority is returned.

  Args:
    current_task (Task): current task being merged or None if no such task.

  Returns:
    Task: the next task to merge or None if there is no task pending merge
        or with a higher priority.
  """
  next_task = self._tasks_pending_merge.PeekTask()
  if not next_task:
    return None

  if current_task and next_task.merge_priority > current_task.merge_priority:
    return None

  with self._lock:
    next_task = self._tasks_pending_merge.PopTask()
    self._tasks_merging[next_task.identifier] = next_task

  return next_task
287,868
def RemoveTask(self, task):
  """Removes an abandoned task.

  Args:
    task (Task): task.

  Raises:
    KeyError: if the task was not abandoned or the task was abandoned and
        was not retried.
  """
  with self._lock:
    if task.identifier not in self._tasks_abandoned:
      raise KeyError('Task {0:s} was not abandoned.'.format(task.identifier))

    if not task.has_retry:
      raise KeyError(
          'Will not remove a task {0:s} without retry task.'.format(
              task.identifier))

    del self._tasks_abandoned[task.identifier]

    logger.debug('Removed task {0:s}.'.format(task.identifier))
287,870
def SampleTaskStatus(self, task, status):
  """Takes a sample of the status of the task for profiling.

  Args:
    task (Task): a task.
    status (str): status.
  """
  if self._tasks_profiler:
    self._tasks_profiler.Sample(task, status)
287,871
def StartProfiling(self, configuration, identifier):
  """Starts profiling.

  Args:
    configuration (ProfilingConfiguration): profiling configuration.
    identifier (str): identifier of the profiling session used to create
        the sample filename.
  """
  if not configuration:
    return

  if configuration.HaveProfileTasks():
    self._tasks_profiler = profilers.TasksProfiler(identifier, configuration)
    self._tasks_profiler.Start()
287,872
def UpdateTaskAsPendingMerge(self, task):
  """Updates the task manager to reflect the task is ready to be merged.

  Args:
    task (Task): task.

  Raises:
    KeyError: if the task was not queued, processing or abandoned, or
        the task was abandoned and has a retry task.
  """
  with self._lock:
    is_abandoned = task.identifier in self._tasks_abandoned
    is_processing = task.identifier in self._tasks_processing
    is_queued = task.identifier in self._tasks_queued

    if not is_queued and not is_processing and not is_abandoned:
      raise KeyError('Status of task {0:s} is unknown.'.format(
          task.identifier))

    if is_abandoned and task.has_retry:
      raise KeyError('Will not merge a task {0:s} with retry task.'.format(
          task.identifier))

    if is_queued:
      logger.debug('Task {0:s} was queued, now merging.'.format(
          task.identifier))
      del self._tasks_queued[task.identifier]

    if is_processing:
      logger.debug('Task {0:s} was processing, now merging.'.format(
          task.identifier))
      del self._tasks_processing[task.identifier]

    if is_abandoned:
      logger.debug('Task {0:s} was abandoned, now merging.'.format(
          task.identifier))
      del self._tasks_abandoned[task.identifier]

    self._tasks_pending_merge.PushTask(task)
    self.SampleTaskStatus(task, 'pending_merge')

    task.UpdateProcessingTime()
    self._UpdateLatestProcessingTime(task)
287,873
def UpdateTaskAsProcessingByIdentifier(self, task_identifier):
  """Updates the task manager to reflect the task is processing.

  Args:
    task_identifier (str): unique identifier of the task.

  Raises:
    KeyError: if the task is not known to the task manager.
  """
  with self._lock:
    task_processing = self._tasks_processing.get(task_identifier, None)
    if task_processing:
      task_processing.UpdateProcessingTime()
      self._UpdateLatestProcessingTime(task_processing)
      return

    task_queued = self._tasks_queued.get(task_identifier, None)
    if task_queued:
      logger.debug('Task {0:s} was queued, now processing.'.format(
          task_identifier))
      self._tasks_processing[task_identifier] = task_queued
      del self._tasks_queued[task_identifier]

      task_queued.UpdateProcessingTime()
      self._UpdateLatestProcessingTime(task_queued)
      return

    task_abandoned = self._tasks_abandoned.get(task_identifier, None)
    if task_abandoned:
      del self._tasks_abandoned[task_identifier]
      self._tasks_processing[task_identifier] = task_abandoned
      logger.debug('Task {0:s} was abandoned, but now processing.'.format(
          task_identifier))

      task_abandoned.UpdateProcessingTime()
      self._UpdateLatestProcessingTime(task_abandoned)
      return

    if task_identifier in self._tasks_pending_merge:
      # No need to update the processing time, as this task is already
      # finished processing and is just waiting for merge.
      return

  # If we get here, we don't know what state the task is in, so raise.
  raise KeyError('Status of task {0:s} is unknown.'.format(task_identifier))
287,874
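Taken together, the task manager methods above imply a small state machine: queued or processing tasks can become pending merge, pending-merge tasks become merging, merging tasks complete, and abandoned tasks can be retried or removed. A sketch of those transitions as a lookup table; the states and edges are inferred from this section, not taken from plaso itself:

ALLOWED_TRANSITIONS = {
    'queued': {'processing', 'pending_merge'},
    'processing': {'pending_merge', 'abandoned'},
    'abandoned': {'processing', 'pending_merge', 'removed'},
    'pending_merge': {'merging'},
    'merging': {'completed'},
}

def ChangeTaskState(current_state, new_state):
  """Validates and applies a task state transition."""
  if new_state not in ALLOWED_TRANSITIONS.get(current_state, set()):
    raise KeyError('Cannot move task from {0:s} to {1:s}.'.format(
        current_state, new_state))
  return new_state

state = 'queued'
for next_state in ('processing', 'pending_merge', 'merging', 'completed'):
  state = ChangeTaskState(state, next_state)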
def ParseSmsRow(self, parser_mediator, query, row, **unused_kwargs):
  """Parses an SMS row.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row.
  """
  query_hash = hash(query)

  sms_read = self._GetRowValue(query_hash, row, 'read')
  sms_type = self._GetRowValue(query_hash, row, 'type')

  event_data = AndroidSMSEventData()
  event_data.address = self._GetRowValue(query_hash, row, 'address')
  event_data.body = self._GetRowValue(query_hash, row, 'body')
  event_data.offset = self._GetRowValue(query_hash, row, 'id')
  event_data.query = query
  event_data.sms_read = self.SMS_READ.get(sms_read, 'UNKNOWN')
  event_data.sms_type = self.SMS_TYPE.get(sms_type, 'UNKNOWN')

  timestamp = self._GetRowValue(query_hash, row, 'date')
  date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_CREATION)
  parser_mediator.ProduceEventWithEventData(event, event_data)
287,876
def _ParseValueData(self, parser_mediator, registry_key, registry_value):
  """Extracts event objects from an Explorer ProgramsCache value data.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
    registry_value (dfwinreg.WinRegistryValue): Windows Registry value.

  Raises:
    ParseError: if the value data could not be parsed.
  """
  value_data = registry_value.data
  value_data_size = len(value_data)
  if value_data_size < 4:
    return

  header_map = self._GetDataTypeMap('programscache_header')

  try:
    header = self._ReadStructureFromByteStream(
        value_data, 0, header_map)
  except (ValueError, errors.ParseError) as exception:
    parser_mediator.ProduceExtractionWarning(
        'unable to parse header value with error: {0!s}'.format(
            exception))
    return

  if header.format_version not in (1, 9, 12, 19):
    parser_mediator.ProduceExtractionWarning(
        'unsupported format version: {0:d}'.format(header.format_version))
    return

  known_folder_identifier = None
  if header.format_version == 1:
    value_data_offset = 8

  elif header.format_version == 9:
    value_data_offset = 6

  elif header.format_version in (12, 19):
    known_folder_identifier = uuid.UUID(bytes_le=value_data[4:20])
    value_data_offset = 20

  entry_header_map = self._GetDataTypeMap('programscache_entry_header')
  entry_footer_map = self._GetDataTypeMap('programscache_entry_footer')

  sentinel = 0
  if header.format_version != 9:
    try:
      entry_footer = self._ReadStructureFromByteStream(
          value_data[value_data_offset:], value_data_offset,
          entry_footer_map)
    except (ValueError, errors.ParseError) as exception:
      parser_mediator.ProduceExtractionWarning((
          'unable to parse sentinel at offset: 0x{0:08x} '
          'with error: {1!s}').format(value_data_offset, exception))
      return

    value_data_offset += entry_footer_map.GetByteSize()

    sentinel = entry_footer.sentinel

  link_targets = []
  while sentinel in (0x00, 0x01):
    if value_data_offset >= value_data_size:
      break

    try:
      entry_header = self._ReadStructureFromByteStream(
          value_data[value_data_offset:], value_data_offset,
          entry_header_map)
    except (ValueError, errors.ParseError) as exception:
      parser_mediator.ProduceExtractionWarning((
          'unable to parse entry header at offset: 0x{0:08x} '
          'with error: {1!s}').format(value_data_offset, exception))
      break

    value_data_offset += entry_header_map.GetByteSize()

    display_name = '{0:s} {1:s}'.format(
        registry_key.path, registry_value.name)

    shell_items_parser = shell_items.ShellItemsParser(display_name)
    shell_items_parser.ParseByteStream(
        parser_mediator, value_data[value_data_offset:],
        codepage=parser_mediator.codepage)

    link_target = shell_items_parser.CopyToPath()
    link_targets.append(link_target)

    value_data_offset += entry_header.data_size

    try:
      entry_footer = self._ReadStructureFromByteStream(
          value_data[value_data_offset:], value_data_offset,
          entry_footer_map)
    except (ValueError, errors.ParseError) as exception:
      parser_mediator.ProduceExtractionWarning((
          'unable to parse entry footer at offset: 0x{0:08x} '
          'with error: {1!s}').format(value_data_offset, exception))
      return

    value_data_offset += entry_footer_map.GetByteSize()

    sentinel = entry_footer.sentinel

  # TODO: recover remaining items.

  if known_folder_identifier:
    known_folder_identifier = '{0!s}'.format(known_folder_identifier)

  event_data = windows_events.WindowsRegistryListEventData()
  event_data.key_path = registry_key.path
  event_data.known_folder_identifier = known_folder_identifier
  event_data.list_name = registry_value.name
  event_data.list_values = ' '.join([
      '{0:d}: {1:s}'.format(index, link_target)
      for index, link_target in enumerate(link_targets)])
  event_data.value_name = registry_value.name

  event = time_events.DateTimeValuesEvent(
      registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
  parser_mediator.ProduceEventWithEventData(event, event_data)
287,877
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
  """Extracts events from a Windows Registry key.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
  """
  registry_value = registry_key.GetValueByName('ProgramsCache')
  if registry_value:
    self._ParseValueData(parser_mediator, registry_key, registry_value)

  registry_value = registry_key.GetValueByName('ProgramsCacheSMP')
  if registry_value:
    self._ParseValueData(parser_mediator, registry_key, registry_value)

  registry_value = registry_key.GetValueByName('ProgramsCacheTBP')
  if registry_value:
    self._ParseValueData(parser_mediator, registry_key, registry_value)

  values_dict = {}
  for registry_value in registry_key.GetValues():
    # Ignore the default value.
    if not registry_value.name or registry_value.name in (
        'ProgramsCache', 'ProgramsCacheSMP', 'ProgramsCacheTBP'):
      continue

    # Ignore any value that is empty or that does not contain a string.
    if not registry_value.data or not registry_value.DataIsString():
      continue

    values_dict[registry_value.name] = registry_value.GetDataAsObject()

  event_data = windows_events.WindowsRegistryEventData()
  event_data.key_path = registry_key.path
  event_data.offset = registry_key.offset
  event_data.regvalue = values_dict

  event = time_events.DateTimeValuesEvent(
      registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
  parser_mediator.ProduceEventWithEventData(event, event_data)
287,878
def __init__(self, input_reader=None, output_writer=None):
  """Initializes the CLI tool object.

  Args:
    input_reader (Optional[InputReader]): input reader, where None
        indicates that the stdin input reader should be used.
    output_writer (Optional[OutputWriter]): output writer, where None
        indicates that the stdout output writer should be used.
  """
  super(PstealTool, self).__init__(
      input_reader=input_reader, output_writer=output_writer)
  self._artifacts_registry = None
  self._command_line_arguments = None
  self._deduplicate_events = True
  self._enable_sigsegv_handler = False
  self._knowledge_base = knowledge_base.KnowledgeBase()
  self._number_of_analysis_reports = 0
  self._number_of_extraction_workers = 0
  self._output_format = None
  self._parsers_manager = parsers_manager.ParsersManager
  self._preferred_language = 'en-US'
  self._preferred_year = None
  self._status_view_mode = status_view.StatusView.MODE_WINDOW
  self._status_view = status_view.StatusView(self._output_writer, self.NAME)
  self._time_slice = None
  self._use_time_slicer = False
  self.list_hashers = False
  self.list_language_identifiers = False
  self.list_output_modules = False
  self.list_parsers_and_plugins = False
  self.list_timezones = False
287,880
def ParseOptions(self, options):
  """Parses tool specific options.

  Args:
    options (argparse.Namespace): command line arguments.

  Raises:
    BadConfigOption: if the options are invalid.
  """
  # The extraction options are dependent on the data location.
  helpers_manager.ArgumentHelperManager.ParseOptions(
      options, self, names=['data_location'])

  self._ReadParserPresetsFromFile()

  # The output modules options are dependent on the preferred language
  # and preferred time zone options.
  self._ParseTimezoneOption(options)

  argument_helper_names = [
      'artifact_definitions', 'hashers', 'language', 'parsers']
  helpers_manager.ArgumentHelperManager.ParseOptions(
      options, self, names=argument_helper_names)

  self.list_hashers = self._hasher_names_string == 'list'
  self.list_language_identifiers = self._preferred_language == 'list'
  self.list_parsers_and_plugins = self._parser_filter_expression == 'list'

  # Check the list options first otherwise required options will raise.
  if (self.list_hashers or self.list_language_identifiers or
      self.list_parsers_and_plugins or self.list_timezones):
    return

  # Check output modules after the other listable options, otherwise
  # it could raise with "requires an output file".
  helpers_manager.ArgumentHelperManager.ParseOptions(
      options, self, names=['output_modules'])

  self.list_output_modules = self._output_format == 'list'
  if self.list_output_modules:
    return

  self._ParseInformationalOptions(options)

  argument_helper_names = ['extraction', 'status_view']
  helpers_manager.ArgumentHelperManager.ParseOptions(
      options, self, names=argument_helper_names)

  self._ParseLogFileOptions(options)

  self._ParseStorageMediaOptions(options)

  self._ParsePerformanceOptions(options)
  self._ParseProcessingOptions(options)

  self._storage_file_path = getattr(options, 'storage_file', None)
  if not self._storage_file_path:
    self._storage_file_path = self._GenerateStorageFileName()

  self._output_filename = getattr(options, 'write', None)

  if not self._output_filename:
    raise errors.BadConfigOption((
        'Output format: {0:s} requires an output file '
        '(-w OUTPUT_FILE)').format(self._output_format))

  if os.path.exists(self._output_filename):
    raise errors.BadConfigOption(
        'Output file already exists: {0:s}.'.format(self._output_filename))

  self._EnforceProcessMemoryLimit(self._process_memory_limit)

  self._output_module = self._CreateOutputModule(options)
287,884
def _FormatDescription(self, event):
  """Formats the description.

  Args:
    event (EventObject): event.

  Returns:
    str: formatted description field.

  Raises:
    NoFormatterFound: if no event formatter can be found to match the data
        type in the event.
  """
  date_time_string = timelib.Timestamp.CopyToIsoFormat(
      event.timestamp, timezone=self._output_mediator.timezone)
  timestamp_description = event.timestamp_desc or 'UNKNOWN'

  message, _ = self._output_mediator.GetFormattedMessages(event)
  if message is None:
    data_type = getattr(event, 'data_type', 'UNKNOWN')
    raise errors.NoFormatterFound(
        'Unable to find event formatter for: {0:s}.'.format(data_type))

  description = '{0:s}; {1:s}; {2:s}'.format(
      date_time_string, timestamp_description,
      message.replace(self._DESCRIPTION_FIELD_DELIMITER, ' '))

  return self._SanitizeField(description)
287,885
def _FormatHostname(self, event):
  """Formats the hostname.

  Args:
    event (EventObject): event.

  Returns:
    str: formatted hostname field.
  """
  hostname = self._output_mediator.GetHostname(event)
  return self._SanitizeField(hostname)
287,886
def _FormatSource(self, event):
  """Formats the source.

  Args:
    event (EventObject): event.

  Returns:
    str: formatted source field.

  Raises:
    NoFormatterFound: if no event formatter can be found to match the data
        type in the event.
  """
  source_short, _ = self._output_mediator.GetFormattedSources(event)
  if source_short is None:
    data_type = getattr(event, 'data_type', 'UNKNOWN')
    raise errors.NoFormatterFound(
        'Unable to find event formatter for: {0:s}.'.format(data_type))

  return self._SanitizeField(source_short)
287,887
def _FormatUsername(self, event):
  """Formats the username.

  Args:
    event (EventObject): event.

  Returns:
    str: formatted username field.
  """
  username = self._output_mediator.GetUsername(event)
  return self._SanitizeField(username)
287,888
def _SanitizeField(self, field):
  """Sanitizes a field for output.

  This method removes the field delimiter from the field string.

  Args:
    field (str): field value.

  Returns:
    str: formatted field value.
  """
  if self._FIELD_DELIMITER and isinstance(field, py2to3.STRING_TYPES):
    return field.replace(self._FIELD_DELIMITER, ' ')
  return field
287,889
def _FormatNotes(self, event):
  """Formats the notes.

  Args:
    event (EventObject): event.

  Returns:
    str: formatted notes field.
  """
  inode = event.inode
  if inode is None:
    inode = '-'

  notes = getattr(event, 'notes', '')
  if not notes:
    display_name = getattr(event, 'display_name', '')
    notes = 'File: {0:s} inode: {1!s}'.format(display_name, inode)

  return self._SanitizeField(notes)
287,890
def WriteEventBody(self, event):
  """Writes the body of an event object to the output.

  Args:
    event (EventObject): event.
  """
  if not hasattr(event, 'timestamp'):
    return

  # TODO: preserve dfdatetime as an object.
  date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
      timestamp=event.timestamp)
  posix_timestamp = date_time.CopyToPosixTimestamp()
  if not posix_timestamp:
    posix_timestamp = 0

  source = self._FormatSource(event)
  hostname = self._FormatHostname(event)
  username = self._FormatUsername(event)
  description = self._FormatDescription(event)
  notes = self._FormatNotes(event)

  out_write = '{0:d}|{1:s}|{2:s}|{3:s}|{4:s}|{5!s}|{6!s}\n'.format(
      posix_timestamp, source, hostname, username, description,
      self._output_mediator.timezone, notes)

  self._output_writer.Write(out_write)
287,891
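The write above emits a seven-field pipe-delimited line, an extended TLN-style layout: Time|Source|Host|User|Description|TZ|Notes. A sample line with made-up values:

line = '{0:d}|{1:s}|{2:s}|{3:s}|{4:s}|{5!s}|{6!s}\n'.format(
    1331698658, 'FILE', 'WORKSTATION-01', 'alice',
    '2012-03-14T04:17:38+00:00; crtime; Creation of example.txt',
    'UTC', 'File: /home/alice/example.txt inode: 12345')
print(line, end='')
# 1331698658|FILE|WORKSTATION-01|alice|2012-03-14T04:17:38+00:00; crtime; Creation of example.txt|UTC|File: /home/alice/example.txt inode: 12345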
def ParseSearchRow(self, parser_mediator, query, row, **unused_kwargs):
  """Parses a search row from the database.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row resulting from query.
  """
  query_hash = hash(query)

  event_data = TwitterAndroidSearchEventData()
  event_data.query = query
  event_data.name = self._GetRowValue(query_hash, row, 'name')
  event_data.search_query = self._GetRowValue(query_hash, row, 'query')

  timestamp = self._GetRowValue(query_hash, row, 'time')
  if timestamp:
    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
287,895
def ParseStatusRow(self, parser_mediator, query, row, **unused_kwargs):
  """Parses a status row from the database.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row resulting from query.
  """
  query_hash = hash(query)

  event_data = TwitterAndroidStatusEventData()
  event_data.query = query
  event_data.identifier = self._GetRowValue(query_hash, row, '_id')
  event_data.author_identifier = self._GetRowValue(
      query_hash, row, 'author_id')
  event_data.username = self._GetRowValue(query_hash, row, 'username')
  event_data.content = self._GetRowValue(query_hash, row, 'content')
  event_data.favorited = self._GetRowValue(query_hash, row, 'favorited')
  event_data.retweeted = self._GetRowValue(query_hash, row, 'retweeted')

  timestamp = self._GetRowValue(query_hash, row, 'time')
  if timestamp:
    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
287,896
def ParseContactRow(self, parser_mediator, query, row, **unused_kwargs):
  """Parses a contact row from the database.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row resulting from query.
  """
  query_hash = hash(query)

  event_data = TwitterAndroidContactEventData()
  event_data.query = query
  event_data.identifier = self._GetRowValue(query_hash, row, '_id')
  event_data.user_identifier = self._GetRowValue(query_hash, row, 'user_id')
  event_data.username = self._GetRowValue(query_hash, row, 'username')
  event_data.name = self._GetRowValue(query_hash, row, 'name')
  event_data.description = self._GetRowValue(query_hash, row, 'description')
  event_data.web_url = self._GetRowValue(query_hash, row, 'web_url')
  event_data.location = self._GetRowValue(query_hash, row, 'location')
  event_data.followers = self._GetRowValue(query_hash, row, 'followers')
  event_data.friends = self._GetRowValue(query_hash, row, 'friends')
  event_data.statuses = self._GetRowValue(query_hash, row, 'statuses')
  event_data.image_url = self._GetRowValue(query_hash, row, 'image_url')

  timestamp = self._GetRowValue(query_hash, row, 'profile_created')
  if timestamp:
    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  timestamp = self._GetRowValue(query_hash, row, 'updated')
  if timestamp:
    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_UPDATE)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  timestamp = self._GetRowValue(query_hash, row, 'friendship_time')
  if timestamp:
    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
287,897
def _ConvertAttributeValueToDict(cls, attribute_value):
  """Converts an attribute value into a JSON dictionary.

  Args:
    attribute_value (object): an attribute value.

  Returns:
    dict|list|object: the JSON serialized object, which can be a dictionary,
        a list or the unchanged attribute value.
  """
  if isinstance(attribute_value, py2to3.BYTES_TYPE):
    encoded_value = binascii.b2a_qp(attribute_value)
    encoded_value = codecs.decode(encoded_value, 'ascii')
    attribute_value = {
        '__type__': 'bytes',
        'stream': '{0:s}'.format(encoded_value)
    }

  elif isinstance(attribute_value, (list, tuple)):
    json_list = []
    for list_element in attribute_value:
      json_dict = cls._ConvertAttributeValueToDict(list_element)
      json_list.append(json_dict)

    if isinstance(attribute_value, list):
      attribute_value = json_list
    else:
      attribute_value = {
          '__type__': 'tuple',
          'values': json_list
      }

  elif isinstance(attribute_value, collections.Counter):
    attribute_value = cls._ConvertCollectionsCounterToDict(attribute_value)

  elif isinstance(attribute_value, dfvfs_path_spec.PathSpec):
    attribute_value = cls._ConvertPathSpecToDict(attribute_value)

  elif isinstance(attribute_value, containers_interface.AttributeContainer):
    attribute_value = cls._ConvertAttributeContainerToDict(attribute_value)

  return attribute_value
287,899
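The bytes branch above round-trips binary values through quoted-printable encoding so they survive JSON. A minimal demonstration with the same standard-library calls; the sample bytes are arbitrary:

import binascii
import codecs

attribute_value = b'MZ\x90\x00'
encoded_value = codecs.decode(binascii.b2a_qp(attribute_value), 'ascii')
json_dict = {'__type__': 'bytes', 'stream': encoded_value}
print(json_dict)  # {'__type__': 'bytes', 'stream': 'MZ=90=00'}

# Decoding reverses the quoted-printable encoding.
restored = binascii.a2b_qp(json_dict['stream'].encode('ascii'))
assert restored == attribute_value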
def _ConvertListToObject(cls, json_list):
  """Converts a JSON list into an object.

  Args:
    json_list (list[object]): JSON serialized objects.

  Returns:
    list[object]: a deserialized list.
  """
  list_value = []
  for json_list_element in json_list:
    if isinstance(json_list_element, dict):
      list_value.append(cls._ConvertDictToObject(json_list_element))

    elif isinstance(json_list_element, list):
      list_value.append(cls._ConvertListToObject(json_list_element))

    else:
      list_value.append(json_list_element)

  return list_value
287,903
def ReadSerialized(cls, json_string):  # pylint: disable=arguments-differ
  """Reads an attribute container from serialized form.

  Args:
    json_string (str): JSON serialized attribute container.

  Returns:
    AttributeContainer: attribute container or None.
  """
  if json_string:
    json_dict = json.loads(json_string)
    return cls.ReadSerializedDict(json_dict)

  return None
287,906
def ReadSerializedDict(cls, json_dict):
  """Reads an attribute container from serialized dictionary form.

  Args:
    json_dict (dict[str, object]): JSON serialized objects.

  Returns:
    AttributeContainer: attribute container or None.

  Raises:
    TypeError: if the serialized dictionary does not contain an
        AttributeContainer.
  """
  if json_dict:
    json_object = cls._ConvertDictToObject(json_dict)
    if not isinstance(json_object, containers_interface.AttributeContainer):
      # Note: use !s here; a type object does not support the 's' format
      # specifier, so '{0:s}' would itself raise TypeError.
      raise TypeError('{0!s} is not an attribute container type.'.format(
          type(json_object)))

    return json_object

  return None
287,907
def WriteSerialized(cls, attribute_container):
  """Writes an attribute container to serialized form.

  Args:
    attribute_container (AttributeContainer): attribute container.

  Returns:
    str: a JSON string containing the serialized form.
  """
  json_dict = cls.WriteSerializedDict(attribute_container)
  return json.dumps(json_dict)
287,908
def ParseOptions(cls, options, configuration_object):
  """Parses and validates options.

  Args:
    options (argparse.Namespace): parser options.
    configuration_object (CLITool): object to be configured by the argument
        helper.

  Raises:
    BadConfigObject: when the configuration object is of the wrong type.
    BadConfigOption: when a configuration parameter fails validation.
  """
  if not isinstance(configuration_object, tools.CLITool):
    raise errors.BadConfigObject(
        'Configuration object is not an instance of CLITool')

  process_memory_limit = cls._ParseNumericOption(
      options, 'process_memory_limit')

  if process_memory_limit and process_memory_limit < 0:
    raise errors.BadConfigOption(
        'Invalid process memory limit: value cannot be negative.')

  setattr(configuration_object, '_process_memory_limit',
          process_memory_limit)
287,909
Initializes an output module. Args: output_mediator (OutputMediator): mediates interactions between output modules and other components, such as storage and dfvfs.
def __init__(self, output_mediator):
  super(OutputModule, self).__init__()
  self._output_mediator = output_mediator
287,910
Reports an event related error. Args: event (EventObject): event. error_message (str): error message.
def _ReportEventError(self, event, error_message):
  event_identifier = event.GetIdentifier()
  event_identifier_string = event_identifier.CopyToString()
  display_name = getattr(event, 'display_name', None) or 'N/A'
  parser_chain = getattr(event, 'parser', None) or 'N/A'
  error_message = (
      'Event: {0!s} data type: {1:s} display name: {2:s} '
      'parser chain: {3:s} with error: {4:s}').format(
          event_identifier_string, event.data_type, display_name,
          parser_chain, error_message)
  logger.error(error_message)
287,911
Writes the event to the output. Args: event (EventObject): event.
def WriteEvent(self, event):
  self.WriteEventStart()

  try:
    self.WriteEventBody(event)
  except errors.NoFormatterFound as exception:
    error_message = 'unable to retrieve formatter with error: {0!s}'.format(
        exception)
    self._ReportEventError(event, error_message)

  except errors.WrongFormatter as exception:
    error_message = 'wrong formatter with error: {0!s}'.format(exception)
    self._ReportEventError(event, error_message)

  self.WriteEventEnd()
287,912
Initializes a linear output module. Args: output_mediator (OutputMediator): mediates interactions between output modules and other components, such as storage and dfvfs.
def __init__(self, output_mediator):
  super(LinearOutputModule, self).__init__(output_mediator)
  self._output_writer = None
287,913
Parses a contact row from the database. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query.
def ParseContactRow(self, parser_mediator, query, row, **unused_kwargs):
  query_hash = hash(query)

  event_data = TwitterIOSContactEventData()
  event_data.description = self._GetRowValue(query_hash, row, 'description')
  event_data.followers_count = self._GetRowValue(
      query_hash, row, 'followersCount')
  event_data.following = self._GetRowValue(query_hash, row, 'following')
  event_data.following_count = self._GetRowValue(
      query_hash, row, 'followingCount')
  event_data.location = self._GetRowValue(query_hash, row, 'location')
  event_data.name = self._GetRowValue(query_hash, row, 'name')
  event_data.profile_url = self._GetRowValue(
      query_hash, row, 'profileImageUrl')
  event_data.query = query
  event_data.screen_name = self._GetRowValue(query_hash, row, 'screenName')
  event_data.url = self._GetRowValue(query_hash, row, 'url')

  timestamp = self._GetRowValue(query_hash, row, 'createdDate')
  if timestamp:
    # Convert the floating point value to an integer.
    timestamp = int(timestamp)
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  timestamp = self._GetRowValue(query_hash, row, 'updatedAt')
  if timestamp:
    # Convert the floating point value to an integer.
    timestamp = int(timestamp)
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_UPDATE)
    parser_mediator.ProduceEventWithEventData(event, event_data)
287,916
Parses a status row from the database. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query.
def ParseStatusRow(self, parser_mediator, query, row, **unused_kwargs):
  query_hash = hash(query)

  event_data = TwitterIOSStatusEventData()
  event_data.favorite_count = self._GetRowValue(
      query_hash, row, 'favoriteCount')
  event_data.favorited = self._GetRowValue(query_hash, row, 'favorited')
  event_data.name = self._GetRowValue(query_hash, row, 'name')
  event_data.query = query
  event_data.retweet_count = self._GetRowValue(
      query_hash, row, 'retweetCount')
  event_data.text = self._GetRowValue(query_hash, row, 'text')
  event_data.user_id = self._GetRowValue(query_hash, row, 'user_id')

  timestamp = self._GetRowValue(query_hash, row, 'date')
  if timestamp:
    # Convert the floating point value to an integer.
    timestamp = int(timestamp)
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  timestamp = self._GetRowValue(query_hash, row, 'updatedAt')
  if timestamp:
    # Convert the floating point value to an integer.
    timestamp = int(timestamp)
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_UPDATE)
    parser_mediator.ProduceEventWithEventData(event, event_data)
287,917
Parses a structure of tokens derived from a line of a text file. Args: parser_mediator (ParserMediator): parser mediator. key (str): identifier of the structure of tokens. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file. Raises: ParseError: when the structure type is unknown.
def ParseRecord(self, parser_mediator, key, structure):
  if key != 'line':
    raise errors.ParseError(
        'Unable to parse record, unknown structure: {0:s}'.format(key))

  try:
    date_time = dfdatetime_time_elements.TimeElements(
        time_elements_tuple=structure.date_time)
  except ValueError:
    parser_mediator.ProduceExtractionWarning(
        'invalid date time value: {0!s}'.format(structure.date_time))
    return

  body_text = structure.body
  if not body_text:
    parser_mediator.ProduceExtractionWarning(
        'invalid body {0:s}'.format(structure.body))
    return

  event_data = DpkgEventData()
  event_data.body = body_text

  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_ADDED)
  parser_mediator.ProduceEventWithEventData(event, event_data)
287,919
Verifies if a line from a text file is in the expected format. Args: parser_mediator (ParserMediator): parser mediator. line (str): line from a text file. Returns: bool: True if the line is in the expected format, False if not.
def VerifyStructure(self, parser_mediator, line):
  try:
    structure = self._DPKG_LOG_LINE.parseString(line)
  except pyparsing.ParseException as exception:
    logger.debug(
        'Unable to parse Debian dpkg.log file with error: {0!s}'.format(
            exception))
    return False

  return 'date_time' in structure and 'body' in structure
287,920
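A hedged sketch of the verifier above on a typical Debian dpkg.log line; the parser import path is an assumption and the mediator is unused on the success path.

from plaso.parsers import dpkg

line = '2016-08-03 15:25:53 status installed python2.7-minimal:amd64 2.7.12-1'
print(dpkg.DpkgParser().VerifyStructure(None, line))  # True if grammar matches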
Reads an operating system artifact from a dictionary. Args: operating_system_values (dict[str, object]): operating system values. Returns: OperatingSystemArtifact: an operating system artifact attribute container. Raises: MalformedPresetError: if the format of the operating system values is not set or incorrect.
def _ReadOperatingSystemArtifactValues(self, operating_system_values):
  if not operating_system_values:
    raise errors.MalformedPresetError('Missing operating system values.')

  family = operating_system_values.get('family', None)
  product = operating_system_values.get('product', None)
  version = operating_system_values.get('version', None)

  if not family and not product:
    raise errors.MalformedPresetError(
        'Invalid operating system missing family and product.')

  return artifacts.OperatingSystemArtifact(
      family=family, product=product, version=version)
287,922
Reads a parser preset from a dictionary. Args: preset_definition_values (dict[str, object]): preset definition values. Returns: ParserPreset: a parser preset. Raises: MalformedPresetError: if the format of the preset definition is not set or incorrect, or the preset of a specific operating system has already been set.
def _ReadParserPresetValues(self, preset_definition_values):
  if not preset_definition_values:
    raise errors.MalformedPresetError('Missing preset definition values.')

  name = preset_definition_values.get('name', None)
  if not name:
    raise errors.MalformedPresetError(
        'Invalid preset definition missing name.')

  parsers = preset_definition_values.get('parsers', None)
  if not parsers:
    raise errors.MalformedPresetError(
        'Invalid preset definition missing parsers.')

  parser_preset = ParserPreset(name, parsers)

  for operating_system_values in preset_definition_values.get(
      'operating_systems', []):
    operating_system = self._ReadOperatingSystemArtifactValues(
        operating_system_values)
    parser_preset.operating_systems.append(operating_system)

  return parser_preset
287,923
Reads parser and parser plugin presets from a file-like object. Args: file_object (file): file-like object containing the parser and parser plugin presets definitions. Yields: ParserPreset: a parser preset. Raises: MalformedPresetError: if one or more plugin preset definitions are malformed.
def _ReadPresetsFromFileObject(self, file_object):
  yaml_generator = yaml.safe_load_all(file_object)

  last_preset_definition = None
  for yaml_definition in yaml_generator:
    try:
      preset_definition = self._ReadParserPresetValues(yaml_definition)
    except errors.MalformedPresetError as exception:
      error_location = 'At start'
      if last_preset_definition:
        error_location = 'After: {0:s}'.format(last_preset_definition.name)

      raise errors.MalformedPresetError(
          '{0:s} {1!s}'.format(error_location, exception))

    yield preset_definition
    last_preset_definition = preset_definition
287,924
Retrieves a specific preset definition by name. Args: name (str): name of the preset. Returns: ParserPreset: a parser preset or None if not available.
def GetPresetByName(self, name):
  name = name.lower()
  return self._definitions.get(name, None)
287,925
Retrieves preset definitions for a specific operating system. Args: operating_system (OperatingSystemArtifact): an operating system artifact attribute container. Returns: list[PresetDefinition]: preset definitions that correspond to the operating system.
def GetPresetsByOperatingSystem(self, operating_system):
  preset_definitions = []
  for preset_definition in self._definitions.values():
    for preset_operating_system in preset_definition.operating_systems:
      if preset_operating_system.IsEquivalent(operating_system):
        preset_definitions.append(preset_definition)

  return preset_definitions
287,926
Reads parser and parser plugin presets from a file. Args: path (str): path of the file that contains the parser and parser plugin presets configuration. Raises: MalformedPresetError: if one or more plugin preset definitions are malformed.
def ReadFromFile(self, path):
  self._definitions = {}

  with open(path, 'r') as file_object:
    for preset_definition in self._ReadPresetsFromFileObject(file_object):
      self._definitions[preset_definition.name] = preset_definition
287,927
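A sketch of loading preset definitions with the ReadFromFile and GetPresetByName methods above; 'ParserPresetsManager' is a hypothetical name for the class these methods belong to.

with open('presets.yaml', 'w') as file_object:
  file_object.write(
      'name: linux\n'
      'parsers: [syslog, dpkg]\n'
      'operating_systems:\n'
      '- {family: Linux}\n')

manager = ParserPresetsManager()  # hypothetical class name, see lead-in
manager.ReadFromFile('presets.yaml')
print(manager.GetPresetByName('LINUX').parsers)  # ['syslog', 'dpkg']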
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
def GetMessages(self, formatter_mediator, event):
  if self.DATA_TYPE != event.data_type:
    raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
        event.data_type))

  event_values = event.CopyToDict()

  visit_type = event_values.get('visit_type', 0)
  transition = self._URL_TRANSITIONS.get(visit_type, None)
  if transition:
    transition_str = 'Transition: {0!s}'.format(transition)

  extra = event_values.get('extra', None)
  if extra:
    if transition:
      extra.append(transition_str)
    event_values['extra_string'] = ' '.join(extra)

  elif transition:
    event_values['extra_string'] = transition_str

  return self._ConditionalFormatMessages(event_values)
287,928
Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
def AddArguments(cls, argument_group):
  argument_group.add_argument(
      '--virustotal-api-key', '--virustotal_api_key',
      dest='virustotal_api_key', type=str, action='store', default=None,
      metavar='API_KEY', help=(
          'Specify the API key for use with VirusTotal.'))

  argument_group.add_argument(
      '--virustotal-free-rate-limit', '--virustotal_free_rate_limit',
      dest='virustotal_free_rate_limit', action='store_false',
      default=cls._DEFAULT_RATE_LIMIT, help=(
          'Limit VirusTotal requests to the default free API key rate of '
          '4 requests per minute. Set this to false if you have a key '
          'for the private API.'))

  argument_group.add_argument(
      '--virustotal-hash', '--virustotal_hash', dest='virustotal_hash',
      type=str, action='store', choices=['md5', 'sha1', 'sha256'],
      default=cls._DEFAULT_HASH, metavar='HASH', help=(
          'Type of hash to query VirusTotal, the default is: {0:s}'.format(
              cls._DEFAULT_HASH)))
287,929
Parses and validates options. Args: options (argparse.Namespace): parser options. analysis_plugin (VirusTotalAnalysisPlugin): analysis plugin to configure. Raises: BadConfigObject: when the output module object is of the wrong type. BadConfigOption: when a configuration parameter fails validation or when unable to connect to VirusTotal.
def ParseOptions(cls, options, analysis_plugin):
  if not isinstance(analysis_plugin, virustotal.VirusTotalAnalysisPlugin):
    raise errors.BadConfigObject(
        'Analysis plugin is not an instance of VirusTotalAnalysisPlugin')

  api_key = cls._ParseStringOption(options, 'virustotal_api_key')
  if not api_key:
    raise errors.BadConfigOption(
        'VirusTotal API key not specified. Try again with '
        '--virustotal-api-key.')

  analysis_plugin.SetAPIKey(api_key)

  enable_rate_limit = getattr(
      options, 'virustotal_free_rate_limit', cls._DEFAULT_RATE_LIMIT)
  if enable_rate_limit:
    analysis_plugin.EnableFreeAPIKeyRateLimit()

  lookup_hash = cls._ParseStringOption(
      options, 'virustotal_hash', default_value=cls._DEFAULT_HASH)
  analysis_plugin.SetLookupHash(lookup_hash)

  if not analysis_plugin.TestConnection():
    raise errors.BadConfigOption('Unable to connect to VirusTotal')
287,930
Processes a path specification. Args: extraction_worker (worker.ExtractionWorker): extraction worker. parser_mediator (ParserMediator): parser mediator. path_spec (dfvfs.PathSpec): path specification.
def _ProcessPathSpec(self, extraction_worker, parser_mediator, path_spec):
  self._current_display_name = parser_mediator.GetDisplayNameForPathSpec(
      path_spec)

  try:
    extraction_worker.ProcessPathSpec(parser_mediator, path_spec)

  except dfvfs_errors.CacheFullError:
    # TODO: signal engine of failure.
    self._abort = True
    logger.error((
        'ABORT: detected cache full error while processing path spec: '
        '{0:s}').format(self._current_display_name))

  except Exception as exception:  # pylint: disable=broad-except
    parser_mediator.ProduceExtractionWarning((
        'unable to process path specification with error: '
        '{0!s}').format(exception), path_spec=path_spec)

    if self._processing_configuration.debug_output:
      logger.warning((
          'Unhandled exception while processing path specification: '
          '{0:s}.').format(self._current_display_name))
      logger.exception(exception)
287,934
Processes a task. Args: task (Task): task.
def _ProcessTask(self, task):
  logger.debug('Started processing task: {0:s}.'.format(task.identifier))

  if self._tasks_profiler:
    self._tasks_profiler.Sample(task, 'processing_started')

  self._task = task

  storage_writer = self._storage_writer.CreateTaskStorage(task)

  if self._serializers_profiler:
    storage_writer.SetSerializersProfiler(self._serializers_profiler)

  storage_writer.Open()
  self._parser_mediator.SetStorageWriter(storage_writer)
  storage_writer.WriteTaskStart()

  try:
    # TODO: add support for more task types.
    self._ProcessPathSpec(
        self._extraction_worker, self._parser_mediator, task.path_spec)
    self._number_of_consumed_sources += 1

    if self._guppy_memory_profiler:
      self._guppy_memory_profiler.Sample()

  finally:
    storage_writer.WriteTaskCompletion(aborted=self._abort)
    self._parser_mediator.SetStorageWriter(None)
    storage_writer.Close()

    try:
      self._storage_writer.FinalizeTaskStorage(task)
    except IOError:
      pass

  self._task = None

  if self._tasks_profiler:
    self._tasks_profiler.Sample(task, 'processing_completed')

  logger.debug('Completed processing task: {0:s}.'.format(task.identifier))
287,935
Adds command line arguments to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
def AddArguments(cls, argument_group):
  argument_group.add_argument(
      '--preferred_year', '--preferred-year', dest='preferred_year',
      type=int, action='store', default=None, metavar='YEAR', help=(
          'When a format\'s timestamp does not include a year, e.g. '
          'syslog, use this as the initial year instead of attempting '
          'auto-detection.'))

  argument_group.add_argument(
      '--process_archives', '--process-archives', dest='process_archives',
      action='store_true', default=False, help=(
          'Process file entries embedded within archive files, such as '
          'archive.tar and archive.zip. This can make processing '
          'significantly slower.'))

  argument_group.add_argument(
      '--skip_compressed_streams', '--skip-compressed-streams',
      dest='process_compressed_streams', action='store_false', default=True,
      help=(
          'Skip processing file content within compressed streams, such as '
          'syslog.gz and syslog.bz2.'))
287,937
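A minimal sketch of how an arguments helper like the one above plugs into argparse; 'ExtractionArgumentsHelper' is a hypothetical import name for the class that defines AddArguments.

import argparse

parser = argparse.ArgumentParser()
ExtractionArgumentsHelper.AddArguments(parser)  # hypothetical helper class
options = parser.parse_args(['--preferred_year', '2012', '--process_archives'])
print(options.preferred_year, options.process_archives)  # 2012 True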
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type.
def ParseOptions(cls, options, configuration_object):
  if not isinstance(configuration_object, tools.CLITool):
    raise errors.BadConfigObject(
        'Configuration object is not an instance of CLITool')

  preferred_year = cls._ParseNumericOption(options, 'preferred_year')

  process_archives = getattr(options, 'process_archives', False)
  process_compressed_streams = getattr(
      options, 'process_compressed_streams', True)

  setattr(configuration_object, '_preferred_year', preferred_year)
  setattr(configuration_object, '_process_archives', process_archives)
  setattr(
      configuration_object, '_process_compressed_streams',
      process_compressed_streams)
287,938
Initializes an environment variable artifact. Args: case_sensitive (Optional[bool]): True if environment variable name is case sensitive. name (Optional[str]): environment variable name. value (Optional[str]): environment variable value.
def __init__(self, case_sensitive=True, name=None, value=None):
  super(EnvironmentVariableArtifact, self).__init__()
  self.case_sensitive = case_sensitive
  self.name = name
  self.value = value
287,939
Initializes a hostname artifact. Args: name (Optional[str]): name of the host according to the naming schema. schema (Optional[str]): naming schema.
def __init__(self, name=None, schema='DNS'):
  super(HostnameArtifact, self).__init__()
  self.name = name
  self.schema = schema
287,940
Determines if 2 operating system artifacts are equivalent. This function compares the operating systems in order of: * name derived from product * family and version * family Args: other (OperatingSystemArtifact): operating system artifact attribute container to compare with. Returns: bool: True if the operating systems are considered equivalent, False if the most specific criteria do not match, or no criteria are available.
def IsEquivalent(self, other):
  if self.name and other.name:
    return self.name == other.name

  if self.name:
    self_family, self_version_tuple = self._FAMILY_AND_VERSION_PER_NAME.get(
        self.name, self._DEFAULT_FAMILY_AND_VERSION)
    return (
        self_family == other.family and
        self_version_tuple == other.version_tuple)

  if self.family and self.version:
    if other.name:
      other_family, other_version_tuple = (
          self._FAMILY_AND_VERSION_PER_NAME.get(
              other.name, self._DEFAULT_FAMILY_AND_VERSION))
    else:
      other_family = other.family
      other_version_tuple = other.version_tuple

    return (
        self.family == other_family and
        self.version_tuple == other_version_tuple)

  if self.family:
    if other.name:
      other_family, _ = self._FAMILY_AND_VERSION_PER_NAME.get(
          other.name, self._DEFAULT_FAMILY_AND_VERSION)
    else:
      other_family = other.family

    return self.family == other_family

  return False
287,944
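A hedged sketch of the IsEquivalent check above with two artifacts matching on family and version; the containers import path is an assumption.

from plaso.containers import artifacts

this_os = artifacts.OperatingSystemArtifact(family='Windows NT', version='6.1')
other_os = artifacts.OperatingSystemArtifact(family='Windows NT', version='6.1')
print(this_os.IsEquivalent(other_os))  # True: family and version both match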
Initializes a system configuration artifact. Args: code_page (Optional[str]): system code page. time_zone (Optional[str]): system time zone.
def __init__(self, code_page=None, time_zone=None):
  super(SystemConfigurationArtifact, self).__init__()
  self.code_page = code_page
  self.hostname = None
  self.keyboard_layout = None
  self.operating_system = None
  self.operating_system_product = None
  self.operating_system_version = None
  self.time_zone = time_zone
  self.user_accounts = []
287,945
Initializes a user artifact. Args: full_name (Optional[str]): name describing the user e.g. full name. group_identifier (Optional[str]): identifier of the primary group the user is part of. identifier (Optional[str]): user identifier. path_separator (Optional[str]): path segment separator. user_directory (Optional[str]): path of the user (or home or profile) directory. username (Optional[str]): name uniquely identifying the user.
def __init__(
    self, full_name=None, group_identifier=None, identifier=None,
    path_separator='/', user_directory=None, username=None):
  super(UserAccountArtifact, self).__init__()
  self._path_separator = path_separator
  self.full_name = full_name
  self.group_identifier = group_identifier
  self.identifier = identifier
  # TODO: add shell.
  self.user_directory = user_directory
  self.username = username
287,946
Tries to determine the underlying operating system. Args: searcher (dfvfs.FileSystemSearcher): file system searcher. Returns: str: operating system for example "Windows". This should be one of the values in definitions.OPERATING_SYSTEM_FAMILIES.
def _DetermineOperatingSystem(self, searcher):
  find_specs = [
      file_system_searcher.FindSpec(
          location='/etc', case_sensitive=False),
      file_system_searcher.FindSpec(
          location='/System/Library', case_sensitive=False),
      file_system_searcher.FindSpec(
          location='/Windows/System32', case_sensitive=False),
      file_system_searcher.FindSpec(
          location='/WINNT/System32', case_sensitive=False),
      file_system_searcher.FindSpec(
          location='/WINNT35/System32', case_sensitive=False),
      file_system_searcher.FindSpec(
          location='/WTSRV/System32', case_sensitive=False)]

  locations = []
  for path_spec in searcher.Find(find_specs=find_specs):
    relative_path = searcher.GetRelativePath(path_spec)
    if relative_path:
      locations.append(relative_path.lower())

  # We need to check for both forward and backward slashes since the path
  # spec will be OS dependent, as in running the tool on Windows will return
  # Windows paths (backward slash) vs. forward slash on *NIX systems.
  windows_locations = set([
      '/windows/system32', '\\windows\\system32', '/winnt/system32',
      '\\winnt\\system32', '/winnt35/system32', '\\winnt35\\system32',
      '\\wtsrv\\system32', '/wtsrv/system32'])

  operating_system = definitions.OPERATING_SYSTEM_FAMILY_UNKNOWN
  if windows_locations.intersection(set(locations)):
    operating_system = definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT
  elif '/system/library' in locations:
    operating_system = definitions.OPERATING_SYSTEM_FAMILY_MACOS
  elif '/etc' in locations:
    operating_system = definitions.OPERATING_SYSTEM_FAMILY_LINUX

  return operating_system
287,948
Starts profiling. Args: configuration (ProfilingConfiguration): profiling configuration.
def _StartProfiling(self, configuration):
  if not configuration:
    return

  if configuration.HaveProfileMemoryGuppy():
    self._guppy_memory_profiler = profilers.GuppyMemoryProfiler(
        self._name, configuration)
    self._guppy_memory_profiler.Start()

  if configuration.HaveProfileMemory():
    self._memory_profiler = profilers.MemoryProfiler(
        self._name, configuration)
    self._memory_profiler.Start()

  if configuration.HaveProfileProcessing():
    identifier = '{0:s}-processing'.format(self._name)
    self._processing_profiler = profilers.ProcessingProfiler(
        identifier, configuration)
    self._processing_profiler.Start()

  if configuration.HaveProfileSerializers():
    identifier = '{0:s}-serializers'.format(self._name)
    self._serializers_profiler = profilers.SerializersProfiler(
        identifier, configuration)
    self._serializers_profiler.Start()

  if configuration.HaveProfileStorage():
    self._storage_profiler = profilers.StorageProfiler(
        self._name, configuration)
    self._storage_profiler.Start()

  if configuration.HaveProfileTaskQueue():
    self._task_queue_profiler = profilers.TaskQueueProfiler(
        self._name, configuration)
    self._task_queue_profiler.Start()
287,949
Preprocesses the sources. Args: artifacts_registry_object (artifacts.ArtifactDefinitionsRegistry): artifact definitions registry. source_path_specs (list[dfvfs.PathSpec]): path specifications of the sources to process. resolver_context (Optional[dfvfs.Context]): resolver context.
def PreprocessSources(
    self, artifacts_registry_object, source_path_specs,
    resolver_context=None):
  detected_operating_systems = []
  for source_path_spec in source_path_specs:
    try:
      file_system, mount_point = self.GetSourceFileSystem(
          source_path_spec, resolver_context=resolver_context)
    except (RuntimeError, dfvfs_errors.BackEndError) as exception:
      logger.error(exception)
      continue

    try:
      searcher = file_system_searcher.FileSystemSearcher(
          file_system, mount_point)

      operating_system = self._DetermineOperatingSystem(searcher)
      if operating_system != definitions.OPERATING_SYSTEM_FAMILY_UNKNOWN:
        preprocess_manager.PreprocessPluginsManager.RunPlugins(
            artifacts_registry_object, file_system, mount_point,
            self.knowledge_base)

      detected_operating_systems.append(operating_system)

    finally:
      file_system.Close()

  if detected_operating_systems:
    logger.info('Preprocessing detected operating systems: {0:s}'.format(
        ', '.join(detected_operating_systems)))
    self.knowledge_base.SetValue(
        'operating_system', detected_operating_systems[0])
287,951
Builds an artifact definitions registry from artifact definition files. Args: artifact_definitions_path (str): path to the directory containing artifact definition files. custom_artifacts_path (str): path to a custom artifact definitions file. Returns: artifacts.ArtifactDefinitionsRegistry: artifact definitions registry. Raises: BadConfigOption: if the paths are invalid or the artifact definitions cannot be read.
def BuildArtifactsRegistry(
    cls, artifact_definitions_path, custom_artifacts_path):
  if artifact_definitions_path and not os.path.isdir(
      artifact_definitions_path):
    raise errors.BadConfigOption(
        'No such artifact definitions directory: {0:s}.'.format(
            artifact_definitions_path))

  if custom_artifacts_path and not os.path.isfile(custom_artifacts_path):
    raise errors.BadConfigOption(
        'No such custom artifact definitions file: {0:s}.'.format(
            custom_artifacts_path))

  registry = artifacts_registry.ArtifactDefinitionsRegistry()
  reader = artifacts_reader.YamlArtifactsReader()

  try:
    registry.ReadFromDirectory(reader, artifact_definitions_path)

  except (KeyError, artifacts_errors.FormatError) as exception:
    raise errors.BadConfigOption((
        'Unable to read artifact definitions from: {0:s} with error: '
        '{1!s}').format(artifact_definitions_path, exception))

  if custom_artifacts_path:
    try:
      registry.ReadFromFile(reader, custom_artifacts_path)

    except (KeyError, artifacts_errors.FormatError) as exception:
      raise errors.BadConfigOption((
          'Unable to read artifact definitions from: {0:s} with error: '
          '{1!s}').format(custom_artifacts_path, exception))

  return registry
287,953
Retrieves the timestamps from an OLECF item. Args: olecf_item (pyolecf.item): OLECF item. Returns: tuple[int, int]: creation and modification FILETIME timestamp.
def _GetTimestamps(self, olecf_item):
  if not olecf_item:
    return None, None

  try:
    creation_time = olecf_item.get_creation_time_as_integer()
  except OverflowError as exception:
    logger.warning(
        'Unable to read the creation time with error: {0!s}'.format(
            exception))
    creation_time = 0

  try:
    modification_time = olecf_item.get_modification_time_as_integer()
  except OverflowError as exception:
    logger.warning(
        'Unable to read the modification time with error: {0!s}'.format(
            exception))
    modification_time = 0

  # If no useful events, return early.
  if not creation_time and not modification_time:
    return None, None

  # Office template documents sometimes contain a creation time
  # of -1 (0xffffffffffffffff).
  if creation_time == 0xffffffffffffffff:
    creation_time = 0

  return creation_time, modification_time
287,954
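A worked example of the sentinel handling above: Office template documents can store a creation FILETIME of -1, which the OLECF library surfaces as the unsigned value 0xffffffffffffffff, and the method resets it to 0 ("not set").

creation_time = 0xffffffffffffffff
if creation_time == 0xffffffffffffffff:
  creation_time = 0
print(creation_time)  # 0 - treated as if no creation time was recorded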
Retrieves a list of hasher names from a comma separated string. Takes a string of comma separated hasher names and transforms it into a list of hasher names. Args: hasher_names_string (str): comma separated names of hashers to enable, the string 'all' to enable all hashers or 'none' to disable all hashers. Returns: list[str]: names of valid hashers from the string, or an empty list if no valid names are found.
def GetHasherNamesFromString(cls, hasher_names_string):
  hasher_names = []

  if not hasher_names_string or hasher_names_string.strip() == 'none':
    return hasher_names

  if hasher_names_string.strip() == 'all':
    return cls.GetHasherNames()

  for hasher_name in hasher_names_string.split(','):
    hasher_name = hasher_name.strip()
    if not hasher_name:
      continue

    hasher_name = hasher_name.lower()
    if hasher_name in cls._hasher_classes:
      hasher_names.append(hasher_name)

  return hasher_names
287,955
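A usage sketch for GetHasherNamesFromString above; the manager import path is an assumption, and unknown names are silently dropped.

from plaso.analyzers.hashers import manager

names = manager.HashersManager.GetHasherNamesFromString('MD5, sha256, bogus')
print(names)  # ['md5', 'sha256'] - 'bogus' is not a registered hasher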
Retrieves an instance of a specific hasher. Args: hasher_name (str): the name of the hasher to retrieve. Returns: BaseHasher: hasher. Raises: KeyError: if hasher class is not set for the corresponding name.
def GetHasher(cls, hasher_name):
  hasher_name = hasher_name.lower()
  if hasher_name not in cls._hasher_classes:
    raise KeyError(
        'hasher class not set for name: {0:s}.'.format(hasher_name))

  hasher_class = cls._hasher_classes[hasher_name]
  return hasher_class()
287,957
Retrieves instances for all the specified hashers. Args: hasher_names (list[str]): names of the hashers to retrieve. Returns: list[BaseHasher]: hashers.
def GetHashers(cls, hasher_names):
  hashers = []
  for hasher_name, hasher_class in iter(cls._hasher_classes.items()):
    if hasher_name in hasher_names:
      hashers.append(hasher_class())

  return hashers
287,958
Retrieves the registered hashers. Args: hasher_names (list[str]): names of the hashers to retrieve. Yields: tuple: containing: str: hasher name. type: hasher class.
def GetHasherClasses(cls, hasher_names=None):
  for hasher_name, hasher_class in iter(cls._hasher_classes.items()):
    if not hasher_names or hasher_name in hasher_names:
      yield hasher_name, hasher_class
287,959
Registers a hasher class. The hasher classes are identified based on their lower case name. Args: hasher_class (type): class object of the hasher. Raises: KeyError: if hasher class is already set for the corresponding name.
def RegisterHasher(cls, hasher_class):
  hasher_name = hasher_class.NAME.lower()
  if hasher_name in cls._hasher_classes:
    raise KeyError((
        'hasher class already set for name: {0:s}.').format(
            hasher_class.NAME))

  cls._hasher_classes[hasher_name] = hasher_class
287,960
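A hedged sketch of registering a custom hasher via RegisterHasher above; the interface module path and the Update/GetStringDigest method names are assumptions about the BaseHasher contract.

from plaso.analyzers.hashers import interface, manager


class NullHasher(interface.BaseHasher):
  """Hasher that ignores its input (illustration only)."""

  NAME = 'null'
  DESCRIPTION = 'Does nothing.'

  def Update(self, data):
    pass

  def GetStringDigest(self):
    return '0'


manager.HashersManager.RegisterHasher(NullHasher)
print(manager.HashersManager.GetHasher('null').NAME)  # null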
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type. BadConfigOption: if the collection file does not exist.
def ParseOptions(cls, options, configuration_object):
  if not isinstance(configuration_object, tools.CLITool):
    raise errors.BadConfigObject(
        'Configuration object is not an instance of CLITool')

  filter_file = cls._ParseStringOption(options, 'file_filter')

  # Search the data location for the filter file.
  if filter_file and not os.path.isfile(filter_file):
    data_location = getattr(configuration_object, '_data_location', None)
    if data_location:
      filter_file_basename = os.path.basename(filter_file)
      filter_file_path = os.path.join(data_location, filter_file_basename)
      if os.path.isfile(filter_file_path):
        filter_file = filter_file_path

  if filter_file and not os.path.isfile(filter_file):
    raise errors.BadConfigOption(
        'No such collection filter file: {0:s}.'.format(filter_file))

  setattr(configuration_object, '_filter_file', filter_file)
287,961
Retrieves an ISO8601 date time string from the structure. The date and time values in the SCCM log are formatted as: time="19:33:19.766-330" date="11-28-2014" Args: structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file. Returns: str: ISO 8601 date time string. Raises: ValueError: if the structure cannot be converted into a date time string.
def _GetISO8601String(self, structure):
  fraction_of_second_length = len(structure.fraction_of_second)
  if fraction_of_second_length not in (3, 6, 7):
    raise ValueError(
        'unsupported time fraction of second length: {0:d}'.format(
            fraction_of_second_length))

  try:
    fraction_of_second = int(structure.fraction_of_second, 10)
  except (TypeError, ValueError) as exception:
    raise ValueError(
        'unable to determine fraction of second with error: {0!s}'.format(
            exception))

  # TODO: improve precision support, but for now ignore the 100ns precision.
  if fraction_of_second_length == 7:
    fraction_of_second, _ = divmod(fraction_of_second, 10)

  date_time_string = '{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}'.format(
      structure.year, structure.month, structure.day, structure.hour,
      structure.minute, structure.second)

  if fraction_of_second_length > 0:
    date_time_string = '{0:s}.{1:d}'.format(
        date_time_string, fraction_of_second)

  utc_offset_minutes = structure.get('utc_offset_minutes', None)
  if utc_offset_minutes is not None:
    try:
      time_zone_offset = int(utc_offset_minutes[1:], 10)
    except (IndexError, ValueError) as exception:
      raise ValueError(
          'Unable to parse time zone offset with error: {0!s}.'.format(
              exception))

    time_zone_hours, time_zone_minutes = divmod(time_zone_offset, 60)
    date_time_string = '{0:s}{1:s}{2:02d}:{3:02d}'.format(
        date_time_string, utc_offset_minutes[0], time_zone_hours,
        time_zone_minutes)

  return date_time_string
287,963
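A worked example of the conversion above for the documented sample time="19:33:19.766-330" date="11-28-2014": the 3-digit fraction is kept as milliseconds and the -330 minute UTC offset becomes -05:30.

time_zone_hours, time_zone_minutes = divmod(330, 60)
date_time_string = '2014-11-28T19:33:19.766'
date_time_string = '{0:s}-{1:02d}:{2:02d}'.format(
    date_time_string, time_zone_hours, time_zone_minutes)
print(date_time_string)  # 2014-11-28T19:33:19.766-05:30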
Parses a record and produces an SCCM log event. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): name of the parsed structure. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file. Raises: ParseError: when the structure type is unknown.
def ParseRecord(self, parser_mediator, key, structure):
  if key not in (
      'log_entry', 'log_entry_at_end', 'log_entry_offset',
      'log_entry_offset_at_end'):
    raise errors.ParseError(
        'Unable to parse record, unknown structure: {0:s}'.format(key))

  try:
    date_time_string = self._GetISO8601String(structure)
  except ValueError as exception:
    parser_mediator.ProduceExtractionWarning(
        'unable to determine date time string with error: {0!s}'.format(
            exception))
    return

  fraction_of_second_length = len(structure.fraction_of_second)
  if fraction_of_second_length == 3:
    date_time = dfdatetime_time_elements.TimeElementsInMilliseconds()
  elif fraction_of_second_length in (6, 7):
    date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()

  try:
    date_time.CopyFromStringISO8601(date_time_string)
  except ValueError as exception:
    parser_mediator.ProduceExtractionWarning(
        'unable to parse date time value: {0:s} with error: {1!s}'.format(
            date_time_string, exception))
    return

  event_data = SCCMLogEventData()
  event_data.component = structure.component
  # TODO: pass line number to offset or remove.
  event_data.offset = 0
  event_data.text = structure.text

  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_WRITTEN)
  parser_mediator.ProduceEventWithEventData(event, event_data)
287,964
Verifies whether content corresponds to an SCCM log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. lines (str): one or more lines from the text file. Returns: bool: True if this is the correct parser, False otherwise.
def VerifyStructure(self, parser_mediator, lines):
  # Identify the token to which we attempt a match.
  match = self._PARSING_COMPONENTS['msg_left_delimiter'].match

  # Because log files can lead with a partial event, we can't assume that
  # the first character (post-BOM) in the file is the beginning of our
  # match - so we look for the match anywhere in lines.
  return match in lines
287,965
Initializes the token object. Args: state_regex: If this regular expression matches the current state, this rule is considered. regex: A regular expression to try and match from the current point. actions: A comma separated list of method names in the Lexer to call. next_state: The next state we transition to if this Token matches. flags: re flags.
def __init__(self, state_regex, regex, actions, next_state, flags=re.I):
  self.state_regex = re.compile(
      state_regex, re.DOTALL | re.M | re.S | re.U | flags)
  self.regex = re.compile(regex, re.DOTALL | re.M | re.S | re.U | flags)
  self.re_str = regex
  self.actions = []
  if actions:
    self.actions = actions.split(',')

  self.next_state = next_state
287,966
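A small sketch constructing a lexer Token with the initializer above; the rule values are illustrative only.

token = Token('INITIAL', r'\d+', 'PushNumber,Flush', 'VALUE')
print(token.actions)     # ['PushNumber', 'Flush']
print(token.next_state)  # VALUE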