Dataset columns: docstring (string, lengths 52-499), function (string, lengths 67-35.2k), __index_level_0__ (int64, 52.6k-1.16M).
Extracts relevant Airport entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
  if 'RememberedNetworks' not in match:
    return

  for wifi in match['RememberedNetworks']:
    ssid = wifi.get('SSIDString', 'UNKNOWN_SSID')
    security_type = wifi.get('SecurityType', 'UNKNOWN_SECURITY_TYPE')

    event_data = plist_event.PlistTimeEventData()
    event_data.desc = (
        '[WiFi] Connected to network: <{0:s}> using security {1:s}').format(
            ssid, security_type)
    event_data.key = 'item'
    event_data.root = '/RememberedNetworks'

    datetime_value = wifi.get('LastConnected', None)
    if datetime_value:
      event = time_events.PythonDatetimeEvent(
          datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
    else:
      date_time = dfdatetime_semantic_time.SemanticTime('Not set')
      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)

    parser_mediator.ProduceEventWithEventData(event, event_data)
287,670
Converts a specific value of the row to an integer. Args: row (dict[str, str]): fields of a single row, as specified in COLUMNS. value_name (str): name of the value within the row. Returns: int: value or None if the value cannot be converted.
def _GetIntegerValue(self, row, value_name):
  value = row.get(value_name, None)
  try:
    return int(value, 10)
  except (TypeError, ValueError):
    return None
287,672
Parses a line of the log file and produces events. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. row_offset (int): number of the corresponding line. row (dict[str, str]): fields of a single row, as specified in COLUMNS.
def ParseRow(self, parser_mediator, row_offset, row):
  filename = row.get('name', None)
  md5_hash = row.get('md5', None)
  mode = row.get('mode_as_string', None)

  inode_number = row.get('inode', None)
  if '-' in inode_number:
    inode_number, _, _ = inode_number.partition('-')

  try:
    inode_number = int(inode_number, 10)
  except (TypeError, ValueError):
    inode_number = None

  data_size = self._GetIntegerValue(row, 'size')
  user_uid = self._GetIntegerValue(row, 'uid')
  user_gid = self._GetIntegerValue(row, 'gid')

  event_data = MactimeEventData()
  event_data.filename = filename
  event_data.inode = inode_number
  event_data.md5 = md5_hash
  event_data.mode_as_string = mode
  event_data.offset = row_offset
  event_data.size = data_size
  event_data.user_gid = user_gid

  if user_uid is None:
    event_data.user_sid = None
  else:
    # Note that the user_sid value is expected to be a string.
    event_data.user_sid = '{0:d}'.format(user_uid)

  for value_name, timestamp_description in iter(
      self._TIMESTAMP_DESC_MAP.items()):
    posix_time = self._GetIntegerValue(row, value_name)
    # mactime will return 0 if the timestamp is not set.
    if not posix_time:
      continue

    date_time = dfdatetime_posix_time.PosixTime(timestamp=posix_time)
    event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
    parser_mediator.ProduceEventWithEventData(event, event_data)
287,673
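ParseRow above iterates over a class-level _TIMESTAMP_DESC_MAP that is not shown in this row. As a hedged sketch, such a map would likely resemble the following, pairing bodyfile timestamp columns with plaso time descriptions so that each set timestamp yields its own event sharing the same MactimeEventData instance:

_TIMESTAMP_DESC_MAP = {
    'atime': definitions.TIME_DESCRIPTION_LAST_ACCESS,
    'crtime': definitions.TIME_DESCRIPTION_CREATION,
    'ctime': definitions.TIME_DESCRIPTION_CHANGE,
    'mtime': definitions.TIME_DESCRIPTION_MODIFICATION}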
Verifies if a line of the file is in the expected format. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. row (dict[str, str]): fields of a single row, as specified in COLUMNS. Returns: bool: True if this is the correct parser, False otherwise.
def VerifyRow(self, parser_mediator, row):
  # Sleuthkit version 3 format:
  # MD5|name|inode|mode_as_string|UID|GID|size|atime|mtime|ctime|crtime
  # 0|/lost+found|11|d/drwx------|0|0|12288|1337961350|1337961350|1337961350|0

  if row['md5'] != '0' and not self._MD5_RE.match(row['md5']):
    return False

  # Check if the following columns contain a base 10 integer value if set.
  for column_name in (
      'uid', 'gid', 'size', 'atime', 'mtime', 'ctime', 'crtime'):
    column_value = row.get(column_name, None)
    if not column_value:
      continue

    try:
      int(column_value, 10)
    except (TypeError, ValueError):
      return False

  return True
287,674
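As an illustrative sketch of a row that passes VerifyRow above (mactime_parser and parser_mediator are placeholders; the column names come from the parser's COLUMNS definition), the Sleuthkit example from the comment would arrive as:

row = {
    'md5': '0', 'name': '/lost+found', 'inode': '11',
    'mode_as_string': 'd/drwx------', 'uid': '0', 'gid': '0',
    'size': '12288', 'atime': '1337961350', 'mtime': '1337961350',
    'ctime': '1337961350', 'crtime': '0'}

# Returns True: md5 is '0' and every set numeric column parses as base 10.
mactime_parser.VerifyRow(parser_mediator, row)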
Parses an autofill entry row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
def ParseAutofillRow(
    self, parser_mediator, query, row, **unused_kwargs):
  query_hash = hash(query)

  event_data = ChromeAutofillEventData()
  event_data.field_name = self._GetRowValue(query_hash, row, 'name')
  event_data.value = self._GetRowValue(query_hash, row, 'value')
  event_data.usage_count = self._GetRowValue(query_hash, row, 'count')
  event_data.query = query

  # Create one event for the first time an autofill entry was used
  timestamp = self._GetRowValue(query_hash, row, 'date_created')
  date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_CREATION)
  parser_mediator.ProduceEventWithEventData(event, event_data)

  # If the autofill value has been used more than once, create another
  # event for the most recent time it was used.
  if event_data.usage_count > 1:
    timestamp = self._GetRowValue(query_hash, row, 'date_last_used')
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_LAST_USED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
287,676
Aborts all registered processes by joining with the parent process. Args: timeout (int): number of seconds to wait for processes to join, where None represents no timeout.
def _AbortJoin(self, timeout=None):
  for pid, process in iter(self._processes_per_pid.items()):
    logger.debug('Waiting for process: {0:s} (PID: {1:d}).'.format(
        process.name, pid))
    process.join(timeout=timeout)
    if not process.is_alive():
      logger.debug('Process {0:s} (PID: {1:d}) stopped.'.format(
          process.name, pid))
287,678
Checks the status of a worker process. If a worker process is not responding, the process is terminated and a replacement process is started. Args: pid (int): process ID (PID) of a registered worker process. Raises: KeyError: if the process is not registered with the engine.
def _CheckStatusWorkerProcess(self, pid): # TODO: Refactor this method, simplify and separate concerns (monitoring # vs management). self._RaiseIfNotRegistered(pid) process = self._processes_per_pid[pid] process_status = self._QueryProcessStatus(process) if process_status is None: process_is_alive = False else: process_is_alive = True process_information = self._process_information_per_pid[pid] used_memory = process_information.GetUsedMemory() or 0 if self._worker_memory_limit and used_memory > self._worker_memory_limit: logger.warning(( 'Process: {0:s} (PID: {1:d}) killed because it exceeded the ' 'memory limit: {2:d}.').format( process.name, pid, self._worker_memory_limit)) self._KillProcess(pid) if isinstance(process_status, dict): self._rpc_errors_per_pid[pid] = 0 status_indicator = process_status.get('processing_status', None) else: rpc_errors = self._rpc_errors_per_pid.get(pid, 0) + 1 self._rpc_errors_per_pid[pid] = rpc_errors if rpc_errors > self._MAXIMUM_RPC_ERRORS: process_is_alive = False if process_is_alive: rpc_port = process.rpc_port.value logger.warning(( 'Unable to retrieve process: {0:s} (PID: {1:d}) status via ' 'RPC socket: http://localhost:{2:d}').format( process.name, pid, rpc_port)) processing_status_string = 'RPC error' status_indicator = definitions.STATUS_INDICATOR_RUNNING else: processing_status_string = 'killed' status_indicator = definitions.STATUS_INDICATOR_KILLED process_status = { 'processing_status': processing_status_string} self._UpdateProcessingStatus(pid, process_status, used_memory) # _UpdateProcessingStatus can also change the status of the worker, # So refresh the status if applicable. for worker_status in self._processing_status.workers_status: if worker_status.pid == pid: status_indicator = worker_status.status break if status_indicator in definitions.ERROR_STATUS_INDICATORS: logger.error(( 'Process {0:s} (PID: {1:d}) is not functioning correctly. ' 'Status code: {2!s}.').format(process.name, pid, status_indicator)) self._TerminateProcessByPid(pid) replacement_process = None for replacement_process_attempt in range( self._MAXIMUM_REPLACEMENT_RETRIES): logger.info(( 'Attempt: {0:d} to start replacement worker process for ' '{1:s}').format(replacement_process_attempt + 1, process.name)) replacement_process = self._StartWorkerProcess( process.name, self._storage_writer) if replacement_process: break time.sleep(self._REPLACEMENT_WORKER_RETRY_DELAY) if not replacement_process: logger.error( 'Unable to create replacement worker process for: {0:s}'.format( process.name))
287,681
Issues a SIGKILL or equivalent to the process. Args: pid (int): process identifier (PID).
def _KillProcess(self, pid):
  if sys.platform.startswith('win'):
    process_terminate = 1
    handle = ctypes.windll.kernel32.OpenProcess(
        process_terminate, False, pid)
    ctypes.windll.kernel32.TerminateProcess(handle, -1)
    ctypes.windll.kernel32.CloseHandle(handle)

  else:
    try:
      os.kill(pid, signal.SIGKILL)
    except OSError as exception:
      logger.error('Unable to kill process {0:d} with error: {1!s}'.format(
          pid, exception))
287,682
Queries a process to determine its status. Args: process (MultiProcessBaseProcess): process to query for its status. Returns: dict[str, str]: status values received from the worker process.
def _QueryProcessStatus(self, process):
  process_is_alive = process.is_alive()
  if process_is_alive:
    rpc_client = self._rpc_clients_per_pid.get(process.pid, None)
    process_status = rpc_client.CallFunction()
  else:
    process_status = None
  return process_status
287,683
Registers a process with the engine. Args: process (MultiProcessBaseProcess): process. Raises: KeyError: if the process is already registered with the engine. ValueError: if the process is missing.
def _RegisterProcess(self, process):
  if process is None:
    raise ValueError('Missing process.')

  if process.pid in self._processes_per_pid:
    raise KeyError(
        'Already managing process: {0!s} (PID: {1:d})'.format(
            process.name, process.pid))

  self._processes_per_pid[process.pid] = process
287,684
Starts monitoring a process. Args: process (MultiProcessBaseProcess): process. Raises: IOError: if the RPC client cannot connect to the server. KeyError: if the process is not registered with the engine or if the process is already being monitored. OSError: if the RPC client cannot connect to the server. ValueError: if the process is missing.
def _StartMonitoringProcess(self, process):
  if process is None:
    raise ValueError('Missing process.')

  pid = process.pid

  if pid in self._process_information_per_pid:
    raise KeyError(
        'Already monitoring process (PID: {0:d}).'.format(pid))

  if pid in self._rpc_clients_per_pid:
    raise KeyError(
        'RPC client (PID: {0:d}) already exists'.format(pid))

  rpc_client = plaso_xmlrpc.XMLProcessStatusRPCClient()

  # Make sure that a worker process has started its RPC server.
  # The RPC port will be 0 if no server is available.
  rpc_port = process.rpc_port.value
  time_waited_for_process = 0.0
  while not rpc_port:
    time.sleep(0.1)
    rpc_port = process.rpc_port.value
    time_waited_for_process += 0.1

    if time_waited_for_process >= self._RPC_SERVER_TIMEOUT:
      raise IOError(
          'RPC client unable to determine server (PID: {0:d}) port.'.format(
              pid))

  hostname = 'localhost'

  if not rpc_client.Open(hostname, rpc_port):
    raise IOError((
        'RPC client unable to connect to server (PID: {0:d}) '
        'http://{1:s}:{2:d}').format(pid, hostname, rpc_port))

  self._rpc_clients_per_pid[pid] = rpc_client
  self._process_information_per_pid[pid] = process_info.ProcessInfo(pid)
287,685
Stops monitoring a process. Args: process (MultiProcessBaseProcess): process. Raises: KeyError: if the process is not monitored. ValueError: if the process is missing.
def _StopMonitoringProcess(self, process):
  if process is None:
    raise ValueError('Missing process.')

  pid = process.pid
  self._RaiseIfNotMonitored(pid)

  del self._process_information_per_pid[pid]

  rpc_client = self._rpc_clients_per_pid.get(pid, None)
  if rpc_client:
    rpc_client.Close()
    del self._rpc_clients_per_pid[pid]

  if pid in self._rpc_errors_per_pid:
    del self._rpc_errors_per_pid[pid]

  logger.debug('Stopped monitoring process: {0:s} (PID: {1:d})'.format(
      process.name, pid))
287,687
Terminates a process that is monitored by the engine. Args: pid (int): process identifier (PID). Raises: KeyError: if the process is not registered with and monitored by the engine.
def _TerminateProcessByPid(self, pid):
  self._RaiseIfNotRegistered(pid)

  process = self._processes_per_pid[pid]

  self._TerminateProcess(process)
  self._StopMonitoringProcess(process)
287,690
Terminates a process. Args: process (MultiProcessBaseProcess): process to terminate.
def _TerminateProcess(self, process):
  pid = process.pid
  logger.warning('Terminating process: (PID: {0:d}).'.format(pid))
  process.terminate()

  # Wait for the process to exit.
  process.join(timeout=self._PROCESS_JOIN_TIMEOUT)

  if process.is_alive():
    logger.warning('Killing process: (PID: {0:d}).'.format(pid))
    self._KillProcess(pid)
287,691
Adds command line arguments to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
def AddArguments(cls, argument_group):
  argument_group.add_argument(
      '-o', '--output_format', '--output-format', metavar='FORMAT',
      dest='output_format', default='dynamic', help=(
          'The output format. Use "-o list" to see a list of available '
          'output formats.'))

  argument_group.add_argument(
      '-w', '--write', metavar='OUTPUT_FILE', dest='write',
      help='Output filename.')

  # TODO: determine if this is repeated elsewhere and refactor this into
  # a helper function.
  arguments = sys.argv[1:]
  argument_index = 0

  if '-o' in arguments:
    argument_index = arguments.index('-o') + 1
  elif '--output_format' in arguments:
    argument_index = arguments.index('--output_format') + 1
  elif '--output-format' in arguments:
    argument_index = arguments.index('--output-format') + 1

  if 0 < argument_index < len(arguments):
    names = [name.strip() for name in arguments[argument_index].split(',')]
  else:
    names = ['dynamic']

  if names and names != ['list']:
    manager.ArgumentHelperManager.AddCommandLineArguments(
        argument_group, category='output', names=names)
287,692
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type.
def ParseOptions(cls, options, configuration_object):
  if not isinstance(configuration_object, tools.CLITool):
    raise errors.BadConfigObject(
        'Configuration object is not an instance of CLITool')

  output_format = getattr(options, 'output_format', 'dynamic')
  output_filename = getattr(options, 'write', None)

  if output_format != 'list':
    if not output_manager.OutputManager.HasOutputClass(output_format):
      raise errors.BadConfigOption(
          'Unsupported output format: {0:s}.'.format(output_format))

    if output_manager.OutputManager.IsLinearOutputModule(output_format):
      if not output_filename:
        raise errors.BadConfigOption((
            'Output format: {0:s} requires an output file').format(
                output_format))

      if os.path.exists(output_filename):
        raise errors.BadConfigOption(
            'Output file already exists: {0:s}.'.format(output_filename))

  setattr(configuration_object, '_output_format', output_format)
  setattr(configuration_object, '_output_filename', output_filename)
287,693
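A minimal usage sketch of the two helper methods above. 'OutputHelper' stands in for the helper class that defines them (the class name is not shown in these rows), 'tool' is assumed to be a CLITool instance, and 'events.csv' is assumed not to exist yet:

import argparse

# The helper reads output_format/write from the parsed namespace and stores
# them on the tool object as _output_format and _output_filename.
options = argparse.Namespace(output_format='dynamic', write='events.csv')
OutputHelper.ParseOptions(options, tool)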
Verify that a number is within a defined range. This is a callback method for pyparsing setParseAction that verifies that a read number is within a certain range. To use this method it needs to be defined as a callback method in setParseAction with the upper and lower bound set as parameters. Args: lower_bound (int): lower bound of the range. upper_bound (int): upper bound of the range. Returns: Function: callback method that can be used by pyparsing setParseAction.
def PyParseRangeCheck(lower_bound, upper_bound):
  # pylint: disable=unused-argument
  def CheckRange(string, location, tokens):
    try:
      check_number = tokens[0]
    except IndexError:
      check_number = -1

    if check_number < lower_bound:
      raise pyparsing.ParseException(
          'Value: {0:d} precedes lower bound: {1:d}'.format(
              check_number, lower_bound))

    if check_number > upper_bound:
      raise pyparsing.ParseException(
          'Value: {0:d} exceeds upper bound: {1:d}'.format(
              check_number, upper_bound))

  # Since callback methods for pyparsing need to accept certain parameters
  # and there is no way to define conditions, like upper and lower bounds
  # we need to return here a method that accepts those pyparsing parameters.
  return CheckRange
287,694
Return an integer from a string. This is a pyparsing callback method that converts the matched string into an integer. The method modifies the content of the tokens list and converts them all to an integer value. Args: string (str): original string. location (int): location in the string where the match was made. tokens (list[str]): extracted tokens, where the string to be converted is stored.
def PyParseIntCast(string, location, tokens):
  # Cast the regular tokens.
  for index, token in enumerate(tokens):
    try:
      tokens[index] = int(token)
    except ValueError:
      logger.error('Unable to cast [{0:s}] to an int, setting to 0'.format(
          token))
      tokens[index] = 0

  # We also need to cast the dictionary built tokens.
  for key in tokens.keys():
    try:
      tokens[key] = int(tokens[key], 10)
    except ValueError:
      logger.error(
          'Unable to cast [{0:s} = {1:d}] to an int, setting to 0'.format(
              key, tokens[key]))
      tokens[key] = 0
287,695
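A hedged usage sketch for the two callbacks above: the hypothetical 'hour' grammar element casts its token to an integer first, then rejects values outside 0-23.

import pyparsing

# Parse actions run in order: PyParseIntCast converts the token to int,
# then the CheckRange closure returned by PyParseRangeCheck validates it.
hour = pyparsing.Word(pyparsing.nums, max=2).setParseAction(
    PyParseIntCast, PyParseRangeCheck(0, 23))

hour.parseString('17')  # tokens become [17]
hour.parseString('25')  # raises pyparsing.ParseException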
Return a joined token from a list of tokens. This is a callback method for pyparsing setParseAction that modifies the returned token list to join all the elements in the list to a single token. Args: string (str): original string. location (int): location in the string where the match was made. tokens (list[str]): extracted tokens, where the string to be converted is stored.
def PyParseJoinList(string, location, tokens):
  join_list = []
  for token in tokens:
    try:
      join_list.append(str(token))
    except UnicodeDecodeError:
      join_list.append(repr(token))

  tokens[0] = ''.join(join_list)
  del tokens[1:]
287,696
Parses a text file-like object using a pyparsing definition. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
def ParseFileObject(self, parser_mediator, file_object): # TODO: self._line_structures is a work-around and this needs # a structural fix. if not self._line_structures: raise errors.UnableToParseFile( 'Line structure undeclared, unable to proceed.') encoding = self._ENCODING or parser_mediator.codepage text_file_object = text_file.TextFile(file_object, encoding=encoding) try: line = self._ReadLine(text_file_object, max_len=self.MAX_LINE_LENGTH) except UnicodeDecodeError: raise errors.UnableToParseFile( 'Not a text file or encoding not supported.') if not line: raise errors.UnableToParseFile('Not a text file.') if len(line) == self.MAX_LINE_LENGTH or len( line) == self.MAX_LINE_LENGTH - 1: logger.debug(( 'Trying to read a line and reached the maximum allowed length of ' '{0:d}. The last few bytes of the line are: {1:s} [parser ' '{2:s}]').format( self.MAX_LINE_LENGTH, repr(line[-10:]), self.NAME)) if not self._IsText(line): raise errors.UnableToParseFile('Not a text file, unable to proceed.') if not self.VerifyStructure(parser_mediator, line): raise errors.UnableToParseFile('Wrong file structure.') consecutive_line_failures = 0 index = None # Set the offset to the beginning of the file. self._current_offset = 0 # Read every line in the text file. while line: if parser_mediator.abort: break parsed_structure = None use_key = None # Try to parse the line using all the line structures. for index, (key, structure) in enumerate(self._line_structures): try: parsed_structure = structure.parseString(line) except pyparsing.ParseException: pass if parsed_structure: use_key = key break if parsed_structure: self.ParseRecord(parser_mediator, use_key, parsed_structure) consecutive_line_failures = 0 if index is not None and index != 0: key_structure = self._line_structures.pop(index) self._line_structures.insert(0, key_structure) else: if len(line) > 80: line = '{0:s}...'.format(line[:77]) parser_mediator.ProduceExtractionWarning( 'unable to parse log line: {0:s} at offset: {1:d}'.format( repr(line), self._current_offset)) consecutive_line_failures += 1 if (consecutive_line_failures > self.MAXIMUM_CONSECUTIVE_LINE_FAILURES): raise errors.UnableToParseFile( 'more than {0:d} consecutive failures to parse lines.'.format( self.MAXIMUM_CONSECUTIVE_LINE_FAILURES)) self._current_offset = text_file_object.get_offset() try: line = self._ReadLine(text_file_object, max_len=self.MAX_LINE_LENGTH) except UnicodeDecodeError: parser_mediator.ProduceExtractionWarning( 'unable to read and decode log line at offset {0:d}'.format( self._current_offset)) break
287,700
Initializes the encoded text reader object. Args: encoding (str): encoding. buffer_size (Optional[int]): buffer size.
def __init__(self, encoding, buffer_size=2048):
  super(EncodedTextReader, self).__init__()
  self._buffer = ''
  self._buffer_size = buffer_size
  self._current_offset = 0
  self._encoding = encoding
  self.lines = ''
287,701
Reads a line from the file object. Args: file_object (dfvfs.FileIO): file-like object. Returns: str: line read from the file-like object.
def _ReadLine(self, file_object):
  if len(self._buffer) < self._buffer_size:
    content = file_object.read(self._buffer_size)
    content = content.decode(self._encoding)
    self._buffer = ''.join([self._buffer, content])

  line, new_line, self._buffer = self._buffer.partition('\n')
  if not line and not new_line:
    line = self._buffer
    self._buffer = ''

  self._current_offset += len(line)

  # Strip carriage returns from the text.
  if line.endswith('\r'):
    line = line[:-len('\r')]

  if new_line:
    line = ''.join([line, '\n'])
    self._current_offset += len('\n')

  return line
287,702
Reads a line. Args: file_object (dfvfs.FileIO): file-like object. Returns: str: line read from the lines buffer.
def ReadLine(self, file_object):
  line, _, self.lines = self.lines.partition('\n')
  if not line:
    self.ReadLines(file_object)
    line, _, self.lines = self.lines.partition('\n')

  return line
287,703
Reads lines into the lines buffer. Args: file_object (dfvfs.FileIO): file-like object.
def ReadLines(self, file_object):
  lines_size = len(self.lines)
  if lines_size < self._buffer_size:
    lines_size = self._buffer_size - lines_size
    while lines_size > 0:
      line = self._ReadLine(file_object)
      if not line:
        break

      self.lines = ''.join([self.lines, line])
      lines_size -= len(line)
287,704
Skips ahead a number of characters. Args: file_object (dfvfs.FileIO): file-like object. number_of_characters (int): number of characters.
def SkipAhead(self, file_object, number_of_characters):
  lines_size = len(self.lines)
  while number_of_characters >= lines_size:
    number_of_characters -= lines_size

    self.lines = ''
    self.ReadLines(file_object)

    lines_size = len(self.lines)
    if lines_size == 0:
      return

  self.lines = self.lines[number_of_characters:]
287,705
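A short usage sketch of the reader methods above, using an in-memory byte stream in place of a dfvfs file-like object (only read() is needed); the small buffer_size is just for illustration:

import io

reader = EncodedTextReader('utf-8', buffer_size=16)
file_object = io.BytesIO(b'first line\nsecond line\n')

reader.ReadLines(file_object)
line = reader.ReadLine(file_object)  # 'first line'
reader.SkipAhead(file_object, 7)     # drops 'second ', leaving 'line\n' buffered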
Parses a text file-like object using a pyparsing definition. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
def ParseFileObject(self, parser_mediator, file_object): if not self.LINE_STRUCTURES: raise errors.UnableToParseFile('Missing line structures.') encoding = self._ENCODING or parser_mediator.codepage text_reader = EncodedTextReader( encoding, buffer_size=self.BUFFER_SIZE) text_reader.Reset() try: text_reader.ReadLines(file_object) except UnicodeDecodeError as exception: raise errors.UnableToParseFile( 'Not a text file, with error: {0!s}'.format(exception)) if not self.VerifyStructure(parser_mediator, text_reader.lines): raise errors.UnableToParseFile('Wrong file structure.') # Using parseWithTabs() overrides Pyparsing's default replacement of tabs # with spaces to SkipAhead() the correct number of bytes after a match. for key, structure in self.LINE_STRUCTURES: structure.parseWithTabs() consecutive_line_failures = 0 # Read every line in the text file. while text_reader.lines: if parser_mediator.abort: break # Initialize pyparsing objects. tokens = None start = 0 end = 0 key = None index = None # Try to parse the line using all the line structures. for index, (key, structure) in enumerate(self._line_structures): try: structure_generator = structure.scanString( text_reader.lines, maxMatches=1) parsed_structure = next(structure_generator, None) except pyparsing.ParseException: parsed_structure = None if not parsed_structure: continue tokens, start, end = parsed_structure # Only want to parse the structure if it starts # at the beginning of the buffer. if start == 0: break if tokens and start == 0: # Move matching key, structure pair to the front of the list, so that # structures that are more likely to match are tried first. if index is not None and index != 0: key_structure = self._line_structures.pop(index) self._line_structures.insert(0, key_structure) try: self.ParseRecord(parser_mediator, key, tokens) consecutive_line_failures = 0 except (errors.ParseError, errors.TimestampError) as exception: parser_mediator.ProduceExtractionWarning( 'unable to parse record: {0:s} with error: {1!s}'.format( key, exception)) text_reader.SkipAhead(file_object, end) else: odd_line = text_reader.ReadLine(file_object) if odd_line: if len(odd_line) > 80: odd_line = '{0:s}...'.format(odd_line[:77]) parser_mediator.ProduceExtractionWarning( 'unable to parse log line: {0:s}'.format(repr(odd_line))) consecutive_line_failures += 1 if (consecutive_line_failures > self.MAXIMUM_CONSECUTIVE_LINE_FAILURES): raise errors.UnableToParseFile( 'more than {0:d} consecutive failures to parse lines.'.format( self.MAXIMUM_CONSECUTIVE_LINE_FAILURES)) try: text_reader.ReadLines(file_object) except UnicodeDecodeError as exception: parser_mediator.ProduceExtractionWarning( 'unable to read lines with error: {0!s}'.format(exception))
287,707
Extracts Safari history items. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
  format_version = match.get('WebHistoryFileVersion', None)
  if format_version != 1:
    parser_mediator.ProduceExtractionWarning(
        'unsupported Safari history version: {0!s}'.format(format_version))
    return

  if 'WebHistoryDates' not in match:
    return

  for history_entry in match.get('WebHistoryDates', {}):
    last_visited_date = history_entry.get('lastVisitedDate', None)
    if last_visited_date is None:
      parser_mediator.ProduceExtractionWarning('missing last visited date')
      continue

    try:
      # Last visited date is a string containing a floating point value.
      timestamp = float(last_visited_date)
    except (TypeError, ValueError):
      parser_mediator.ProduceExtractionWarning(
          'unable to convert last visited date {0:s}'.format(
              last_visited_date))
      continue

    display_title = history_entry.get('displayTitle', None)

    event_data = SafariHistoryEventData()
    if display_title != event_data.title:
      event_data.display_title = display_title
    event_data.title = history_entry.get('title', None)
    event_data.url = history_entry.get('', None)
    event_data.visit_count = history_entry.get('visitCount', None)
    event_data.was_http_non_get = history_entry.get(
        'lastVisitWasHTTPNonGet', None)

    # Convert the floating point value to an integer.
    # TODO: add support for the fractional part of the floating point value.
    timestamp = int(timestamp)
    date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
287,711
Deregisters an analyzer class. The analyzer classes are identified based on their lower case name. Args: analyzer_class (type): class object of the analyzer. Raises: KeyError: if analyzer class is not set for the corresponding name.
def DeregisterAnalyzer(cls, analyzer_class):
  analyzer_name = analyzer_class.NAME.lower()
  if analyzer_name not in cls._analyzer_classes:
    raise KeyError('analyzer class not set for name: {0:s}'.format(
        analyzer_class.NAME))

  del cls._analyzer_classes[analyzer_name]
287,712
Retrieves an instance of a specific analyzer. Args: analyzer_name (str): name of the analyzer to retrieve. Returns: BaseAnalyzer: analyzer instance. Raises: KeyError: if analyzer class is not set for the corresponding name.
def GetAnalyzerInstance(cls, analyzer_name):
  analyzer_name = analyzer_name.lower()
  if analyzer_name not in cls._analyzer_classes:
    raise KeyError(
        'analyzer class not set for name: {0:s}.'.format(analyzer_name))

  analyzer_class = cls._analyzer_classes[analyzer_name]
  return analyzer_class()
287,714
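A minimal lookup sketch, assuming plaso's AnalyzersManager where analyzers (such as the 'hashing' analyzer) register themselves by NAME: lookups are case-insensitive and unknown names raise KeyError.

# Case-insensitive lookup of a registered analyzer.
analyzer = AnalyzersManager.GetAnalyzerInstance('Hashing')

try:
  AnalyzersManager.GetAnalyzerInstance('no_such_analyzer')
except KeyError:
  pass  # the name was never registered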
Retrieves instances for all the specified analyzers. Args: analyzer_names (list[str]): names of the analyzers to retrieve. Returns: list[BaseAnalyzer]: analyzer instances.
def GetAnalyzerInstances(cls, analyzer_names):
  analyzer_instances = []
  for analyzer_name, analyzer_class in iter(cls.GetAnalyzers()):
    if analyzer_name in analyzer_names:
      analyzer_instances.append(analyzer_class())

  return analyzer_instances
287,715
Analyzes an event and extracts hashes as required. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event to examine.
def ExamineEvent(self, mediator, event):
  pathspec = getattr(event, 'pathspec', None)
  if pathspec is None:
    return

  if self._paths_with_hashes.get(pathspec, None):
    # We've already processed an event with this pathspec and extracted the
    # hashes from it.
    return

  hash_attributes = {}
  for attribute_name, attribute_value in event.GetAttributes():
    if attribute_name.endswith('_hash'):
      hash_attributes[attribute_name] = attribute_value

  self._paths_with_hashes[pathspec] = hash_attributes
287,717
Compiles an analysis report. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. Returns: AnalysisReport: report.
def CompileReport(self, mediator):
  lines_of_text = ['Listing file paths and hashes']
  for pathspec, hashes in sorted(
      self._paths_with_hashes.items(),
      key=lambda tuple: tuple[0].comparable):
    path_string = self._GeneratePathString(mediator, pathspec, hashes)
    lines_of_text.append(path_string)

  lines_of_text.append('')
  report_text = '\n'.join(lines_of_text)
  return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)
287,719
Parses the event data from a variable-length data section. Args: variable_length_section (job_variable_length_data_section): a Windows Scheduled Task job variable-length data section. Returns: WinJobEventData: event data of the job file.
def _ParseEventData(self, variable_length_section):
  event_data = WinJobEventData()

  event_data.application = (
      variable_length_section.application_name.rstrip('\x00'))
  event_data.comment = variable_length_section.comment.rstrip('\x00')
  event_data.parameters = (
      variable_length_section.parameters.rstrip('\x00'))
  event_data.username = variable_length_section.author.rstrip('\x00')
  event_data.working_directory = (
      variable_length_section.working_directory.rstrip('\x00'))

  return event_data
287,721
Parses the last run time from a fixed-length data section. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. fixed_length_section (job_fixed_length_data_section): a Windows Scheduled Task job fixed-length data section. Returns: dfdatetime.DateTimeValues: last run date and time or None if not available.
def _ParseLastRunTime(self, parser_mediator, fixed_length_section):
  systemtime_struct = fixed_length_section.last_run_time
  system_time_tuple = (
      systemtime_struct.year, systemtime_struct.month,
      systemtime_struct.weekday, systemtime_struct.day_of_month,
      systemtime_struct.hours, systemtime_struct.minutes,
      systemtime_struct.seconds, systemtime_struct.milliseconds)

  date_time = None
  if system_time_tuple != self._EMPTY_SYSTEM_TIME_TUPLE:
    try:
      date_time = dfdatetime_systemtime.Systemtime(
          system_time_tuple=system_time_tuple)
    except ValueError:
      parser_mediator.ProduceExtractionWarning(
          'invalid last run time: {0!s}'.format(system_time_tuple))

  return date_time
287,722
Parses the end time from a trigger. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. trigger (job_trigger): a trigger. Returns: dfdatetime.DateTimeValues: trigger end date and time or None if not available.
def _ParseTriggerEndTime(self, parser_mediator, trigger):
  time_elements_tuple = (
      trigger.end_date.year, trigger.end_date.month,
      trigger.end_date.day_of_month, 0, 0, 0)

  date_time = None
  if time_elements_tuple != (0, 0, 0, 0, 0, 0):
    try:
      date_time = dfdatetime_time_elements.TimeElements(
          time_elements_tuple=time_elements_tuple)
      date_time.is_local_time = True
      # TODO: add functionality to dfdatetime to control precision.
      date_time._precision = dfdatetime_definitions.PRECISION_1_DAY  # pylint: disable=protected-access
    except ValueError:
      parser_mediator.ProduceExtractionWarning(
          'invalid trigger end time: {0!s}'.format(time_elements_tuple))

  return date_time
287,723
Parses the start time from a trigger. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. trigger (job_trigger): a trigger. Returns: dfdatetime.DateTimeValues: trigger start date and time or None if not available.
def _ParseTriggerStartTime(self, parser_mediator, trigger):
  time_elements_tuple = (
      trigger.start_date.year, trigger.start_date.month,
      trigger.start_date.day_of_month, trigger.start_time.hours,
      trigger.start_time.minutes, 0)

  date_time = None
  if time_elements_tuple != (0, 0, 0, 0, 0, 0):
    try:
      date_time = dfdatetime_time_elements.TimeElements(
          time_elements_tuple=time_elements_tuple)
      date_time.is_local_time = True
      # TODO: add functionality to dfdatetime to control precision.
      date_time._precision = dfdatetime_definitions.PRECISION_1_MINUTE  # pylint: disable=protected-access
    except ValueError:
      parser_mediator.ProduceExtractionWarning(
          'invalid trigger start time: {0!s}'.format(time_elements_tuple))

  return date_time
287,724
Parses a Windows job file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
def ParseFileObject(self, parser_mediator, file_object): fixed_section_data_map = self._GetDataTypeMap( 'job_fixed_length_data_section') try: fixed_length_section, file_offset = self._ReadStructureFromFileObject( file_object, 0, fixed_section_data_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile( 'Unable to parse fixed-length data section with error: {0!s}'.format( exception)) if not fixed_length_section.product_version in self._PRODUCT_VERSIONS: raise errors.UnableToParseFile( 'Unsupported product version in: 0x{0:04x}'.format( fixed_length_section.product_version)) if not fixed_length_section.format_version == 1: raise errors.UnableToParseFile( 'Unsupported format version in: {0:d}'.format( fixed_length_section.format_version)) variable_section_data_map = self._GetDataTypeMap( 'job_variable_length_data_section') try: variable_length_section, data_size = self._ReadStructureFromFileObject( file_object, file_offset, variable_section_data_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile(( 'Unable to parse variable-length data section with error: ' '{0!s}').format(exception)) file_offset += data_size event_data = self._ParseEventData(variable_length_section) date_time = self._ParseLastRunTime(parser_mediator, fixed_length_section) if date_time: event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_RUN) parser_mediator.ProduceEventWithEventData(event, event_data) trigger_data_map = self._GetDataTypeMap('job_trigger') for trigger_index in range(0, variable_length_section.number_of_triggers): try: trigger, data_size = self._ReadStructureFromFileObject( file_object, file_offset, trigger_data_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile(( 'Unable to parse trigger: {0:d} with error: {2!s}').format( trigger_index, exception)) file_offset += data_size event_data.trigger_type = trigger.trigger_type date_time = self._ParseTriggerStartTime(parser_mediator, trigger) if date_time: event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_SCHEDULED_TO_START, time_zone=parser_mediator.timezone) parser_mediator.ProduceEventWithEventData(event, event_data) date_time = self._ParseTriggerEndTime(parser_mediator, trigger) if date_time: event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_SCHEDULED_TO_START, time_zone=parser_mediator.timezone) parser_mediator.ProduceEventWithEventData(event, event_data)
287,725
Parses and validates options. Args: options (argparse.Namespace): parser options. analysis_plugin (AnalysisPlugin): analysis plugin to configure. Raises: BadConfigObject: when the output module object is of the wrong type. BadConfigOption: when a configuration parameter fails validation.
def ParseOptions(cls, options, analysis_plugin):
  if not isinstance(analysis_plugin, tagging.TaggingAnalysisPlugin):
    raise errors.BadConfigObject(
        'Analysis plugin is not an instance of TaggingAnalysisPlugin')

  tagging_file = cls._ParseStringOption(options, 'tagging_file')
  if not tagging_file:
    raise errors.BadConfigOption(
        'Tagging analysis plugin requires a tagging file.')

  tagging_file_path = tagging_file
  if not os.path.isfile(tagging_file_path):
    # Check if the file exists in the data location path.
    data_location = getattr(options, 'data_location', None)
    if data_location:
      tagging_file_path = os.path.join(data_location, tagging_file)

  if not os.path.isfile(tagging_file_path):
    raise errors.BadConfigOption(
        'No such tagging file: {0:s}.'.format(tagging_file))

  try:
    analysis_plugin.SetAndLoadTagFile(tagging_file_path)

  except UnicodeDecodeError:
    raise errors.BadConfigOption(
        'Invalid tagging file: {0:s} encoding must be UTF-8.'.format(
            tagging_file))

  except errors.TaggingFileError as exception:
    raise errors.BadConfigOption(
        'Unable to read tagging file: {0:s} with error: {1!s}'.format(
            tagging_file, exception))
287,726
Initializes a circular buffer object. Args: size (int): number of elements in the buffer.
def __init__(self, size):
  super(CircularBuffer, self).__init__()
  self._index = 0
  self._list = []
  self._size = size
287,727
Add an item to the list. Args: item (object): item.
def Append(self, item):
  if self._index >= self._size:
    self._index = self._index % self._size

  try:
    self._list[self._index] = item
  except IndexError:
    self._list.append(item)
  self._index += 1
287,729
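A quick sketch of the wrap-around behavior of Append above: with a buffer of size 3, the fourth item overwrites the oldest slot.

ring = CircularBuffer(3)
for value in ('a', 'b', 'c', 'd'):
  ring.Append(value)
# The internal list is now ['d', 'b', 'c'] and the next Append lands on
# index 1, replacing 'b'.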
Initializes an event data attribute container. Args: data_type (Optional[str]): event data type indicator.
def __init__(self, data_type=None):
  super(EventData, self).__init__()
  self.data_type = data_type
  self.offset = None
  self.query = None
287,732
Initializes an event tag attribute container. Args: comment (Optional[str]): comments.
def __init__(self, comment=None):
  super(EventTag, self).__init__()
  self._event_identifier = None
  self.comment = comment
  self.event_entry_index = None
  self.event_row_identifier = None
  self.event_stream_number = None
  self.labels = []
287,734
Adds a comment to the event tag. Args: comment (str): comment.
def AddComment(self, comment):
  if not comment:
    return

  if not self.comment:
    self.comment = comment
  else:
    self.comment = ''.join([self.comment, comment])
287,735
Adds a label to the event tag. Args: label (str): label. Raises: TypeError: if the label provided is not a string. ValueError: if a label is malformed.
def AddLabel(self, label):
  if not isinstance(label, py2to3.STRING_TYPES):
    raise TypeError('label is not a string type. Is {0:s}'.format(
        type(label)))

  if not self._VALID_LABEL_REGEX.match(label):
    raise ValueError((
        'Unsupported label: "{0:s}". A label must only consist of '
        'alphanumeric characters or underscores.').format(label))

  if label not in self.labels:
    self.labels.append(label)
287,736
Adds labels to the event tag. Args: labels (list[str]): labels. Raises: ValueError: if a label is malformed.
def AddLabels(self, labels):
  for label in labels:
    if not self._VALID_LABEL_REGEX.match(label):
      raise ValueError((
          'Unsupported label: "{0:s}". A label must only consist of '
          'alphanumeric characters or underscores.').format(label))

  for label in labels:
    if label not in self.labels:
      self.labels.append(label)
287,737
Copies a string to a label. A label only supports a limited set of characters therefore unsupported characters are replaced with an underscore. Args: text (str): label text. prefix (Optional[str]): label prefix. Returns: str: label.
def CopyTextToLabel(cls, text, prefix=''):
  text = '{0:s}{1:s}'.format(prefix, text)
  return cls._INVALID_LABEL_CHARACTERS_REGEX.sub('_', text)
287,739
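A hedged example of the sanitize-then-tag pattern using the methods above, assuming the usual plaso definition where the invalid-character regex replaces anything outside [A-Za-z0-9_] with an underscore:

# Spaces become underscores, so the result is a valid label.
label = EventTag.CopyTextToLabel('browser search', prefix='tag_')
# label == 'tag_browser_search'

event_tag = EventTag(comment='seen in history')
event_tag.AddLabel(label)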
Parses a FILETIME date and time value from a byte stream. Args: byte_stream (bytes): byte stream. Returns: dfdatetime.Filetime: FILETIME date and time value or None if no value is set. Raises: ParseError: if the FILETIME could not be parsed.
def _ParseFiletime(self, byte_stream):
  filetime_map = self._GetDataTypeMap('filetime')

  try:
    filetime = self._ReadStructureFromByteStream(
        byte_stream, 0, filetime_map)
  except (ValueError, errors.ParseError) as exception:
    raise errors.ParseError(
        'Unable to parse FILETIME value with error: {0!s}'.format(
            exception))

  if filetime == 0:
    return None

  try:
    return dfdatetime_filetime.Filetime(timestamp=filetime)
  except ValueError:
    raise errors.ParseError(
        'Invalid FILETIME value: 0x{0:08x}'.format(filetime))
287,743
Extracts events from a ShutdownTime Windows Registry value. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
  shutdown_value = registry_key.GetValueByName('ShutdownTime')
  if not shutdown_value:
    return

  try:
    date_time = self._ParseFiletime(shutdown_value.data)
  except errors.ParseError as exception:
    parser_mediator.ProduceExtractionWarning(
        'unable to determine shutdown timestamp with error: {0!s}'.format(
            exception))
    return

  if not date_time:
    date_time = dfdatetime_semantic_time.SemanticTime('Not set')

  event_data = ShutdownWindowsRegistryEventData()
  event_data.key_path = registry_key.path
  event_data.offset = shutdown_value.offset
  event_data.value_name = shutdown_value.name

  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_LAST_SHUTDOWN)
  parser_mediator.ProduceEventWithEventData(event, event_data)
287,744
Parses a Windows Shortcut (LNK) file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object.
def ParseFileObject(self, parser_mediator, file_object):
  display_name = parser_mediator.GetDisplayName()
  self.ParseFileLNKFile(parser_mediator, file_object, display_name)
287,746
Parses a Windows Shortcut (LNK) file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. display_name (str): display name.
def ParseFileLNKFile( self, parser_mediator, file_object, display_name): lnk_file = pylnk.file() lnk_file.set_ascii_codepage(parser_mediator.codepage) try: lnk_file.open_file_object(file_object) except IOError as exception: parser_mediator.ProduceExtractionWarning( 'unable to open file with error: {0!s}'.format(exception)) return link_target = None if lnk_file.link_target_identifier_data: # TODO: change file_entry.name to display name once it is generated # correctly. display_name = parser_mediator.GetFilename() shell_items_parser = shell_items.ShellItemsParser(display_name) shell_items_parser.ParseByteStream( parser_mediator, lnk_file.link_target_identifier_data, codepage=parser_mediator.codepage) link_target = shell_items_parser.CopyToPath() event_data = WinLnkLinkEventData() event_data.birth_droid_file_identifier = ( lnk_file.birth_droid_file_identifier) event_data.birth_droid_volume_identifier = ( lnk_file.birth_droid_volume_identifier) event_data.command_line_arguments = lnk_file.command_line_arguments event_data.description = lnk_file.description event_data.drive_serial_number = lnk_file.drive_serial_number event_data.drive_type = lnk_file.drive_type event_data.droid_file_identifier = lnk_file.droid_file_identifier event_data.droid_volume_identifier = lnk_file.droid_volume_identifier event_data.env_var_location = lnk_file.environment_variables_location event_data.file_attribute_flags = lnk_file.file_attribute_flags event_data.file_size = lnk_file.file_size event_data.icon_location = lnk_file.icon_location event_data.link_target = link_target event_data.local_path = lnk_file.local_path event_data.network_path = lnk_file.network_path event_data.relative_path = lnk_file.relative_path event_data.volume_label = lnk_file.volume_label event_data.working_directory = lnk_file.working_directory access_time = lnk_file.get_file_access_time_as_integer() if access_time != 0: date_time = dfdatetime_filetime.Filetime(timestamp=access_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS) parser_mediator.ProduceEventWithEventData(event, event_data) creation_time = lnk_file.get_file_creation_time_as_integer() if creation_time != 0: date_time = dfdatetime_filetime.Filetime(timestamp=creation_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) modification_time = lnk_file.get_file_modification_time_as_integer() if modification_time != 0: date_time = dfdatetime_filetime.Filetime(timestamp=modification_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data) if access_time == 0 and creation_time == 0 and modification_time == 0: date_time = dfdatetime_semantic_time.SemanticTime('Not set') event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME) parser_mediator.ProduceEventWithEventData(event, event_data) if lnk_file.droid_file_identifier: try: self._ParseDistributedTrackingIdentifier( parser_mediator, lnk_file.droid_file_identifier, display_name) except (TypeError, ValueError) as exception: parser_mediator.ProduceExtractionWarning( 'unable to read droid file identifier with error: {0!s}.'.format( exception)) if lnk_file.birth_droid_file_identifier: try: self._ParseDistributedTrackingIdentifier( parser_mediator, lnk_file.birth_droid_file_identifier, display_name) except (TypeError, ValueError) as exception: 
parser_mediator.ProduceExtractionWarning(( 'unable to read birth droid file identifier with error: ' '{0!s}.').format(exception)) lnk_file.close()
287,747
Adds command line arguments to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
def AddArguments(cls, argument_group): argument_group.add_argument( '--slice', metavar='DATE', dest='slice', type=str, default='', action='store', help=( 'Create a time slice around a certain date. This parameter, if ' 'defined will display all events that happened X minutes before ' 'and after the defined date. X is controlled by the parameter ' '--slice_size but defaults to 5 minutes.')) argument_group.add_argument( '--slice_size', '--slice-size', dest='slice_size', type=int, default=5, action='store', help=( 'Defines the slice size. In the case of a regular time slice it ' 'defines the number of minutes the slice size should be. In the ' 'case of the --slicer it determines the number of events before ' 'and after a filter match has been made that will be included in ' 'the result set. The default value is 5. See --slice or --slicer ' 'for more details about this option.')) argument_group.add_argument( '--slicer', dest='slicer', action='store_true', default=False, help=( 'Create a time slice around every filter match. This parameter, ' 'if defined will save all X events before and after a filter ' 'match has been made. X is defined by the --slice_size ' 'parameter.')) argument_group.add_argument( 'filter', nargs='?', action='store', metavar='FILTER', default=None, type=str, help=( 'A filter that can be used to filter the dataset before it ' 'is written into storage. More information about the filters ' 'and how to use them can be found here: {0:s}').format( cls._DOCUMENTATION_URL))
287,749
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type. BadConfigOption: when a configuration parameter fails validation.
def ParseOptions(cls, options, configuration_object): if not isinstance(configuration_object, tools.CLITool): raise errors.BadConfigObject( 'Configuration object is not an instance of CLITool') filter_expression = cls._ParseStringOption(options, 'filter') filter_object = None if filter_expression: filter_object = event_filter.EventObjectFilter() try: filter_object.CompileFilter(filter_expression) except errors.ParseError as exception: raise errors.BadConfigOption(( 'Unable to compile filter expression with error: ' '{0!s}').format(exception)) time_slice_event_time_string = getattr(options, 'slice', None) time_slice_duration = getattr(options, 'slice_size', 5) use_time_slicer = getattr(options, 'slicer', False) # The slice and slicer cannot be set at the same time. if time_slice_event_time_string and use_time_slicer: raise errors.BadConfigOption( 'Time slice and slicer cannot be used at the same time.') time_slice_event_timestamp = None if time_slice_event_time_string: # Note self._preferred_time_zone is None when not set but represents UTC. preferred_time_zone = getattr( configuration_object, '_preferred_time_zone', None) or 'UTC' timezone = pytz.timezone(preferred_time_zone) time_slice_event_timestamp = timelib.Timestamp.FromTimeString( time_slice_event_time_string, timezone=timezone) if time_slice_event_timestamp is None: raise errors.BadConfigOption( 'Unsupported time slice event date and time: {0:s}'.format( time_slice_event_time_string)) setattr(configuration_object, '_event_filter_expression', filter_expression) if filter_object: setattr(configuration_object, '_event_filter', filter_object) setattr(configuration_object, '_use_time_slicer', use_time_slicer) if time_slice_event_timestamp is not None or use_time_slicer: # Note that time slicer uses the time slice to determine the duration. # TODO: refactor TimeSlice to filters. time_slice = time_slices.TimeSlice( time_slice_event_timestamp, duration=time_slice_duration) setattr(configuration_object, '_time_slice', time_slice)
287,750
Initializes a log2timeline CLI tool. Args: input_reader (Optional[InputReader]): input reader, where None indicates that the stdin input reader should be used. output_writer (Optional[OutputWriter]): output writer, where None indicates that the stdout output writer should be used.
def __init__(self, input_reader=None, output_writer=None):
  super(Log2TimelineTool, self).__init__(
      input_reader=input_reader, output_writer=output_writer)
  self._command_line_arguments = None
  self._enable_sigsegv_handler = False
  self._number_of_extraction_workers = 0
  self._storage_serializer_format = definitions.SERIALIZER_FORMAT_JSON
  self._source_type = None
  self._status_view = status_view.StatusView(self._output_writer, self.NAME)
  self._status_view_mode = status_view.StatusView.MODE_WINDOW
  self._stdout_output_writer = isinstance(
      self._output_writer, tools.StdoutOutputWriter)
  self._worker_memory_limit = None
  self.dependencies_check = True
  self.list_hashers = False
  self.list_parsers_and_plugins = False
  self.list_profilers = False
  self.show_info = False
  self.show_troubleshooting = False
287,751
Parses the options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
def ParseOptions(self, options): # The extraction options are dependent on the data location. helpers_manager.ArgumentHelperManager.ParseOptions( options, self, names=['data_location']) self._ReadParserPresetsFromFile() # Check the list options first otherwise required options will raise. argument_helper_names = ['hashers', 'parsers', 'profiling'] helpers_manager.ArgumentHelperManager.ParseOptions( options, self, names=argument_helper_names) self._ParseTimezoneOption(options) self.list_hashers = self._hasher_names_string == 'list' self.list_parsers_and_plugins = self._parser_filter_expression == 'list' self.list_profilers = self._profilers == 'list' self.show_info = getattr(options, 'show_info', False) self.show_troubleshooting = getattr(options, 'show_troubleshooting', False) if getattr(options, 'use_markdown', False): self._views_format_type = views.ViewsFactory.FORMAT_TYPE_MARKDOWN self.dependencies_check = getattr(options, 'dependencies_check', True) if (self.list_hashers or self.list_parsers_and_plugins or self.list_profilers or self.list_timezones or self.show_info or self.show_troubleshooting): return self._ParseInformationalOptions(options) argument_helper_names = [ 'artifact_definitions', 'artifact_filters', 'extraction', 'filter_file', 'status_view', 'storage_file', 'storage_format', 'text_prepend', 'yara_rules'] helpers_manager.ArgumentHelperManager.ParseOptions( options, self, names=argument_helper_names) self._ParseLogFileOptions(options) self._ParseStorageMediaOptions(options) self._ParsePerformanceOptions(options) self._ParseProcessingOptions(options) if not self._storage_file_path: raise errors.BadConfigOption('Missing storage file option.') serializer_format = getattr( options, 'serializer_format', definitions.SERIALIZER_FORMAT_JSON) if serializer_format not in definitions.SERIALIZER_FORMATS: raise errors.BadConfigOption( 'Unsupported storage serializer format: {0:s}.'.format( serializer_format)) self._storage_serializer_format = serializer_format # TODO: where is this defined? self._operating_system = getattr(options, 'os', None) if self._operating_system: self._mount_path = getattr(options, 'filename', None) helpers_manager.ArgumentHelperManager.ParseOptions( options, self, names=['status_view']) self._enable_sigsegv_handler = getattr(options, 'sigsegv_handler', False) self._EnforceProcessMemoryLimit(self._process_memory_limit)
287,754
Parses an Android usage-history file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
def ParseFileObject(self, parser_mediator, file_object):
  data = file_object.read(self._HEADER_READ_SIZE)
  if not data.startswith(b'<?xml'):
    raise errors.UnableToParseFile(
        'Not an Android usage history file [not XML]')

  _, _, data = data.partition(b'\n')
  if not data.startswith(b'<usage-history'):
    raise errors.UnableToParseFile(
        'Not an Android usage history file [wrong XML root key]')

  # The current offset of the file-like object needs to point at
  # the start of the file for ElementTree to parse the XML data correctly.
  file_object.seek(0, os.SEEK_SET)

  xml = ElementTree.parse(file_object)
  root_node = xml.getroot()

  for application_node in root_node:
    package_name = application_node.get('name', None)

    for part_node in application_node.iter():
      if part_node.tag != 'comp':
        continue

      last_resume_time = part_node.get('lrt', None)
      if last_resume_time is None:
        parser_mediator.ProduceExtractionWarning('missing last resume time.')
        continue

      try:
        last_resume_time = int(last_resume_time, 10)
      except ValueError:
        parser_mediator.ProduceExtractionWarning(
            'unsupported last resume time: {0:s}.'.format(last_resume_time))
        continue

      event_data = AndroidAppUsageEventData()
      event_data.component = part_node.get('name', None)
      event_data.package = package_name

      date_time = dfdatetime_java_time.JavaTime(timestamp=last_resume_time)
      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_LAST_RESUME)
      parser_mediator.ProduceEventWithEventData(event, event_data)
287,758
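A minimal standalone sketch of the same two-stage signature check and ElementTree walk used by ParseFileObject above, run against an in-memory sample; the attribute names (name, lrt) and the usage-history root mirror what the parser checks, while the pkg tag and the data values are invented for illustration.

import io
from xml.etree import ElementTree

# Invented sample mirroring the structure the parser walks: a
# <usage-history> root with per-package nodes that contain <comp>
# entries carrying a last resume time ('lrt') in Java milliseconds.
SAMPLE = (
    b"<?xml version='1.0' encoding='utf-8'?>\n"
    b'<usage-history>\n'
    b'  <pkg name="com.example.app">\n'
    b'    <comp name="com.example.app.MainActivity" lrt="1325376000000"/>\n'
    b'  </pkg>\n'
    b'</usage-history>\n')

file_object = io.BytesIO(SAMPLE)

data = file_object.read(128)
if not data.startswith(b'<?xml'):
  raise ValueError('not XML')

_, _, data = data.partition(b'\n')
if not data.startswith(b'<usage-history'):
  raise ValueError('wrong XML root key')

# Rewind so ElementTree parses the document from the start.
file_object.seek(0, io.SEEK_SET)
root_node = ElementTree.parse(file_object).getroot()

for package_node in root_node:
  for comp_node in package_node.iter('comp'):
    last_resume_time = int(comp_node.get('lrt'), 10)
    print(package_node.get('name'), comp_node.get('name'), last_resume_time)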
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type. BadConfigOption: if the required artifact definitions are not defined.
def ParseOptions(cls, options, configuration_object): if not isinstance(configuration_object, tools.CLITool): raise errors.BadConfigObject( 'Configuration object is not an instance of CLITool') artifacts_path = getattr(options, 'artifact_definitions_path', None) data_location = getattr(configuration_object, '_data_location', None) if ((not artifacts_path or not os.path.exists(artifacts_path)) and data_location): artifacts_path = os.path.dirname(data_location) artifacts_path = os.path.join(artifacts_path, 'artifacts') if not os.path.exists(artifacts_path) and 'VIRTUAL_ENV' in os.environ: artifacts_path = os.path.join( os.environ['VIRTUAL_ENV'], 'share', 'artifacts') if not os.path.exists(artifacts_path): artifacts_path = os.path.join(sys.prefix, 'share', 'artifacts') if not os.path.exists(artifacts_path): artifacts_path = os.path.join(sys.prefix, 'local', 'share', 'artifacts') if sys.prefix != '/usr': if not os.path.exists(artifacts_path): artifacts_path = os.path.join('/usr', 'share', 'artifacts') if not os.path.exists(artifacts_path): artifacts_path = os.path.join('/usr', 'local', 'share', 'artifacts') if not os.path.exists(artifacts_path): artifacts_path = None if not artifacts_path or not os.path.exists(artifacts_path): raise errors.BadConfigOption( 'Unable to determine path to artifact definitions.') custom_artifacts_path = getattr( options, 'custom_artifact_definitions_path', None) if custom_artifacts_path and not os.path.isfile(custom_artifacts_path): raise errors.BadConfigOption( 'No such artifacts filter file: {0:s}.'.format(custom_artifacts_path)) if custom_artifacts_path: logger.info( 'Custom artifact filter file: {0:s}'.format(custom_artifacts_path)) registry = artifacts_registry.ArtifactDefinitionsRegistry() reader = artifacts_reader.YamlArtifactsReader() logger.info( 'Determined artifact definitions path: {0:s}'.format(artifacts_path)) try: registry.ReadFromDirectory(reader, artifacts_path) except (KeyError, artifacts_errors.FormatError) as exception: raise errors.BadConfigOption(( 'Unable to read artifact definitions from: {0:s} with error: ' '{1!s}').format(artifacts_path, exception)) for name in preprocessors_manager.PreprocessPluginsManager.GetNames(): if not registry.GetDefinitionByName(name): raise errors.BadConfigOption( 'Missing required artifact definition: {0:s}'.format(name)) if custom_artifacts_path: try: registry.ReadFromFile(reader, custom_artifacts_path) except (KeyError, artifacts_errors.FormatError) as exception: raise errors.BadConfigOption(( 'Unable to read artifact definitions from: {0:s} with error: ' '{1!s}').format(custom_artifacts_path, exception)) setattr(configuration_object, '_artifact_definitions_path', artifacts_path) setattr( configuration_object, '_custom_artifacts_path', custom_artifacts_path)
287,759
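The fallback search for the artifact definitions directory amounts to probing a list of candidate locations in order and keeping the first one that exists; the sketch below reproduces that idea under the assumption that the same locations are probed in the same order, and omits the data-location and sys.prefix != '/usr' preconditions for brevity.

import os
import sys

# Candidate directories, probed in order; the first existing one wins.
candidates = []
if 'VIRTUAL_ENV' in os.environ:
  candidates.append(
      os.path.join(os.environ['VIRTUAL_ENV'], 'share', 'artifacts'))
candidates.extend([
    os.path.join(sys.prefix, 'share', 'artifacts'),
    os.path.join(sys.prefix, 'local', 'share', 'artifacts'),
    os.path.join('/usr', 'share', 'artifacts'),
    os.path.join('/usr', 'local', 'share', 'artifacts')])

artifacts_path = next(
    (path for path in candidates if os.path.exists(path)), None)

print(artifacts_path)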
Parses and validates options. Args: options (argparse.Namespace): parser options. analysis_plugin (WindowsServicesAnalysisPlugin): analysis plugin to configure. Raises: BadConfigObject: when the analysis plugin object is of the wrong type.
def ParseOptions(cls, options, analysis_plugin): if not isinstance( analysis_plugin, windows_services.WindowsServicesAnalysisPlugin): raise errors.BadConfigObject(( 'Analysis plugin is not an instance of ' 'WindowsServicesAnalysisPlugin')) output_format = cls._ParseStringOption( options, 'windows_services_output', default_value=cls._DEFAULT_OUTPUT) analysis_plugin.SetOutputFormat(output_format)
287,760
Opens a Windows Registry file-like object. Args: file_object (dfvfs.FileIO): Windows Registry file-like object. ascii_codepage (Optional[str]): ASCII string codepage. Returns: WinRegistryFile: Windows Registry file or None.
def Open(self, file_object, ascii_codepage='cp1252'): registry_file = dfwinreg_regf.REGFWinRegistryFile( ascii_codepage=ascii_codepage) # We don't catch any IOErrors here since we want to produce a parse error # from the parser if this happens. registry_file.Open(file_object) return registry_file
287,761
Determines if a plugin can process a Windows Registry key or its values. Args: registry_key (dfwinreg.WinRegistryKey): Windows Registry key. plugin (WindowsRegistryPlugin): Windows Registry plugin. Returns: bool: True if the Registry key can be processed with the plugin.
def _CanProcessKeyWithPlugin(self, registry_key, plugin): for registry_key_filter in plugin.FILTERS: # Skip filters that define key paths since they are already # checked by the path filter. if getattr(registry_key_filter, 'key_paths', []): continue if registry_key_filter.Match(registry_key): return True return False
287,763
Parses the Registry key with a specific plugin. Args: parser_mediator (ParserMediator): parser mediator. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. plugin (WindowsRegistryPlugin): Windows Registry plugin.
def _ParseKeyWithPlugin(self, parser_mediator, registry_key, plugin): try: plugin.UpdateChainAndProcess(parser_mediator, registry_key) except (IOError, dfwinreg_errors.WinRegistryValueError) as exception: parser_mediator.ProduceExtractionWarning( 'in key: {0:s} error: {1!s}'.format(registry_key.path, exception))
287,764
Normalizes a Windows Registry key path. Args: key_path (str): Windows Registry key path. Returns: str: normalized Windows Registry key path.
def _NormalizeKeyPath(self, key_path): normalized_key_path = key_path.lower() # The Registry key path should start with: # HKEY_LOCAL_MACHINE\System\ControlSet followed by 3 digits # which makes 39 characters. if (len(normalized_key_path) < 39 or not normalized_key_path.startswith(self._CONTROL_SET_PREFIX)): return normalized_key_path # Key paths that contain ControlSet### must be normalized to # CurrentControlSet. return ''.join([ self._NORMALIZED_CONTROL_SET_PREFIX, normalized_key_path[39:]])
287,765
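The effect of the normalization is easiest to see on a concrete key path. In the sketch below the two prefix constants are stand-ins for the class attributes the method reads, assumed here to be lower case so the result matches the lower-cased return values of the other branches.

CONTROL_SET_PREFIX = 'hkey_local_machine\\system\\controlset'
NORMALIZED_CONTROL_SET_PREFIX = 'hkey_local_machine\\system\\currentcontrolset'

def normalize_key_path(key_path):
  normalized_key_path = key_path.lower()
  # 36 characters of prefix plus a 3-digit control set number is 39.
  if (len(normalized_key_path) < 39 or
      not normalized_key_path.startswith(CONTROL_SET_PREFIX)):
    return normalized_key_path
  return ''.join([NORMALIZED_CONTROL_SET_PREFIX, normalized_key_path[39:]])

print(normalize_key_path(
    'HKEY_LOCAL_MACHINE\\System\\ControlSet001\\Services\\Tcpip'))
# hkey_local_machine\system\currentcontrolset\services\tcpip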
Parses the Registry key with a matching Windows Registry plugin. Args: parser_mediator (ParserMediator): parser mediator. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
def _ParseKey(self, parser_mediator, registry_key): matching_plugin = None normalized_key_path = self._NormalizeKeyPath(registry_key.path) if self._path_filter.CheckPath(normalized_key_path): matching_plugin = self._plugin_per_key_path[normalized_key_path] else: for plugin in self._plugins_without_key_paths: if self._CanProcessKeyWithPlugin(registry_key, plugin): matching_plugin = plugin break if not matching_plugin: matching_plugin = self._default_plugin if matching_plugin: self._ParseKeyWithPlugin(parser_mediator, registry_key, matching_plugin)
287,766
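The selection order in _ParseKey, exact key-path plugins first, then plugins whose filters inspect the key itself, then the default plugin, can be illustrated with hypothetical plugin names and a trivial path-based matcher; none of these names exist in the codebase and the real per-key filters match on the key object rather than its path.

# Hypothetical stand-ins for the three dispatch tiers.
plugin_per_key_path = {
    'hkey_local_machine\\system\\currentcontrolset\\services': 'services'}
plugins_without_key_paths = [('run_values', lambda path: 'run' in path)]
default_plugin = 'default'

def select_plugin(normalized_key_path):
  plugin = plugin_per_key_path.get(normalized_key_path, None)
  if plugin:
    return plugin
  for name, matches in plugins_without_key_paths:
    if matches(normalized_key_path):
      return name
  return default_plugin

print(select_plugin(
    'hkey_local_machine\\system\\currentcontrolset\\services'))  # services
print(select_plugin('hkey_local_machine\\software\\run'))        # run_values
print(select_plugin('hkey_local_machine\\software\\classes'))    # default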
Parses the Registry keys recursively. Args: parser_mediator (ParserMediator): parser mediator. root_key (dfwinreg.WinRegistryKey): root Windows Registry key.
def _ParseRecurseKeys(self, parser_mediator, root_key): for registry_key in root_key.RecurseKeys(): if parser_mediator.abort: break self._ParseKey(parser_mediator, registry_key)
287,767
Parses the Registry keys from FindSpecs. Args: parser_mediator (ParserMediator): parser mediator. win_registry (dfwinreg.WinRegistry): Windows Registry. find_specs (list[dfwinreg.FindSpec]): keys to search for.
def _ParseKeysFromFindSpecs(self, parser_mediator, win_registry, find_specs): searcher = dfwinreg_registry_searcher.WinRegistrySearcher(win_registry) for registry_key_path in iter(searcher.Find(find_specs=find_specs)): if parser_mediator.abort: break registry_key = searcher.GetKeyByPath(registry_key_path) self._ParseKey(parser_mediator, registry_key)
287,768
Parses a Windows Registry file-like object. Args: parser_mediator (ParserMediator): parser mediator. file_object (dfvfs.FileIO): a file-like object.
def ParseFileObject(self, parser_mediator, file_object): win_registry_reader = FileObjectWinRegistryFileReader() try: registry_file = win_registry_reader.Open(file_object) except IOError as exception: parser_mediator.ProduceExtractionWarning( 'unable to open Windows Registry file with error: {0!s}'.format( exception)) return win_registry = dfwinreg_registry.WinRegistry() key_path_prefix = win_registry.GetRegistryFileMapping(registry_file) registry_file.SetKeyPathPrefix(key_path_prefix) root_key = registry_file.GetRootKey() if not root_key: return registry_find_specs = getattr( parser_mediator.artifacts_filter_helper, 'registry_find_specs', None) if not registry_find_specs: try: self._ParseRecurseKeys(parser_mediator, root_key) except IOError as exception: parser_mediator.ProduceExtractionWarning('{0!s}'.format(exception)) else: artifacts_filter_helper = artifact_filters.ArtifactDefinitionsFilterHelper if not artifacts_filter_helper.CheckKeyCompatibility(key_path_prefix): logger.warning(( 'Artifacts filters are not supported for Windows Registry file ' 'with key path prefix: "{0:s}".').format(key_path_prefix)) else: try: win_registry.MapFile(key_path_prefix, registry_file) self._ParseKeysFromFindSpecs( parser_mediator, win_registry, registry_find_specs) except IOError as exception: parser_mediator.ProduceExtractionWarning('{0!s}'.format(exception))
287,769
Creates a storage reader based on the file. Args: path (str): path to the storage file. Returns: StorageReader: a storage reader or None if the storage file cannot be opened or the storage format is not supported.
def CreateStorageReaderForFile(cls, path): if sqlite_file.SQLiteStorageFile.CheckSupportedFormat( path, check_readable_only=True): return sqlite_reader.SQLiteStorageFileReader(path) return None
287,770
Creates a storage writer. Args: session (Session): session the storage changes are part of. path (str): path to the storage file. storage_format (str): storage format. Returns: StorageWriter: a storage writer or None if the storage file cannot be opened or the storage format is not supported.
def CreateStorageWriter(cls, storage_format, session, path): if storage_format == definitions.STORAGE_FORMAT_SQLITE: return sqlite_writer.SQLiteStorageFileWriter(session, path) return None
287,771
Creates a storage writer based on the file. Args: session (Session): session the storage changes are part of. path (str): path to the storage file. Returns: StorageWriter: a storage writer or None if the storage file cannot be opened or the storage format is not supported.
def CreateStorageWriterForFile(cls, session, path): if sqlite_file.SQLiteStorageFile.CheckSupportedFormat(path): return sqlite_writer.SQLiteStorageFileWriter(session, path) return None
287,772
Initializes the output module object. Args: output_mediator (OutputMediator): mediates interactions between output modules and other components, such as storage and dfvfs.
def __init__(self, output_mediator): super(MySQL4n6TimeOutputModule, self).__init__(output_mediator) self._connection = None self._count = None self._cursor = None self._dbname = 'log2timeline' self._host = 'localhost' self._password = 'forensic' self._port = None self._user = 'root'
287,773
Sets the database credentials. Args: password (Optional[str]): password to access the database. username (Optional[str]): username to access the database.
def SetCredentials(self, password=None, username=None): if password: self._password = password if username: self._user = username
287,775
Sets the server information. Args: server (str): hostname or IP address of the database server. port (int): port number of the database server.
def SetServerInformation(self, server, port): self._host = server self._port = port
287,776
Writes the body of an event object to the output. Args: event (EventObject): event.
def WriteEventBody(self, event): if not hasattr(event, 'timestamp'): return row = self._GetSanitizedEventValues(event) try: self._cursor.execute(self._INSERT_QUERY, row) except MySQLdb.Error as exception: logger.warning( 'Unable to insert into database with error: {0!s}.'.format( exception)) self._count += 1 # TODO: Experiment if committing the current transaction # every 10000 inserts is the optimal approach. if self._count % 10000 == 0: self._connection.commit() if self._set_status: self._set_status('Inserting event: {0:d}'.format(self._count))
287,777
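The insert-then-commit-in-batches pattern is independent of MySQL; below is a sketch of the same idea using the standard-library sqlite3 module and a made-up one-column table, with the 10000-row batch size taken from the method above.

import sqlite3

connection = sqlite3.connect(':memory:')
cursor = connection.cursor()
cursor.execute('CREATE TABLE events (timestamp INTEGER)')

count = 0
for timestamp in range(25000):
  cursor.execute('INSERT INTO events VALUES (?)', (timestamp,))
  count += 1
  # Commit per batch of 10000 inserts rather than per row.
  if count % 10000 == 0:
    connection.commit()

# Flush the final partial batch.
connection.commit()
print(count)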
Initializes a table view. Args: column_names (Optional[list[str]]): column names. title (Optional[str]): title.
def __init__(self, column_names=None, title=None): super(BaseTableView, self).__init__() self._columns = column_names or [] self._number_of_columns = len(self._columns) self._rows = [] self._title = title
287,779
Adds a row of values. Args: values (list[object]): values. Raises: ValueError: if the number of values is out of bounds.
def AddRow(self, values): if self._number_of_columns and len(values) != self._number_of_columns: raise ValueError('Number of values is out of bounds.') self._rows.append(values) if not self._number_of_columns: self._number_of_columns = len(values)
287,780
Initializes a command line table view. Args: column_names (Optional[list[str]]): column names. title (Optional[str]): title.
def __init__(self, column_names=None, title=None): super(CLITableView, self).__init__(column_names=column_names, title=title) if self._columns: self._column_width = len(self._columns[0]) else: self._column_width = 0
287,781
Writes a header. Args: output_writer (OutputWriter): output writer.
def _WriteHeader(self, output_writer): header_string = '' if self._title: header_string = ' {0:s} '.format(self._title) header_string = self._HEADER_FORMAT_STRING.format(header_string) output_writer.Write(header_string)
287,782
Writes a row of values aligned to the column width. Args: output_writer (OutputWriter): output writer. values (list[object]): values.
def _WriteRow(self, output_writer, values): maximum_row_width = self._MAXIMUM_WIDTH - self._column_width - 3 # The format string of the first line of the column value. primary_format_string = '{{0:>{0:d}s}} : {{1:s}}\n'.format( self._column_width) # The format string of successive lines of the column value. secondary_format_string = '{{0:<{0:d}s}}{{1:s}}\n'.format( self._column_width + 3) if isinstance(values[1], py2to3.STRING_TYPES): value_string = values[1] else: value_string = '{0!s}'.format(values[1]) if len(value_string) < maximum_row_width: output_writer.Write(primary_format_string.format( values[0], value_string)) return # Split the column value in words. words = value_string.split() current = 0 lines = [] word_buffer = [] for word in words: current += len(word) + 1 if current >= maximum_row_width: current = len(word) lines.append(' '.join(word_buffer)) word_buffer = [word] else: word_buffer.append(word) lines.append(' '.join(word_buffer)) # Split the column value across multiple lines. output_writer.Write( primary_format_string.format(values[0], lines[0])) for line in lines[1:]: output_writer.Write(secondary_format_string.format('', line))
287,783
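A worked run of the same word-wrapping loop, with a hypothetical 20-character row budget, shows how a long column value becomes one primary line plus continuation lines.

maximum_row_width = 20
value_string = 'copy of the quarterly report from the file server'

current = 0
lines = []
word_buffer = []
for word in value_string.split():
  current += len(word) + 1
  if current >= maximum_row_width:
    current = len(word)
    lines.append(' '.join(word_buffer))
    word_buffer = [word]
  else:
    word_buffer.append(word)
lines.append(' '.join(word_buffer))

print(lines)
# ['copy of the', 'quarterly report', 'from the file', 'server']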
Adds a row of values. Args: values (list[object]): values. Raises: ValueError: if the number of values is out of bounds.
def AddRow(self, values): super(CLITableView, self).AddRow(values) value_length = len(values[0]) if value_length > self._column_width: self._column_width = value_length
287,784
Writes the table to the output writer. Args: output_writer (OutputWriter): output writer. Raises: RuntimeError: if the title exceeds the maximum width, the table has more than 2 columns, or the column width is out of bounds.
def Write(self, output_writer): if self._title and len(self._title) > self._MAXIMUM_WIDTH: raise RuntimeError('Title length out of bounds.') if self._number_of_columns not in (0, 2): raise RuntimeError('Unsupported number of columns: {0:d}.'.format( self._number_of_columns)) if self._column_width < 0 or self._column_width >= self._MAXIMUM_WIDTH: raise RuntimeError('Column width out of bounds.') output_writer.Write('\n') self._WriteHeader(output_writer) if self._columns: self._WriteRow(output_writer, self._columns) self._WriteSeparatorLine(output_writer) for values in self._rows: self._WriteRow(output_writer, values) self._WriteSeparatorLine(output_writer)
287,785
Initializes a command line table view. Args: column_names (Optional[list[str]]): column names. column_sizes (Optional[list[int]]): minimum column sizes, in number of characters. If a column name or row value is larger than the minimum column size the column will be enlarged. Note that the minimum columns size will be rounded up to the number of spaces of the next tab. title (Optional[str]): title.
def __init__(self, column_names=None, column_sizes=None, title=None): super(CLITabularTableView, self).__init__( column_names=column_names, title=title) self._column_sizes = column_sizes or []
287,786
Adds a row of values. Args: values (list[object]): values. Raises: ValueError: if the number of values is out of bounds.
def AddRow(self, values): if self._number_of_columns and len(values) != self._number_of_columns: raise ValueError('Number of values is out of bounds.') if not self._column_sizes and self._columns: self._column_sizes = [len(column) for column in self._columns] value_strings = [] for value_index, value_string in enumerate(values): if not isinstance(value_string, py2to3.UNICODE_TYPE): value_string = '{0!s}'.format(value_string) value_strings.append(value_string) self._column_sizes[value_index] = max( self._column_sizes[value_index], len(value_string)) self._rows.append(value_strings) if not self._number_of_columns: self._number_of_columns = len(value_strings)
287,787
Writes the table to the output writer. Args: output_writer (OutputWriter): output writer.
def Write(self, output_writer): if self._title: output_writer.Write('### {0:s}\n\n'.format(self._title)) if not self._columns: self._columns = ['' for _ in range(0, self._number_of_columns)] output_writer.Write(' | '.join(self._columns)) output_writer.Write('\n') output_writer.Write(' | '.join(['---' for _ in self._columns])) output_writer.Write('\n') for values in self._rows: values = ['{0!s}'.format(value) for value in values] output_writer.Write(' | '.join(values)) output_writer.Write('\n') output_writer.Write('\n')
287,788
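With hypothetical column names and rows, the Markdown layout this Write method emits can be sketched standalone.

columns = ['Name', 'Description']
rows = [['hashers', 'Calculates file hashes.'],
        ['parsers', 'Extracts events from files.']]

output = ['### Example table\n\n']
output.append(' | '.join(columns) + '\n')
output.append(' | '.join(['---' for _ in columns]) + '\n')
for values in rows:
  output.append(' | '.join('{0!s}'.format(value) for value in values) + '\n')
output.append('\n')

print(''.join(output))
# ### Example table
#
# Name | Description
# --- | ---
# hashers | Calculates file hashes.
# parsers | Extracts events from files.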
Retrieves a table view. Args: format_type (str): table view format type. column_names (Optional[list[str]]): column names. title (Optional[str]): title. Returns: BaseTableView: table view. Raises: ValueError: if the format type is not supported.
def GetTableView(cls, format_type, column_names=None, title=None): view_class = cls._TABLE_VIEW_FORMAT_CLASSES.get(format_type, None) if not view_class: raise ValueError('Unsupported format type: {0:s}'.format(format_type)) return view_class(column_names=column_names, title=title)
287,789
Retrieves a data type map defined by the definition file. The data type maps are cached for reuse. Args: name (str): name of the data type as defined by the definition file. Returns: dtfabric.DataTypeMap: data type map which contains a data type definition, such as a structure, that can be mapped onto binary data.
def _GetDataTypeMap(self, name): data_type_map = self._data_type_maps.get(name, None) if not data_type_map: data_type_map = self._fabric.CreateDataTypeMap(name) self._data_type_maps[name] = data_type_map return data_type_map
287,791
Reads a dtFabric definition file. Args: filename (str): name of the dtFabric definition file. Returns: dtfabric.DataTypeFabric: data type fabric, which contains the data type maps of the data format definition, such as a structure, that can be mapped onto binary data, or None if no filename is provided.
def _ReadDefinitionFile(self, filename): if not filename: return None path = os.path.join(self._DEFINITION_FILES_PATH, filename) with open(path, 'rb') as file_object: definition = file_object.read() return dtfabric_fabric.DataTypeFabric(yaml_definition=definition)
287,792
Parses URIs containing .md and replaces them with links to the corresponding HTML pages. Args: node (nodes.Node): docutils node. Returns: nodes.Node: docutils node.
def find_and_replace(self, node): if isinstance(node, nodes.reference) and 'refuri' in node: reference_uri = node['refuri'] if reference_uri.endswith('.md') and not reference_uri.startswith('http'): reference_uri = reference_uri[:-3] + '.html' node['refuri'] = reference_uri else: match = self.ANCHOR_REGEX.match(reference_uri) if match: node['refuri'] = '{0:s}.html#{1:s}'.format( match.group('uri'), match.group('anchor')) return node
287,796
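The rewriting can be exercised on concrete URIs with a small standalone sketch; the regex below is a hypothetical stand-in for the class attribute ANCHOR_REGEX, which is defined elsewhere and is assumed here to capture "<uri>.md#<anchor>" style links.

import re

# Hypothetical stand-in for ANCHOR_REGEX.
ANCHOR_REGEX = re.compile(r'^(?P<uri>.+)\.md#(?P<anchor>.+)$')

def rewrite(reference_uri):
  if reference_uri.endswith('.md') and not reference_uri.startswith('http'):
    return reference_uri[:-3] + '.html'
  match = ANCHOR_REGEX.match(reference_uri)
  if match:
    return '{0:s}.html#{1:s}'.format(
        match.group('uri'), match.group('anchor'))
  return reference_uri

print(rewrite('Users-Guide.md'))             # Users-Guide.html
print(rewrite('Developers-Guide.md#setup'))  # Developers-Guide.html#setup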
Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
def AddArguments(cls, argument_group):
    argument_group.add_argument(
        '--fields', dest='fields', type=str, action='store',
        default=cls._DEFAULT_FIELDS, help=(
            'Defines which fields should be included in the output.'))

    argument_group.add_argument(
        '--additional_fields', dest='additional_fields', type=str,
        action='store', default='', help=(
            'Defines extra fields to be included in the output, in addition to'
            ' the default fields, which are {0:s}.'.format(
                cls._DEFAULT_FIELDS)))

    argument_group.add_argument(
        '--timestamp_format', dest='timestamp_format', type=str,
        action='store', default=cls._DEFAULT_TIMESTAMP_FORMAT, help=(
            'Set the timestamp format that will be used in the datetime '
            'column of the XLSX spreadsheet.'))
287,798
Parses and validates options. Args: options (argparse.Namespace): parser options. output_module (XLSXOutputModule): output module to configure. Raises: BadConfigObject: when the output module object is of the wrong type. BadConfigOption: when the output filename was not provided.
def ParseOptions(cls, options, output_module):
    if not isinstance(output_module, xlsx.XLSXOutputModule):
      raise errors.BadConfigObject(
          'Output module is not an instance of XLSXOutputModule')

    fields = cls._ParseStringOption(
        options, 'fields', default_value=cls._DEFAULT_FIELDS)

    additional_fields = cls._ParseStringOption(options, 'additional_fields')

    if additional_fields:
      fields = '{0:s},{1:s}'.format(fields, additional_fields)

    filename = getattr(options, 'write', None)
    if not filename:
      raise errors.BadConfigOption(
          'Output filename was not provided; use "-w filename" to specify.')

    timestamp_format = cls._ParseStringOption(
        options, 'timestamp_format',
        default_value=cls._DEFAULT_TIMESTAMP_FORMAT)

    output_module.SetFields([
        field_name.strip() for field_name in fields.split(',')])
    output_module.SetFilename(filename)
    output_module.SetTimestampFormat(timestamp_format)
287,799
Builds a dictionary of the values in the strings table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. table (pyesedb.table): strings table. Returns: dict[str, object]: filename per identifier.
def _GetDictFromStringsTable(self, parser_mediator, table): if not table: return {} record_values = {} for record in table.records: if parser_mediator.abort: break if record.get_number_of_values() != 2: continue identification = self._GetRecordValue(record, 0) filename = self._GetRecordValue(record, 1) if not identification: continue record_values[identification] = filename return record_values
287,803
Parses the namespace table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. cache (Optional[ESEDBCache]): cache. database (Optional[pyesedb.file]): ESE database. table (Optional[pyesedb.table]): table. Raises: ValueError: if the database or table value is missing.
def ParseNameSpace( self, parser_mediator, cache=None, database=None, table=None, **unused_kwargs): if database is None: raise ValueError('Missing database value.') if table is None: raise ValueError('Missing table value.') strings = cache.GetResults('strings') if not strings: esedb_table = database.get_table_by_name('string') strings = self._GetDictFromStringsTable(parser_mediator, esedb_table) cache.StoreDictInCache('strings', strings) for esedb_record in table.records: if parser_mediator.abort: break record_values = self._GetRecordValues( parser_mediator, table.name, esedb_record) event_data = FileHistoryNamespaceEventData() event_data.file_attribute = record_values.get('fileAttrib', None) event_data.identifier = record_values.get('id', None) event_data.parent_identifier = record_values.get('parentId', None) event_data.usn_number = record_values.get('usn', None) event_data.original_filename = strings.get(event_data.identifier, None) created_timestamp = record_values.get('fileCreated') if created_timestamp: date_time = dfdatetime_filetime.Filetime(timestamp=created_timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) modified_timestamp = record_values.get('fileModified') if modified_timestamp: date_time = dfdatetime_filetime.Filetime(timestamp=modified_timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data) if not created_timestamp and not modified_timestamp: date_time = dfdatetime_semantic_time.SemanticTime('Not set') event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME) parser_mediator.ProduceEventWithEventData(event, event_data)
287,804
Initializes a date and time range. The timestamps are integers containing the number of microseconds since January 1, 1970, 00:00:00 UTC. Args: start_timestamp (int): timestamp that marks the start of the range. end_timestamp (int): timestamp that marks the end of the range. Raises: ValueError: if the time range is badly formed.
def __init__(self, start_timestamp, end_timestamp):
    if start_timestamp is None or end_timestamp is None:
      raise ValueError(
          'Time range must have both a start and an end timestamp.')

    if start_timestamp > end_timestamp:
      raise ValueError(
          'Invalid time range: start timestamp must be earlier than end '
          'timestamp.')

    super(TimeRange, self).__init__()
    self.duration = end_timestamp - start_timestamp
    self.end_timestamp = end_timestamp
    self.start_timestamp = start_timestamp
287,805
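As a usage sketch, assuming the TimeRange class above, a range covering the first minute of 2012 with both bounds expressed as microseconds since January 1, 1970:

start_timestamp = 1325376000 * 1000000  # 2012-01-01 00:00:00 UTC
end_timestamp = start_timestamp + (60 * 1000000)

time_range = TimeRange(start_timestamp, end_timestamp)
print(time_range.duration)  # 60000000 microseconds

# Swapping the two bounds trips the ordering check and raises ValueError.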
Initializes an analysis plugin mediator. Args: storage_writer (StorageWriter): storage writer. knowledge_base (KnowledgeBase): contains information from the source data needed for analysis. data_location (Optional[str]): location of data files used during analysis.
def __init__(self, storage_writer, knowledge_base, data_location=None): super(AnalysisMediator, self).__init__() self._abort = False self._data_location = data_location self._event_filter_expression = None self._knowledge_base = knowledge_base self._mount_path = None self._storage_writer = storage_writer self._text_prepend = None self.last_activity_timestamp = 0.0 self.number_of_produced_analysis_reports = 0 self.number_of_produced_event_tags = 0
287,806
Retrieves the display name for a path specification. Args: path_spec (dfvfs.PathSpec): path specification. Returns: str: human readable version of the path specification.
def GetDisplayNameForPathSpec(self, path_spec): return path_helper.PathHelper.GetDisplayNameForPathSpec( path_spec, mount_path=self._mount_path, text_prepend=self._text_prepend)
287,807
Produces an analysis report. Args: plugin (AnalysisPlugin): plugin.
def ProduceAnalysisReport(self, plugin): analysis_report = plugin.CompileReport(self) if not analysis_report: return analysis_report.time_compiled = timelib.Timestamp.GetNow() plugin_name = getattr(analysis_report, 'plugin_name', plugin.plugin_name) if plugin_name: analysis_report.plugin_name = plugin_name if self._event_filter_expression: # TODO: rename filter string when refactoring the analysis reports. analysis_report.filter_string = self._event_filter_expression self._storage_writer.AddAnalysisReport(analysis_report) self.number_of_produced_analysis_reports += 1 self.number_of_produced_event_tags = ( self._storage_writer.number_of_event_tags) self.last_activity_timestamp = time.time()
287,808
Produces an event tag. Args: event_tag (EventTag): event tag.
def ProduceEventTag(self, event_tag): self._storage_writer.AddEventTag(event_tag) self.number_of_produced_event_tags += 1 self.last_activity_timestamp = time.time()
287,809
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string.
def GetMessages(self, formatter_mediator, event): event_values = event.CopyToDict() # TODO: clean up the default formatter and add a test to make sure # it is clear how it is intended to work. text_pieces = [] for key, value in event_values.items(): if key in definitions.RESERVED_VARIABLE_NAMES: continue text_pieces.append('{0:s}: {1!s}'.format(key, value)) event_values['attribute_driven'] = ' '.join(text_pieces) event_values['data_type'] = self.DATA_TYPE return self._FormatMessages( self.FORMAT_STRING, self.FORMAT_STRING_SHORT, event_values)
287,810
Adds command line arguments to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
def AddArguments(cls, argument_group): argument_group.add_argument( '--language', metavar='LANGUAGE', dest='preferred_language', default='en-US', type=str, help=( 'The preferred language identifier for Windows Event Log message ' 'strings. Use "--language list" to see a list of available ' 'language identifiers. Note that formatting will fall back on ' 'en-US (LCID 0x0409) if the preferred language is not available ' 'in the database of message string templates.'))
287,811
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type.
def ParseOptions(cls, options, configuration_object): if not isinstance(configuration_object, tools.CLITool): raise errors.BadConfigObject( 'Configuration object is not an instance of CLITool') preferred_language = cls._ParseStringOption( options, 'preferred_language', default_value='en-US') setattr(configuration_object, '_preferred_language', preferred_language)
287,812
Pushes an event onto the heap. Args: event (EventObject): event.
def PushEvent(self, event): macb_group_identifier, content_identifier = self._GetEventIdentifiers(event) # We can ignore the timestamp here because the psort engine only stores # events with the same timestamp in the event heap. heap_values = (macb_group_identifier or '', content_identifier, event) heapq.heappush(self._heap, heap_values)
287,816
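The deterministic ordering comes from heapq comparing the pushed tuples element by element, first on the MACB group identifier and then on the content identifier; below is a standalone sketch with made-up identifiers and plain strings standing in for EventObject instances.

import heapq

heap = []
heapq.heappush(heap, ('', 'data_type: fs:stat', 'event-b'))
heapq.heappush(heap, ('macb-group-1', 'data_type: fs:stat', 'event-c'))
heapq.heappush(heap, ('', 'data_type: apache:access', 'event-a'))

while heap:
  print(heapq.heappop(heap))
# ('', 'data_type: apache:access', 'event-a')
# ('', 'data_type: fs:stat', 'event-b')
# ('macb-group-1', 'data_type: fs:stat', 'event-c')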
Initializes an engine object. Args: use_zeromq (Optional[bool]): True if ZeroMQ should be used for queuing instead of Python's multiprocessing queue.
def __init__(self, use_zeromq=True): super(PsortMultiProcessEngine, self).__init__() self._analysis_plugins = {} self._completed_analysis_processes = set() self._data_location = None self._event_filter_expression = None self._event_queues = {} self._event_tag_index = event_tag_index.EventTagIndex() self._events_status = processing_status.EventsStatus() # The export event heap is used to make sure the events are sorted in # a deterministic way. self._export_event_heap = PsortEventHeap() self._export_event_timestamp = 0 self._guppy_memory_profiler = None self._knowledge_base = None self._memory_profiler = None self._merge_task = None self._number_of_consumed_event_tags = 0 self._number_of_consumed_events = 0 self._number_of_consumed_reports = 0 self._number_of_consumed_sources = 0 self._number_of_consumed_warnings = 0 self._number_of_produced_event_tags = 0 self._number_of_produced_events = 0 self._number_of_produced_reports = 0 self._number_of_produced_sources = 0 self._number_of_produced_warnings = 0 self._processing_configuration = None self._processing_profiler = None self._serializers_profiler = None self._status = definitions.STATUS_INDICATOR_IDLE self._status_update_callback = None self._use_zeromq = use_zeromq self._worker_memory_limit = definitions.DEFAULT_WORKER_MEMORY_LIMIT
287,817