Columns:
docstring: string (lengths 52 to 499)
function: string (lengths 67 to 35.2k)
__index_level_0__: int64 (values 52.6k to 1.16M)
Parses the credential options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
def _ParseCredentialOptions(self, options): credentials = getattr(options, 'credentials', []) if not isinstance(credentials, list): raise errors.BadConfigOption('Unsupported credentials value.') for credential_string in credentials: credential_type, _, credential_data = credential_string.partition(':') if not credential_type or not credential_data: raise errors.BadConfigOption( 'Badly formatted credential: {0:s}.'.format(credential_string)) if credential_type not in self._SUPPORTED_CREDENTIAL_TYPES: raise errors.BadConfigOption( 'Unsupported credential type for: {0:s}.'.format( credential_string)) if credential_type in self._BINARY_DATA_CREDENTIAL_TYPES: try: credential_data = credential_data.decode('hex') except TypeError: raise errors.BadConfigOption( 'Unsupported credential data for: {0:s}.'.format( credential_string)) self._credentials.append((credential_type, credential_data))
288,270
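Note that str.decode('hex') in _ParseCredentialOptions only exists on Python 2. A minimal sketch of the same BASE-16 decoding step for Python 3, using the standard binascii module (the helper name is illustrative, not part of plaso):

import binascii

def decode_hex_credential(credential_data):
  # Decodes BASE-16 (hexadecimal) credential data into raw key bytes.
  try:
    return binascii.unhexlify(credential_data)
  except (binascii.Error, ValueError) as exception:
    raise ValueError('Unsupported credential data: {0!s}'.format(exception))

print(decode_hex_credential('0a1b2c'))  # b'\n\x1b,'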
Parses the source path option. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
def _ParseSourcePathOption(self, options): self._source_path = self.ParseStringOption(options, self._SOURCE_OPTION) if not self._source_path: raise errors.BadConfigOption('Missing source path.') self._source_path = os.path.abspath(self._source_path)
288,271
Parses the storage media options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
def _ParseStorageMediaOptions(self, options): self._ParseStorageMediaImageOptions(options) self._ParseVSSProcessingOptions(options) self._ParseCredentialOptions(options) self._ParseSourcePathOption(options)
288,272
Parses the storage media image options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
def _ParseStorageMediaImageOptions(self, options): self._partitions = getattr(options, 'partitions', None) if self._partitions: try: self._ParseVolumeIdentifiersString(self._partitions, prefix='p') except ValueError: raise errors.BadConfigOption('Unsupported partitions') self._volumes = getattr(options, 'volumes', None) if self._volumes: try: self._ParseVolumeIdentifiersString(self._volumes, prefix='apfs') except ValueError: raise errors.BadConfigOption('Unsupported volumes')
288,273
Parses the VSS processing options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
def _ParseVSSProcessingOptions(self, options): vss_only = False vss_stores = None self._process_vss = not getattr(options, 'no_vss', False) if self._process_vss: vss_only = getattr(options, 'vss_only', False) vss_stores = getattr(options, 'vss_stores', None) if vss_stores: try: self._ParseVolumeIdentifiersString(vss_stores, prefix='vss') except ValueError: raise errors.BadConfigOption('Unsupported VSS stores') self._vss_only = vss_only self._vss_stores = vss_stores
288,275
Prints an overview of APFS volume identifiers. Args: volume_system (dfvfs.APFSVolumeSystem): volume system. volume_identifiers (list[str]): allowed volume identifiers. Raises: SourceScannerError: if a volume cannot be resolved from the volume identifier.
def _PrintAPFSVolumeIdentifiersOverview( self, volume_system, volume_identifiers): header = 'The following Apple File System (APFS) volumes were found:\n' self._output_writer.Write(header) column_names = ['Identifier', 'Name'] table_view = views.CLITabularTableView(column_names=column_names) for volume_identifier in volume_identifiers: volume = volume_system.GetVolumeByIdentifier(volume_identifier) if not volume: raise errors.SourceScannerError( 'Volume missing for identifier: {0:s}.'.format( volume_identifier)) volume_attribute = volume.GetAttribute('name') table_view.AddRow([volume.identifier, volume_attribute.value]) self._output_writer.Write('\n') table_view.Write(self._output_writer) self._output_writer.Write('\n')
288,276
Prints an overview of TSK partition identifiers. Args: volume_system (dfvfs.TSKVolumeSystem): volume system. volume_identifiers (list[str]): allowed volume identifiers. Raises: SourceScannerError: if a volume cannot be resolved from the volume identifier.
def _PrintTSKPartitionIdentifiersOverview( self, volume_system, volume_identifiers): header = 'The following partitions were found:\n' self._output_writer.Write(header) column_names = ['Identifier', 'Offset (in bytes)', 'Size (in bytes)'] table_view = views.CLITabularTableView(column_names=column_names) for volume_identifier in sorted(volume_identifiers): volume = volume_system.GetVolumeByIdentifier(volume_identifier) if not volume: raise errors.SourceScannerError( 'Partition missing for identifier: {0:s}.'.format( volume_identifier)) volume_extent = volume.extents[0] volume_offset = '{0:d} (0x{0:08x})'.format(volume_extent.offset) volume_size = self._FormatHumanReadableSize(volume_extent.size) table_view.AddRow([volume.identifier, volume_offset, volume_size]) self._output_writer.Write('\n') table_view.Write(self._output_writer) self._output_writer.Write('\n')
288,277
Prints an overview of VSS store identifiers. Args: volume_system (dfvfs.VShadowVolumeSystem): volume system. volume_identifiers (list[str]): allowed volume identifiers. Raises: SourceScannerError: if a volume cannot be resolved from the volume identifier.
def _PrintVSSStoreIdentifiersOverview( self, volume_system, volume_identifiers): header = 'The following Volume Shadow Snapshots (VSS) were found:\n' self._output_writer.Write(header) column_names = ['Identifier', 'Creation Time'] table_view = views.CLITabularTableView(column_names=column_names) for volume_identifier in volume_identifiers: volume = volume_system.GetVolumeByIdentifier(volume_identifier) if not volume: raise errors.SourceScannerError( 'Volume missing for identifier: {0:s}.'.format( volume_identifier)) volume_attribute = volume.GetAttribute('creation_time') filetime = dfdatetime_filetime.Filetime(timestamp=volume_attribute.value) creation_time = filetime.CopyToDateTimeString() if volume.HasExternalData(): creation_time = '{0:s}\tWARNING: data stored outside volume'.format( creation_time) table_view.AddRow([volume.identifier, creation_time]) self._output_writer.Write('\n') table_view.Write(self._output_writer) self._output_writer.Write('\n')
288,278
Prompts the user to provide APFS volume identifiers. Args: volume_system (dfvfs.APFSVolumeSystem): volume system. volume_identifiers (list[str]): volume identifiers including prefix. Returns: list[str]: selected volume identifiers including prefix or None.
def _PromptUserForAPFSVolumeIdentifiers( self, volume_system, volume_identifiers): print_header = True while True: if print_header: self._PrintAPFSVolumeIdentifiersOverview( volume_system, volume_identifiers) print_header = False lines = self._textwrapper.wrap(self._USER_PROMPT_APFS) self._output_writer.Write('\n'.join(lines)) self._output_writer.Write('\n\nVolume identifiers: ') try: selected_volumes = self._ReadSelectedVolumes( volume_system, prefix='apfs') if (not selected_volumes or not set(selected_volumes).difference(volume_identifiers)): break except ValueError: pass self._output_writer.Write('\n') lines = self._textwrapper.wrap( 'Unsupported volume identifier(s), please try again or abort with ' 'Ctrl^C.') self._output_writer.Write('\n'.join(lines)) self._output_writer.Write('\n\n') return selected_volumes
288,279
Prompts the user to provide partition identifiers. Args: volume_system (dfvfs.TSKVolumeSystem): volume system. volume_identifiers (list[str]): volume identifiers including prefix. Returns: list[str]: selected volume identifiers including prefix or None.
def _PromptUserForPartitionIdentifiers( self, volume_system, volume_identifiers): print_header = True while True: if print_header: self._PrintTSKPartitionIdentifiersOverview( volume_system, volume_identifiers) print_header = False lines = self._textwrapper.wrap(self._USER_PROMPT_TSK) self._output_writer.Write('\n'.join(lines)) self._output_writer.Write('\n\nPartition identifiers: ') try: selected_volumes = self._ReadSelectedVolumes(volume_system, prefix='p') if (selected_volumes and not set(selected_volumes).difference(volume_identifiers)): break except ValueError: pass self._output_writer.Write('\n') lines = self._textwrapper.wrap( 'Unsupported partition identifier(s), please try again or abort with ' 'Ctrl^C.') self._output_writer.Write('\n'.join(lines)) self._output_writer.Write('\n\n') return selected_volumes
288,280
Reads the selected volumes provided by the user. Args: volume_system (APFSVolumeSystem): volume system. prefix (Optional[str]): volume identifier prefix. Returns: list[str]: selected volume identifiers including prefix. Raises: KeyboardInterrupt: if the user requested to abort. ValueError: if the volume identifiers string could not be parsed.
def _ReadSelectedVolumes(self, volume_system, prefix='v'): volume_identifiers_string = self._input_reader.Read() volume_identifiers_string = volume_identifiers_string.strip() if not volume_identifiers_string: return [] selected_volumes = self._ParseVolumeIdentifiersString( volume_identifiers_string, prefix=prefix) if selected_volumes == ['all']: return [ '{0:s}{1:d}'.format(prefix, volume_index) for volume_index in range(1, volume_system.number_of_volumes + 1)] return selected_volumes
288,282
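For example, when the user answers 'all' with prefix 'apfs' and a volume system that reports three volumes, the expansion above yields (standalone sketch with illustrative values):

prefix = 'apfs'
number_of_volumes = 3

selected_volumes = [
    '{0:s}{1:d}'.format(prefix, volume_index)
    for volume_index in range(1, number_of_volumes + 1)]

print(selected_volumes)  # ['apfs1', 'apfs2', 'apfs3']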
Scans an encrypted volume scan node for volume and file systems. Args: scan_context (SourceScannerContext): source scanner context. scan_node (SourceScanNode): volume scan node. Raises: SourceScannerError: if the format of or within the source is not supported, the scan node is invalid or there are no credentials defined for the format.
def _ScanEncryptedVolume(self, scan_context, scan_node): if not scan_node or not scan_node.path_spec: raise errors.SourceScannerError('Invalid or missing scan node.') credentials = credentials_manager.CredentialsManager.GetCredentials( scan_node.path_spec) if not credentials: raise errors.SourceScannerError('Missing credentials for scan node.') credentials_dict = { credential_type: credential_data for credential_type, credential_data in self._credentials} is_unlocked = False for credential_type in credentials.CREDENTIALS: credential_data = credentials_dict.get(credential_type, None) if not credential_data: continue is_unlocked = self._source_scanner.Unlock( scan_context, scan_node.path_spec, credential_type, credential_data) if is_unlocked: break if not is_unlocked: is_unlocked = self._PromptUserForEncryptedVolumeCredential( scan_context, scan_node, credentials) if is_unlocked: self._source_scanner.Scan( scan_context, scan_path_spec=scan_node.path_spec)
288,283
Scans a file system scan node for file systems. Args: scan_node (SourceScanNode): file system scan node. base_path_specs (list[PathSpec]): file system base path specifications. Raises: SourceScannerError: if the scan node is invalid.
def _ScanFileSystem(self, scan_node, base_path_specs): if not scan_node or not scan_node.path_spec: raise errors.SourceScannerError( 'Invalid or missing file system scan node.') base_path_specs.append(scan_node.path_spec)
288,284
Adds the credential options to the argument group. The credential options are used to unlock encrypted volumes. Args: argument_group (argparse._ArgumentGroup): argparse argument group.
def AddCredentialOptions(self, argument_group): argument_group.add_argument( '--credential', action='append', default=[], type=str, dest='credentials', metavar='TYPE:DATA', help=( 'Define a credential that can be used to unlock encrypted ' 'volumes e.g. BitLocker. The credential is defined as type:data ' 'e.g. "password:BDE-test". Supported credential types are: ' '{0:s}. Binary key data is expected to be passed in BASE-16 ' 'encoding (hexadecimal). WARNING credentials passed via command ' 'line arguments can end up in logs, so use this option with ' 'care.').format(', '.join(self._SUPPORTED_CREDENTIAL_TYPES)))
288,285
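A standalone sketch of how a repeated --credential option defined this way behaves; the parser below is built directly with argparse and the credential values are illustrative, not plaso's actual CLI:

import argparse

parser = argparse.ArgumentParser()
group = parser.add_argument_group('credentials')
group.add_argument(
    '--credential', action='append', default=[], type=str,
    dest='credentials', metavar='TYPE:DATA')

options = parser.parse_args([
    '--credential', 'password:BDE-test',
    '--credential', 'key_data:0a1b2c'])

for credential_string in options.credentials:
  credential_type, _, credential_data = credential_string.partition(':')
  print(credential_type, credential_data)
# password BDE-test
# key_data 0a1b2c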
Adds the storage media image options to the argument group. Args: argument_group (argparse._ArgumentGroup): argparse argument group.
def AddStorageMediaImageOptions(self, argument_group): argument_group.add_argument( '--partitions', '--partition', dest='partitions', action='store', type=str, default=None, help=( 'Define partitions to be processed. A range of ' 'partitions can be defined as: "3..5". Multiple partitions can ' 'be defined as: "1,3,5" (a list of comma separated values). ' 'Ranges and lists can also be combined as: "1,3..5". The first ' 'partition is 1. All partitions can be specified with: "all".')) argument_group.add_argument( '--volumes', '--volume', dest='volumes', action='store', type=str, default=None, help=( 'Define volumes to be processed. A range of volumes can be defined ' 'as: "3..5". Multiple volumes can be defined as: "1,3,5" (a list ' 'of comma separated values). Ranges and lists can also be combined ' 'as: "1,3..5". The first volume is 1. All volumes can be specified ' 'with: "all".'))
288,286
Adds the VSS processing options to the argument group. Args: argument_group (argparse._ArgumentGroup): argparse argument group.
def AddVSSProcessingOptions(self, argument_group): argument_group.add_argument( '--no_vss', '--no-vss', dest='no_vss', action='store_true', default=False, help=( 'Do not scan for Volume Shadow Snapshots (VSS). This means that ' 'Volume Shadow Snapshots (VSS) are not processed.')) argument_group.add_argument( '--vss_only', '--vss-only', dest='vss_only', action='store_true', default=False, help=( 'Do not process the current volume if Volume Shadow Snapshots ' '(VSS) have been selected.')) argument_group.add_argument( '--vss_stores', '--vss-stores', dest='vss_stores', action='store', type=str, default=None, help=( 'Define Volume Shadow Snapshots (VSS) stores that need to be ' 'processed. A range of stores can be defined as: "3..5". ' 'Multiple stores can be defined as: "1,3,5" (a list of comma ' 'separated values). Ranges and lists can also be combined as: ' '"1,3..5". The first store is 1. All stores can be defined as: ' '"all".'))
288,287
Scans the source path for volume and file systems. This function sets the internal source path specification and source type values. Args: source_path (str): path to the source. Returns: dfvfs.SourceScannerContext: source scanner context. Raises: SourceScannerError: if the format of or within the source is not supported.
def ScanSource(self, source_path): # Symbolic links are resolved here and not earlier to preserve the user # specified source path in storage and reporting. if os.path.islink(source_path): source_path = os.path.realpath(source_path) if (not source_path.startswith('\\\\.\\') and not os.path.exists(source_path)): raise errors.SourceScannerError( 'No such device, file or directory: {0:s}.'.format(source_path)) scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(source_path) try: self._source_scanner.Scan(scan_context) except (ValueError, dfvfs_errors.BackEndError) as exception: raise errors.SourceScannerError( 'Unable to scan source with error: {0!s}.'.format(exception)) if scan_context.source_type not in ( scan_context.SOURCE_TYPE_STORAGE_MEDIA_DEVICE, scan_context.SOURCE_TYPE_STORAGE_MEDIA_IMAGE): scan_node = scan_context.GetRootScanNode() self._source_path_specs.append(scan_node.path_spec) return scan_context # Get the first node where we need to decide what to process. scan_node = scan_context.GetRootScanNode() while len(scan_node.sub_nodes) == 1: scan_node = scan_node.sub_nodes[0] base_path_specs = [] if scan_node.type_indicator != ( dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION): self._ScanVolume(scan_context, scan_node, base_path_specs) else: # Determine which partition needs to be processed. partition_identifiers = self._GetTSKPartitionIdentifiers(scan_node) if not partition_identifiers: raise errors.SourceScannerError('No partitions found.') for partition_identifier in partition_identifiers: location = '/{0:s}'.format(partition_identifier) sub_scan_node = scan_node.GetSubNodeByLocation(location) self._ScanVolume(scan_context, sub_scan_node, base_path_specs) if not base_path_specs: raise errors.SourceScannerError( 'No supported file system found in source.') self._source_path_specs = base_path_specs return scan_context
288,288
Initializes a Windows Registry file reader object. Args: file_system (dfvfs.FileSystem): file system. mount_point (dfvfs.PathSpec): mount point path specification. environment_variables (Optional[list[EnvironmentVariableArtifact]]): environment variables.
def __init__(self, file_system, mount_point, environment_variables=None): super(FileSystemWinRegistryFileReader, self).__init__() self._file_system = file_system self._path_resolver = self._CreateWindowsPathResolver( file_system, mount_point, environment_variables=environment_variables)
288,289
Creates a Windows path resolver and sets the environment variables. Args: file_system (dfvfs.FileSystem): file system. mount_point (dfvfs.PathSpec): mount point path specification. environment_variables (list[EnvironmentVariableArtifact]): environment variables. Returns: dfvfs.WindowsPathResolver: Windows path resolver.
def _CreateWindowsPathResolver( self, file_system, mount_point, environment_variables): if environment_variables is None: environment_variables = [] path_resolver = windows_path_resolver.WindowsPathResolver( file_system, mount_point) for environment_variable in environment_variables: name = environment_variable.name.lower() if name not in ('systemroot', 'userprofile'): continue path_resolver.SetEnvironmentVariable( environment_variable.name, environment_variable.value) return path_resolver
288,290
Opens the Windows Registry file specified by the path specification. Args: path_specification (dfvfs.PathSpec): path specification. ascii_codepage (Optional[str]): ASCII string codepage. Returns: WinRegistryFile: Windows Registry file or None.
def _OpenPathSpec(self, path_specification, ascii_codepage='cp1252'): if not path_specification: return None file_entry = self._file_system.GetFileEntryByPathSpec(path_specification) if file_entry is None: return None file_object = file_entry.GetFileObject() if file_object is None: return None registry_file = dfwinreg_regf.REGFWinRegistryFile( ascii_codepage=ascii_codepage) try: registry_file.Open(file_object) except IOError as exception: logger.warning( 'Unable to open Windows Registry file with error: {0!s}'.format( exception)) file_object.close() return None return registry_file
288,291
Opens the Windows Registry file specified by the path. Args: path (str): path of the Windows Registry file. ascii_codepage (Optional[str]): ASCII string codepage. Returns: WinRegistryFile: Windows Registry file or None.
def Open(self, path, ascii_codepage='cp1252'): path_specification = self._path_resolver.ResolvePath(path) if path_specification is None: return None return self._OpenPathSpec(path_specification)
288,292
Collects values from the file system. Args: artifacts_registry (artifacts.ArtifactDefinitionsRegistry): artifacts definitions registry. knowledge_base (KnowledgeBase): to fill with preprocessing information. searcher (dfvfs.FileSystemSearcher): file system searcher to preprocess the file system. file_system (dfvfs.FileSystem): file system to be preprocessed.
def CollectFromFileSystem( cls, artifacts_registry, knowledge_base, searcher, file_system): for preprocess_plugin in cls._file_system_plugins.values(): artifact_definition = artifacts_registry.GetDefinitionByName( preprocess_plugin.ARTIFACT_DEFINITION_NAME) if not artifact_definition: logger.warning('Missing artifact definition: {0:s}'.format( preprocess_plugin.ARTIFACT_DEFINITION_NAME)) continue logger.debug('Running file system preprocessor plugin: {0:s}'.format( preprocess_plugin.ARTIFACT_DEFINITION_NAME)) try: preprocess_plugin.Collect( knowledge_base, artifact_definition, searcher, file_system) except (IOError, errors.PreProcessFail) as exception: logger.warning(( 'Unable to collect value from artifact definition: {0:s} ' 'with error: {1!s}').format( preprocess_plugin.ARTIFACT_DEFINITION_NAME, exception))
288,293
Collects values from knowledge base values. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information.
def CollectFromKnowledgeBase(cls, knowledge_base): for preprocess_plugin in cls._knowledge_base_plugins.values(): logger.debug('Running knowledge base preprocessor plugin: {0:s}'.format( preprocess_plugin.__class__.__name__)) try: preprocess_plugin.Collect(knowledge_base) except errors.PreProcessFail as exception: logger.warning( 'Unable to collect knowledge base value with error: {0!s}'.format( exception))
288,294
Collects values from Windows Registry values. Args: artifacts_registry (artifacts.ArtifactDefinitionsRegistry): artifacts definitions registry. knowledge_base (KnowledgeBase): to fill with preprocessing information. searcher (dfwinreg.WinRegistrySearcher): Windows Registry searcher to preprocess the Windows Registry.
def CollectFromWindowsRegistry( cls, artifacts_registry, knowledge_base, searcher): for preprocess_plugin in cls._windows_registry_plugins.values(): artifact_definition = artifacts_registry.GetDefinitionByName( preprocess_plugin.ARTIFACT_DEFINITION_NAME) if not artifact_definition: logger.warning('Missing artifact definition: {0:s}'.format( preprocess_plugin.ARTIFACT_DEFINITION_NAME)) continue logger.debug('Running Windows Registry preprocessor plugin: {0:s}'.format( preprocess_plugin.ARTIFACT_DEFINITION_NAME)) try: preprocess_plugin.Collect(knowledge_base, artifact_definition, searcher) except (IOError, errors.PreProcessFail) as exception: logger.warning(( 'Unable to collect value from artifact definition: {0:s} ' 'with error: {1!s}').format( preprocess_plugin.ARTIFACT_DEFINITION_NAME, exception))
288,295
Deregisters a preprocess plugin class. Args: plugin_class (type): preprocess plugin class. Raises: KeyError: if plugin class is not set for the corresponding name. TypeError: if the source type of the plugin class is not supported.
def DeregisterPlugin(cls, plugin_class): name = getattr( plugin_class, 'ARTIFACT_DEFINITION_NAME', plugin_class.__name__) name = name.lower() if name not in cls._plugins: raise KeyError( 'Artifact plugin class not set for name: {0:s}.'.format(name)) del cls._plugins[name] if name in cls._file_system_plugins: del cls._file_system_plugins[name] if name in cls._knowledge_base_plugins: del cls._knowledge_base_plugins[name] if name in cls._windows_registry_plugins: del cls._windows_registry_plugins[name]
288,296
Registers a preprocess plugin class. Args: plugin_class (type): preprocess plugin class. Raises: KeyError: if plugin class is already set for the corresponding name. TypeError: if the source type of the plugin class is not supported.
def RegisterPlugin(cls, plugin_class): name = getattr( plugin_class, 'ARTIFACT_DEFINITION_NAME', plugin_class.__name__) name = name.lower() if name in cls._plugins: raise KeyError( 'Artifact plugin class already set for name: {0:s}.'.format(name)) preprocess_plugin = plugin_class() cls._plugins[name] = preprocess_plugin if isinstance( preprocess_plugin, interface.FileSystemArtifactPreprocessorPlugin): cls._file_system_plugins[name] = preprocess_plugin elif isinstance( preprocess_plugin, interface.KnowledgeBasePreprocessorPlugin): cls._knowledge_base_plugins[name] = preprocess_plugin elif isinstance( preprocess_plugin, interface.WindowsRegistryKeyArtifactPreprocessorPlugin): cls._windows_registry_plugins[name] = preprocess_plugin
288,298
Runs the preprocessing plugins. Args: artifacts_registry (artifacts.ArtifactDefinitionsRegistry): artifacts definitions registry. file_system (dfvfs.FileSystem): file system to be preprocessed. mount_point (dfvfs.PathSpec): mount point path specification that refers to the base location of the file system. knowledge_base (KnowledgeBase): to fill with preprocessing information.
def RunPlugins( cls, artifacts_registry, file_system, mount_point, knowledge_base): searcher = file_system_searcher.FileSystemSearcher(file_system, mount_point) cls.CollectFromFileSystem( artifacts_registry, knowledge_base, searcher, file_system) # Run the Registry plugins separately so we do not have to open # Registry files for every preprocess plugin. environment_variables = None if knowledge_base: environment_variables = knowledge_base.GetEnvironmentVariables() registry_file_reader = FileSystemWinRegistryFileReader( file_system, mount_point, environment_variables=environment_variables) win_registry = dfwinreg_registry.WinRegistry( registry_file_reader=registry_file_reader) searcher = registry_searcher.WinRegistrySearcher(win_registry) cls.CollectFromWindowsRegistry( artifacts_registry, knowledge_base, searcher) cls.CollectFromKnowledgeBase(knowledge_base) if not knowledge_base.HasUserAccounts(): logger.warning('Unable to find any user accounts on the system.')
288,299
Parses a compound ZIP file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
def ParseFileObject(self, parser_mediator, file_object): display_name = parser_mediator.GetDisplayName() if not zipfile.is_zipfile(file_object): raise errors.UnableToParseFile( '[{0:s}] unable to parse file: {1:s} with error: {2:s}'.format( self.NAME, display_name, 'Not a Zip file.')) try: zip_file = zipfile.ZipFile(file_object, 'r', allowZip64=True) self._ProcessZipFileWithPlugins(parser_mediator, zip_file) zip_file.close() # Some non-ZIP files return true for is_zipfile but will fail with a # negative seek (IOError) or another error. except (zipfile.BadZipfile, struct.error) as exception: raise errors.UnableToParseFile( '[{0:s}] unable to parse file: {1:s} with error: {2!s}'.format( self.NAME, display_name, exception))
288,300
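A standalone sketch of the standard-library calls ParseFileObject relies on, using an in-memory archive (the member name is illustrative):

import io
import zipfile

file_object = io.BytesIO()
with zipfile.ZipFile(file_object, 'w') as zip_file:
  zip_file.writestr('manifest.xml', '<manifest/>')

print(zipfile.is_zipfile(file_object))  # True

zip_file = zipfile.ZipFile(file_object, 'r', allowZip64=True)
print(zip_file.namelist())  # ['manifest.xml']
zip_file.close()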
Processes a zip file using all compound zip plugins. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. zip_file (zipfile.ZipFile): the zip file. It should not be closed in this method, but will be closed in ParseFileObject().
def _ProcessZipFileWithPlugins(self, parser_mediator, zip_file): archive_members = zip_file.namelist() for plugin in self._plugins: try: plugin.UpdateChainAndProcess( parser_mediator, zip_file=zip_file, archive_members=archive_members) except errors.WrongCompoundZIPPlugin as exception: logger.debug('[{0:s}] wrong plugin: {1!s}'.format( self.NAME, exception))
288,301
Gets the year from a POSIX timestamp. The POSIX time is the number of seconds since 1970-01-01 00:00:00 UTC. Args: posix_time: An integer containing the number of seconds since 1970-01-01 00:00:00 UTC. timezone: Optional timezone of the POSIX timestamp. Returns: The year of the POSIX timestamp. Raises: ValueError: If the posix timestamp is out of the range of supported values.
def GetYearFromPosixTime(posix_time, timezone=pytz.UTC): datetime_object = datetime.datetime.fromtimestamp(posix_time, tz=timezone) return datetime_object.year
288,302
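For example, using the same standard-library call as the helper above:

import datetime

import pytz

print(datetime.datetime.fromtimestamp(0, tz=pytz.UTC).year)  # 1970
print(datetime.datetime.fromtimestamp(1546300800, tz=pytz.UTC).year)  # 2019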
Parses a log record structure. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): name of the parsed structure. structure (pyparsing.ParseResults): structure parsed from the log file.
def ParseRecord(self, parser_mediator, key, structure): if key != 'logline': logger.warning( 'Unable to parse record, unknown structure: {0:s}'.format(key)) return try: timestamp = int(structure.timestamp) except ValueError: logger.debug('Invalid timestamp string {0:s}, skipping record'.format( structure.timestamp)) return try: nickname, text = self._StripThenGetNicknameAndText(structure.text) except pyparsing.ParseException: logger.debug('Error parsing entry at offset {0:d}'.format(self._offset)) return event_data = XChatScrollbackEventData() event_data.nickname = nickname event_data.offset = self._offset event_data.text = text date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_ADDED) parser_mediator.ProduceEventWithEventData(event, event_data)
288,311
Verifies that this file is an XChat scrollback log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. line (str): line from a text file. Returns: bool: True if the line was successfully parsed.
def VerifyStructure(self, parser_mediator, line): structure = self.LOG_LINE try: parsed_structure = structure.parseString(line) except pyparsing.ParseException: logger.debug('Not a XChat scrollback log file') return False try: int(parsed_structure.timestamp, 10) except ValueError: logger.debug('Not a XChat scrollback log file, invalid timestamp string') return False return True
288,312
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): if registry_key is None: return values_dict = {} for value_name in self._VALUE_NAMES: registry_value = registry_key.GetValueByName(value_name) if not registry_value: continue value_data = registry_value.GetDataAsObject() if value_data is None: continue values_dict[value_name] = value_data event_data = windows_events.WindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = registry_key.offset event_data.regvalue = values_dict event = time_events.DateTimeValuesEvent( registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
288,313
Parses a Container_# table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. table (pyesedb.table): table. container_name (str): container name, which indicates the table type. Raises: ValueError: if the table value is missing.
def _ParseContainerTable(self, parser_mediator, table, container_name): if table is None: raise ValueError('Missing table value.') for record_index, esedb_record in enumerate(table.records): if parser_mediator.abort: break # TODO: add support for: # wpnidm, iecompat, iecompatua, DNTException, DOMStore if container_name == 'Content': value_mappings = self._CONTAINER_TABLE_VALUE_MAPPINGS else: value_mappings = None try: record_values = self._GetRecordValues( parser_mediator, table.name, esedb_record, value_mappings=value_mappings) except UnicodeDecodeError: parser_mediator.ProduceExtractionWarning(( 'Unable to retrieve record values from record: {0:d} ' 'in table: {1:s}').format(record_index, table.name)) continue if (container_name in self._SUPPORTED_CONTAINER_NAMES or container_name.startswith('MSHist')): access_count = record_values.get('AccessCount', None) cached_filename = record_values.get('Filename', None) cached_file_size = record_values.get('FileSize', None) cache_identifier = record_values.get('CacheId', None) container_identifier = record_values.get('ContainerId', None) entry_identifier = record_values.get('EntryId', None) file_extension = record_values.get('FileExtension', None) redirect_url = record_values.get('RedirectUrl', None) sync_count = record_values.get('SyncCount', None) url = record_values.get('Url', '') # Ignore an URL that start with a binary value. if ord(url[0]) < 0x20 or ord(url[0]) == 0x7f: url = None request_headers = record_values.get('RequestHeaders', None) # Ignore non-Unicode request headers values. if not isinstance(request_headers, py2to3.UNICODE_TYPE): request_headers = None response_headers = record_values.get('ResponseHeaders', None) # Ignore non-Unicode response headers values. if not isinstance(response_headers, py2to3.UNICODE_TYPE): response_headers = None event_data = MsieWebCacheContainerEventData() event_data.access_count = access_count event_data.cached_filename = cached_filename event_data.cached_file_size = cached_file_size event_data.cache_identifier = cache_identifier event_data.container_identifier = container_identifier event_data.entry_identifier = entry_identifier event_data.file_extension = file_extension event_data.redirect_url = redirect_url event_data.request_headers = request_headers event_data.response_headers = response_headers event_data.sync_count = sync_count event_data.url = url timestamp = record_values.get('SyncTime', None) if timestamp: date_time = dfdatetime_filetime.Filetime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, 'Synchronization time') parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = record_values.get('CreationTime', None) if timestamp: date_time = dfdatetime_filetime.Filetime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = record_values.get('ExpiryTime', None) if timestamp: date_time = dfdatetime_filetime.Filetime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_EXPIRATION) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = record_values.get('ModifiedTime', None) if timestamp: date_time = dfdatetime_filetime.Filetime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = record_values.get('AccessedTime', None) if timestamp: 
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = record_values.get('PostCheckTime', None) if timestamp: date_time = dfdatetime_filetime.Filetime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, 'Post check time') parser_mediator.ProduceEventWithEventData(event, event_data)
288,318
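The timestamps in these WebCache records are FILETIME values: counts of 100-nanosecond intervals since 1601-01-01 00:00:00 UTC, which the dfdatetime Filetime class wraps for event generation. A minimal pure-Python sketch of the same conversion (the timestamp value is illustrative):

import datetime

filetime_timestamp = 132000000000000000  # 100-nanosecond intervals since 1601

date_time = datetime.datetime(1601, 1, 1) + datetime.timedelta(
    microseconds=filetime_timestamp // 10)
print(date_time)  # 2019-04-17 18:40:00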
Parses the Containers table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. database (Optional[pyesedb.file]): ESE database. table (Optional[pyesedb.table]): table. Raises: ValueError: if the database or table value is missing.
def ParseContainersTable( self, parser_mediator, database=None, table=None, **unused_kwargs): if database is None: raise ValueError('Missing database value.') if table is None: raise ValueError('Missing table value.') for esedb_record in table.records: if parser_mediator.abort: break record_values = self._GetRecordValues( parser_mediator, table.name, esedb_record) event_data = MsieWebCacheContainersEventData() event_data.container_identifier = record_values.get('ContainerId', None) event_data.directory = record_values.get('Directory', None) event_data.name = record_values.get('Name', None) event_data.set_identifier = record_values.get('SetId', None) timestamp = record_values.get('LastScavengeTime', None) if timestamp: date_time = dfdatetime_filetime.Filetime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, 'Last Scavenge Time') parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = record_values.get('LastAccessTime', None) if timestamp: date_time = dfdatetime_filetime.Filetime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS) parser_mediator.ProduceEventWithEventData(event, event_data) container_identifier = record_values.get('ContainerId', None) container_name = record_values.get('Name', None) if not container_identifier or not container_name: continue table_name = 'Container_{0:d}'.format(container_identifier) esedb_table = database.get_table_by_name(table_name) if not esedb_table: parser_mediator.ProduceExtractionWarning( 'Missing table: {0:s}'.format(table_name)) continue self._ParseContainerTable(parser_mediator, esedb_table, container_name)
288,319
Parses the LeakFiles table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. database (Optional[pyesedb.file]): ESE database. table (Optional[pyesedb.table]): table. Raises: ValueError: if the database or table value is missing.
def ParseLeakFilesTable( self, parser_mediator, database=None, table=None, **unused_kwargs): if database is None: raise ValueError('Missing database value.') if table is None: raise ValueError('Missing table value.') for esedb_record in table.records: if parser_mediator.abort: break record_values = self._GetRecordValues( parser_mediator, table.name, esedb_record) event_data = MsieWebCacheLeakFilesEventData() event_data.cached_filename = record_values.get('Filename', None) event_data.leak_identifier = record_values.get('LeakId', None) timestamp = record_values.get('CreationTime', None) if timestamp: date_time = dfdatetime_filetime.Filetime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data)
288,320
Parses the Partitions table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. database (Optional[pyesedb.file]): ESE database. table (Optional[pyesedb.table]): table. Raises: ValueError: if the database or table value is missing.
def ParsePartitionsTable( self, parser_mediator, database=None, table=None, **unused_kwargs): if database is None: raise ValueError('Missing database value.') if table is None: raise ValueError('Missing table value.') for esedb_record in table.records: if parser_mediator.abort: break record_values = self._GetRecordValues( parser_mediator, table.name, esedb_record) event_data = MsieWebCachePartitionsEventData() event_data.directory = record_values.get('Directory', None) event_data.partition_identifier = record_values.get('PartitionId', None) event_data.partition_type = record_values.get('PartitionType', None) event_data.table_identifier = record_values.get('TableId', None) timestamp = record_values.get('LastScavengeTime', None) if timestamp: date_time = dfdatetime_filetime.Filetime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, 'Last Scavenge Time') parser_mediator.ProduceEventWithEventData(event, event_data)
288,321
Parses a record and produces events. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (file): file-like object. record_offset (int): offset of the record relative to the start of the file. Returns: int: next record offset. Raises: ParseError: if the record cannot be parsed.
def _ParseRecord(self, parser_mediator, file_object, record_offset): record_strings_data_offset = file_object.tell() record_strings_data_size = record_offset - record_strings_data_offset record_strings_data = self._ReadData( file_object, record_strings_data_offset, record_strings_data_size) record_map = self._GetDataTypeMap('asl_record') try: record, record_data_size = self._ReadStructureFromFileObject( file_object, record_offset, record_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile(( 'Unable to parse record at offset: 0x{0:08x} with error: ' '{1!s}').format(record_offset, exception)) hostname = self._ParseRecordString( record_strings_data, record_strings_data_offset, record.hostname_string_offset) sender = self._ParseRecordString( record_strings_data, record_strings_data_offset, record.sender_string_offset) facility = self._ParseRecordString( record_strings_data, record_strings_data_offset, record.facility_string_offset) message = self._ParseRecordString( record_strings_data, record_strings_data_offset, record.message_string_offset) file_offset = record_offset + record_data_size additional_data_size = record.data_size + 6 - record_data_size if additional_data_size % 8 != 0: raise errors.ParseError( 'Invalid record additional data size: {0:d}.'.format( additional_data_size)) additional_data = self._ReadData( file_object, file_offset, additional_data_size) extra_fields = {} for additional_data_offset in range(0, additional_data_size - 8, 16): record_extra_field = self._ParseRecordExtraField( additional_data[additional_data_offset:], file_offset) file_offset += 16 name = self._ParseRecordString( record_strings_data, record_strings_data_offset, record_extra_field.name_string_offset) value = self._ParseRecordString( record_strings_data, record_strings_data_offset, record_extra_field.value_string_offset) if name is not None: extra_fields[name] = value # TODO: implement determine previous record offset event_data = ASLEventData() event_data.computer_name = hostname event_data.extra_information = ', '.join([ '{0:s}: {1:s}'.format(name, value) for name, value in sorted(extra_fields.items())]) event_data.facility = facility event_data.group_id = record.group_identifier event_data.level = record.alert_level event_data.message_id = record.message_identifier event_data.message = message event_data.pid = record.process_identifier event_data.read_gid = record.real_group_identifier event_data.read_uid = record.real_user_identifier event_data.record_position = record_offset event_data.sender = sender # Note that the user_sid value is expected to be a string. event_data.user_sid = '{0:d}'.format(record.user_identifier) microseconds, _ = divmod(record.written_time_nanoseconds, 1000) timestamp = (record.written_time * 1000000) + microseconds # TODO: replace by PosixTimeInNanoseconds. date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) # TODO: replace by written time. event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) return record.next_record_offset
288,324
Parses a record extra field. Args: byte_stream (bytes): byte stream. file_offset (int): offset of the record extra field relative to the start of the file. Returns: asl_record_extra_field: record extra field. Raises: ParseError: if the record extra field cannot be parsed.
def _ParseRecordExtraField(self, byte_stream, file_offset): extra_field_map = self._GetDataTypeMap('asl_record_extra_field') try: record_extra_field = self._ReadStructureFromByteStream( byte_stream, file_offset, extra_field_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to parse record extra field at offset: 0x{0:08x} with error: ' '{1!s}').format(file_offset, exception)) return record_extra_field
288,325
Parses a record string. Args: record_strings_data (bytes): record strings data. record_strings_data_offset (int): offset of the record strings data relative to the start of the file. string_offset (int): offset of the string relative to the start of the file. Returns: str: record string or None if string offset is 0. Raises: ParseError: if the record string cannot be parsed.
def _ParseRecordString( self, record_strings_data, record_strings_data_offset, string_offset): if string_offset == 0: return None if string_offset & self._STRING_OFFSET_MSB: if (string_offset >> 60) != 8: raise errors.ParseError('Invalid inline record string flag.') string_size = (string_offset >> 56) & 0x0f if string_size >= 8: raise errors.ParseError('Invalid inline record string size.') string_data = bytes(bytearray([ string_offset >> (8 * byte_index) & 0xff for byte_index in range(6, -1, -1)])) try: return string_data[:string_size].decode('utf-8') except UnicodeDecodeError as exception: raise errors.ParseError( 'Unable to decode inline record string with error: {0!s}.'.format( exception)) data_offset = string_offset - record_strings_data_offset record_string_map = self._GetDataTypeMap('asl_record_string') try: record_string = self._ReadStructureFromByteStream( record_strings_data[data_offset:], string_offset, record_string_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to parse record string at offset: 0x{0:08x} with error: ' '{1!s}').format(string_offset, exception)) return record_string.string.rstrip('\x00')
288,326
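A worked example of the inline string case handled above, with an illustrative 64-bit value that packs the string 'hello': the top nibble is the inline flag (8), the next nibble is the string size (5) and the remaining bytes hold the string data, most significant byte first:

string_offset = 0x8568656c6c6f0000

assert (string_offset >> 60) == 8  # inline record string flag
string_size = (string_offset >> 56) & 0x0f  # 5

string_data = bytes(bytearray([
    string_offset >> (8 * byte_index) & 0xff
    for byte_index in range(6, -1, -1)]))

print(string_data[:string_size].decode('utf-8'))  # hello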
Parses an ASL file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
def ParseFileObject(self, parser_mediator, file_object): file_header_map = self._GetDataTypeMap('asl_file_header') try: file_header, _ = self._ReadStructureFromFileObject( file_object, 0, file_header_map) except (ValueError, errors.ParseError) as exception: raise errors.UnableToParseFile( 'Unable to parse file header with error: {0!s}'.format( exception)) if file_header.signature != self._FILE_SIGNATURE: raise errors.UnableToParseFile('Invalid file signature.') # TODO: generate event for creation time. file_size = file_object.get_size() if file_header.first_log_entry_offset > 0: last_log_entry_offset = 0 file_offset = file_header.first_log_entry_offset while file_offset < file_size: last_log_entry_offset = file_offset try: file_offset = self._ParseRecord( parser_mediator, file_object, file_offset) except errors.ParseError as exception: parser_mediator.ProduceExtractionWarning( 'unable to parse record with error: {0!s}'.format(exception)) return if file_offset == 0: break if last_log_entry_offset != file_header.last_log_entry_offset: parser_mediator.ProduceExtractionWarning( 'last log entry offset does not match value in file header.')
288,327
Initializes and builds the path filter table from a list of paths. Args: paths: a list of strings containing the paths. ignore_list: a list of path segment indexes to ignore, where 0 is the index of the first path segment relative from the root. path_segment_separator: optional string containing the path segment separator.
def __init__(self, paths, ignore_list, path_segment_separator='/'): super(_PathFilterTable, self).__init__() self._path_segment_separator = path_segment_separator self.path_segments_per_index = {} self.paths = list(paths) for path in self.paths: self._AddPathSegments(path, ignore_list)
288,328
Adds the path segments to the table. Args: path: a string containing the path. ignore_list: a list of path segment indexes to ignore, where 0 is the index of the first path segment relative from the root.
def _AddPathSegments(self, path, ignore_list): path_segments = path.split(self._path_segment_separator) for path_segment_index, path_segment in enumerate(path_segments): if path_segment_index not in self.path_segments_per_index: self.path_segments_per_index[path_segment_index] = {} if path_segment_index not in ignore_list: path_segments = self.path_segments_per_index[path_segment_index] if path_segment not in path_segments: path_segments[path_segment] = [] paths_per_segment_list = path_segments[path_segment] paths_per_segment_list.append(path)
288,329
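A small standalone illustration of the per-index table this builds, for two hypothetical paths and an empty ignore list:

paths = ['/etc/passwd', '/etc/hosts']
path_segments_per_index = {}

for path in paths:
  for index, path_segment in enumerate(path.split('/')):
    per_segment = path_segments_per_index.setdefault(index, {})
    per_segment.setdefault(path_segment, []).append(path)

# Index 0 holds the empty segment before the leading '/'.
print(path_segments_per_index[1])
# {'etc': ['/etc/passwd', '/etc/hosts']}
print(path_segments_per_index[2])
# {'passwd': ['/etc/passwd'], 'hosts': ['/etc/hosts']}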
Adds a path segment index and sets its weight to 0. Args: path_segment_index: an integer containing the path segment index. Raises: ValueError: if the path segment weights already contains the path segment index.
def AddIndex(self, path_segment_index): if path_segment_index in self._weight_per_index: raise ValueError('Path segment index already set.') self._weight_per_index[path_segment_index] = 0
288,332
Adds a weight for a specific path segment index. Args: path_segment_index: an integer containing the path segment index. weight: an integer containing the weight. Raises: ValueError: if the path segment weights do not contain the path segment index.
def AddWeight(self, path_segment_index, weight): if path_segment_index not in self._weight_per_index: raise ValueError('Path segment index not set.') self._weight_per_index[path_segment_index] += weight if weight not in self._indexes_per_weight: self._indexes_per_weight[weight] = [] self._indexes_per_weight[weight].append(path_segment_index)
288,333
Sets a weight for a specific path segment index. Args: path_segment_index: an integer containing the path segment index. weight: an integer containing the weight. Raises: ValueError: if the path segment weights do not contain the path segment index.
def SetWeight(self, path_segment_index, weight): if path_segment_index not in self._weight_per_index: raise ValueError('Path segment index not set.') self._weight_per_index[path_segment_index] = weight if weight not in self._indexes_per_weight: self._indexes_per_weight[weight] = [] self._indexes_per_weight[weight].append(path_segment_index)
288,334
Initializes and builds a path filter scan tree. Args: paths: a list of strings containing the paths. case_sensitive: optional boolean value to indicate string matches should be case sensitive. path_segment_separator: optional string containing the path segment separator.
def __init__(self, paths, case_sensitive=True, path_segment_separator='/'): super(PathFilterScanTree, self).__init__() self._case_sensitive = case_sensitive self._path_segment_separator = path_segment_separator self._root_node = None if not self._case_sensitive: paths = [path.lower() for path in paths] path_filter_table = _PathFilterTable( paths, [], path_segment_separator=self._path_segment_separator) if path_filter_table.paths: self._root_node = self._BuildScanTreeNode(path_filter_table, [])
288,336
Retrieves the index of the path segment based on occurrence weights. Args: occurrence_weights: the occurrence weights object (instance of _PathSegmentWeights). value_weights: the value weights object (instance of _PathSegmentWeights). Returns: An integer containing the path segment index.
def _GetPathSegmentIndexForOccurrenceWeights( self, occurrence_weights, value_weights): largest_weight = occurrence_weights.GetLargestWeight() if largest_weight > 0: occurrence_weight_indexes = occurrence_weights.GetIndexesForWeight( largest_weight) number_of_occurrence_indexes = len(occurrence_weight_indexes) else: number_of_occurrence_indexes = 0 path_segment_index = None if number_of_occurrence_indexes == 0: path_segment_index = self._GetPathSegmentIndexForValueWeights( value_weights) elif number_of_occurrence_indexes == 1: path_segment_index = occurrence_weight_indexes[0] else: largest_weight = 0 for occurrence_index in occurrence_weight_indexes: value_weight = value_weights.GetWeightForIndex(occurrence_index) if not path_segment_index or largest_weight < value_weight: largest_weight = value_weight path_segment_index = occurrence_index return path_segment_index
288,339
Retrieves the index of the path segment based on similarity weights. Args: similarity_weights: the similarity weights object (instance of _PathSegmentWeights). occurrence_weights: the occurrence weights object (instance of _PathSegmentWeights). value_weights: the value weights object (instance of _PathSegmentWeights). Returns: An integer containing the path segment index.
def _GetPathSegmentIndexForSimilarityWeights( self, similarity_weights, occurrence_weights, value_weights): largest_weight = similarity_weights.GetLargestWeight() if largest_weight > 0: similarity_weight_indexes = similarity_weights.GetIndexesForWeight( largest_weight) number_of_similarity_indexes = len(similarity_weight_indexes) else: number_of_similarity_indexes = 0 path_segment_index = None if number_of_similarity_indexes == 0: path_segment_index = self._GetPathSegmentIndexForOccurrenceWeights( occurrence_weights, value_weights) elif number_of_similarity_indexes == 1: path_segment_index = similarity_weight_indexes[0] else: largest_weight = 0 largest_value_weight = 0 for similarity_index in similarity_weight_indexes: occurrence_weight = occurrence_weights.GetWeightForIndex( similarity_index) if largest_weight > 0 and largest_weight == occurrence_weight: value_weight = value_weights.GetWeightForIndex(similarity_index) if largest_value_weight < value_weight: largest_weight = 0 if not path_segment_index or largest_weight < occurrence_weight: largest_weight = occurrence_weight path_segment_index = similarity_index largest_value_weight = value_weights.GetWeightForIndex( similarity_index) return path_segment_index
288,340
Retrieves the index of the path segment based on value weights. Args: value_weights: the value weights object (instance of _PathSegmentWeights). Returns: An integer containing the path segment index. Raises: RuntimeError: if no path segment index can be found.
def _GetPathSegmentIndexForValueWeights(self, value_weights): largest_weight = value_weights.GetLargestWeight() if largest_weight > 0: value_weight_indexes = value_weights.GetIndexesForWeight(largest_weight) else: value_weight_indexes = [] if value_weight_indexes: path_segment_index = value_weight_indexes[0] else: path_segment_index = value_weights.GetFirstAvailableIndex() if path_segment_index is None: raise RuntimeError('No path segment index found.') return path_segment_index
288,341
Checks if a path matches the scan tree-based path filter. Args: path: a string containing the path. path_segment_separator: optional string containing the path segment separator. None defaults to the path segment separator that was set when the path filter scan tree was initialized. Returns: A boolean indicating if the path matches the filter.
def CheckPath(self, path, path_segment_separator=None): if not self._case_sensitive: path = path.lower() if path_segment_separator is None: path_segment_separator = self._path_segment_separator path_segments = path.split(path_segment_separator) number_of_path_segments = len(path_segments) scan_object = self._root_node while scan_object: if isinstance(scan_object, py2to3.STRING_TYPES): break if scan_object.path_segment_index >= number_of_path_segments: scan_object = scan_object.default_value continue path_segment = path_segments[scan_object.path_segment_index] scan_object = scan_object.GetScanObject(path_segment) if not isinstance(scan_object, py2to3.STRING_TYPES): return False filter_path_segments = scan_object.split(self._path_segment_separator) return filter_path_segments == path_segments
288,342
Initializes a path filter scan tree node. Args: path_segment_index: an integer containing the path segment index.
def __init__(self, path_segment_index): super(PathFilterScanTreeNode, self).__init__() self._path_segments = {} self.default_value = None self.parent = None self.path_segment_index = path_segment_index
288,343
Adds a path segment. Args: path_segment: a string containing the path segment. scan_object: a scan object, either a scan tree sub node (instance of PathFilterScanTreeNode) or a string containing a path. Raises: ValueError: if the node already contains a scan object for the path segment.
def AddPathSegment(self, path_segment, scan_object): if path_segment in self._path_segments: raise ValueError('Path segment already set.') if isinstance(scan_object, PathFilterScanTreeNode): scan_object.parent = self self._path_segments[path_segment] = scan_object
288,344
Sets the default (non-match) value. Args: scan_object: a scan object, either a scan tree sub node (instance of PathFilterScanTreeNode) or a string containing a path. Raises: TypeError: if the scan object is of an unsupported type. ValueError: if the default value is already set.
def SetDefaultValue(self, scan_object): if (not isinstance(scan_object, PathFilterScanTreeNode) and not isinstance(scan_object, py2to3.STRING_TYPES)): raise TypeError('Unsupported scan object type.') if self.default_value: raise ValueError('Default value already set.') self.default_value = scan_object
288,345
Converts the path filter scan tree node into a debug string. Args: indentation_level: an integer containing the text indentation level. Returns: A string containing a debug representation of the path filter scan tree node.
def ToDebugString(self, indentation_level=1): indentation = ' ' * indentation_level text_parts = ['{0:s}path segment index: {1:d}\n'.format( indentation, self.path_segment_index)] for path_segment, scan_object in self._path_segments.items(): text_parts.append('{0:s}path segment: {1:s}\n'.format( indentation, path_segment)) if isinstance(scan_object, PathFilterScanTreeNode): text_parts.append('{0:s}scan tree node:\n'.format(indentation)) text_parts.append(scan_object.ToDebugString(indentation_level + 1)) elif isinstance(scan_object, py2to3.STRING_TYPES): text_parts.append('{0:s}path: {1:s}\n'.format( indentation, scan_object)) text_parts.append('{0:s}default value:\n'.format(indentation)) if isinstance(self.default_value, PathFilterScanTreeNode): text_parts.append('{0:s}scan tree node:\n'.format(indentation)) text_parts.append(self.default_value.ToDebugString(indentation_level + 1)) elif isinstance(self.default_value, py2to3.STRING_TYPES): text_parts.append('{0:s}pattern: {1:s}\n'.format( indentation, self.default_value)) text_parts.append('\n') return ''.join(text_parts)
288,346
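To make the scan tree walk above concrete, here is a minimal, self-contained sketch of how a node tree resolves a path. It uses a simplified stand-in class (plain str checks instead of py2to3.STRING_TYPES, and an assumed GetScanObject that falls back to the default value); it is not the plaso implementation itself.

class ScanTreeNode(object):
  """Simplified stand-in for PathFilterScanTreeNode."""

  def __init__(self, path_segment_index):
    self._path_segments = {}
    self.default_value = None
    self.path_segment_index = path_segment_index

  def AddPathSegment(self, path_segment, scan_object):
    self._path_segments[path_segment] = scan_object

  def GetScanObject(self, path_segment):
    # Assumed behavior for this sketch: fall back to the default value.
    return self._path_segments.get(path_segment, self.default_value)


def CheckPath(root_node, path, separator='/'):
  """Walks the tree the same way the CheckPath method above does."""
  path_segments = path.split(separator)

  scan_object = root_node
  while scan_object:
    if isinstance(scan_object, str):
      break
    if scan_object.path_segment_index >= len(path_segments):
      scan_object = scan_object.default_value
      continue
    path_segment = path_segments[scan_object.path_segment_index]
    scan_object = scan_object.GetScanObject(path_segment)

  if not isinstance(scan_object, str):
    return False
  return scan_object.split(separator) == path_segments


# A tree that matches exactly one filter path: etc/passwd
passwd_node = ScanTreeNode(1)
passwd_node.AddPathSegment('passwd', 'etc/passwd')
root_node = ScanTreeNode(0)
root_node.AddPathSegment('etc', passwd_node)

print(CheckPath(root_node, 'etc/passwd'))  # True
print(CheckPath(root_node, 'etc/shadow'))  # False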
Parses the MRUListEx value in a given Registry key. Args: registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains the MRUListEx value. Returns: mrulistex_entries: MRUListEx entries or None if not available.
def _ParseMRUListExValue(self, registry_key):
    mrulistex_value = registry_key.GetValueByName('MRUListEx')

    # The key exists but does not contain a value named "MRUListEx".
    if not mrulistex_value:
      return None

    mrulistex_entries_map = self._GetDataTypeMap('mrulistex_entries')

    context = dtfabric_data_maps.DataTypeMapContext(values={
        'data_size': len(mrulistex_value.data)})

    return self._ReadStructureFromByteStream(
        mrulistex_value.data, 0, mrulistex_entries_map, context=context)
288,348
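The MRUListEx value data itself is simply a sequence of little-endian 32-bit entry numbers terminated by 0xffffffff. A rough standalone sketch of that decoding, using struct instead of the dtfabric data-type map and hypothetical sample data:

import struct

# Hypothetical raw MRUListEx value data: entries 2, 0, 1 followed by the
# 0xffffffff terminator.
value_data = struct.pack('<4I', 2, 0, 1, 0xffffffff)

number_of_entries = len(value_data) // 4
entries = struct.unpack('<{0:d}i'.format(number_of_entries), value_data)

for entry_number in entries:
  # Read as a signed integer the terminator 0xffffffff becomes -1.
  if entry_number == -1:
    break
  print('MRU entry number: {0:d}'.format(entry_number))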
Extract event objects from a MRUListEx Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. parent_path_segments (list[str]): parent shell item path segments. codepage (Optional[str]): extended ASCII string codepage.
def _ParseSubKey(
      self, parser_mediator, registry_key, parent_path_segments,
      codepage='cp1252'):
    try:
      mrulistex = self._ParseMRUListExValue(registry_key)
    except (ValueError, errors.ParseError) as exception:
      parser_mediator.ProduceExtractionWarning(
          'unable to parse MRUListEx value with error: {0!s}'.format(
              exception))
      return

    if not mrulistex:
      return

    entry_numbers = {}
    values_dict = {}
    value_strings = {}

    found_terminator = False
    for index, entry_number in enumerate(mrulistex):
      # The MRU list is terminated with -1 (0xffffffff). Remember that the
      # terminator was seen so that trailing entries trigger a warning.
      if entry_number == -1:
        found_terminator = True
        continue

      if found_terminator:
        parser_mediator.ProduceExtractionWarning((
            'found additional MRUListEx entries after terminator in key: '
            '{0:s}.').format(registry_key.path))

        # Only create one parser error per terminator.
        found_terminator = False

      path_segment = self._ParseMRUListExEntryValue(
          parser_mediator, registry_key, index, entry_number, values_dict,
          value_strings, parent_path_segments, codepage=codepage)

      entry_numbers[entry_number] = path_segment

    event_data = windows_events.WindowsRegistryEventData()
    event_data.key_path = registry_key.path
    event_data.offset = registry_key.offset
    event_data.regvalue = values_dict
    event_data.source_append = self._SOURCE_APPEND
    event_data.urls = self.URLS

    event = time_events.DateTimeValuesEvent(
        registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)

    for entry_number, path_segment in iter(entry_numbers.items()):
      sub_key_name = '{0:d}'.format(entry_number)
      sub_key = registry_key.GetSubkeyByName(sub_key_name)
      if not sub_key:
        parser_mediator.ProduceExtractionWarning(
            'Missing BagMRU sub key: {0:d} in key: {1:s}.'.format(
                entry_number, registry_key.path))
        continue

      parent_path_segments.append(path_segment)
      self._ParseSubKey(
          parser_mediator, sub_key, parent_path_segments, codepage=codepage)
      parent_path_segments.pop()
288,349
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. codepage (Optional[str]): extended ASCII string codepage.
def ExtractEvents(
      self, parser_mediator, registry_key, codepage='cp1252', **kwargs):
    self._ParseSubKey(parser_mediator, registry_key, [], codepage=codepage)
288,350
Initializes an event. Args: timestamp (int): timestamp, which contains the number of microseconds since January 1, 1970, 00:00:00 UTC. timestamp_description (str): description of the meaning of the timestamp value. data_type (Optional[str]): event data type. If the data type is not set it is derived from the DATA_TYPE class attribute.
def __init__(self, timestamp, timestamp_description, data_type=None):
    super(TimestampEvent, self).__init__()
    self.timestamp = timestamp
    self.timestamp_desc = timestamp_description

    if data_type:
      self.data_type = data_type
288,351
Initializes an event. Args: date_time (dfdatetime.DateTimeValues): date and time values. date_time_description (str): description of the meaning of the date and time values. data_type (Optional[str]): event data type. If the data type is not set it is derived from the DATA_TYPE class attribute. time_zone (Optional[datetime.tzinfo]): time zone.
def __init__(
      self, date_time, date_time_description, data_type=None, time_zone=None):
    timestamp = date_time.GetPlasoTimestamp()
    if date_time.is_local_time and time_zone:
      timestamp = timelib.Timestamp.LocaltimeToUTC(timestamp, time_zone)

    super(DateTimeValuesEvent, self).__init__(
        timestamp, date_time_description, data_type=data_type)
288,352
Initializes an event. Args: datetime_value (datetime.datetime): date and time values. date_time_description (str): description of the meaning of the date and time values. data_type (Optional[str]): event data type. If the data type is not set it is derived from the DATA_TYPE class attribute. time_zone (Optional[datetime.tzinfo]): time zone.
def __init__(
      self, datetime_value, date_time_description, data_type=None,
      time_zone=None):
    year, month, day_of_month, hours, minutes, seconds, _, _, _ = (
        datetime_value.utctimetuple())
    time_elements_tuple = (
        year, month, day_of_month, hours, minutes, seconds,
        datetime_value.microsecond)

    date_time = dfdatetime_time_elements.TimeElementsInMicroseconds(
        time_elements_tuple=time_elements_tuple)

    super(PythonDatetimeEvent, self).__init__(
        date_time, date_time_description, data_type=data_type,
        time_zone=time_zone)
288,353
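Since GetPlasoTimestamp boils down to microseconds since 1970-01-01 00:00:00 UTC, the conversion can be sanity-checked with the standard library alone. A sketch that bypasses dfdatetime and assumes naive datetime values are already in UTC:

import datetime

_EPOCH = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)

def ToPlasoTimestamp(datetime_value):
  """Returns the number of microseconds since January 1, 1970 00:00:00 UTC."""
  if datetime_value.tzinfo is None:
    # Assumption for this sketch: naive values are UTC. The event classes
    # above instead convert local time via the time_zone argument.
    datetime_value = datetime_value.replace(tzinfo=datetime.timezone.utc)
  delta = datetime_value - _EPOCH
  return (delta.days * 86400 + delta.seconds) * 1000000 + delta.microseconds

print(ToPlasoTimestamp(datetime.datetime(2018, 6, 25, 12, 0, 0, 500000)))
# 1529928000500000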
Extracts relevant MacOS update entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    version = match.get('LastAttemptSystemVersion', 'N/A')
    pending = match.get('LastUpdatesAvailable', None)

    event_data = plist_event.PlistTimeEventData()
    event_data.desc = 'Last MacOS {0:s} full update.'.format(version)
    event_data.key = ''
    event_data.root = '/'

    datetime_value = match.get('LastFullSuccessfulDate', None)
    if datetime_value:
      event = time_events.PythonDatetimeEvent(
          datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
      parser_mediator.ProduceEventWithEventData(event, event_data)

    datetime_value = match.get('LastSuccessfulDate', None)
    if datetime_value and pending:
      software = []
      for update in match.get('RecommendedUpdates', []):
        identifier = update.get('Identifier', '<IDENTIFIER>')
        product_key = update.get('Product Key', '<PRODUCT_KEY>')

        software.append('{0:s}({1:s})'.format(identifier, product_key))

      if not software:
        return

      software = ','.join(software)
      event_data.desc = (
          'Last Mac OS {0!s} partially update, pending {1!s}: '
          '{2:s}.').format(version, pending, software)

      event = time_events.PythonDatetimeEvent(
          datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
      parser_mediator.ProduceEventWithEventData(event, event_data)
288,354
Parses Windows Registry value data for a preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. value_data (object): Windows Registry value data. Raises: errors.PreProcessFail: if the preprocessing fails.
def _ParseValueData(self, knowledge_base, value_data):
    if not isinstance(value_data, py2to3.UNICODE_TYPE):
      raise errors.PreProcessFail(
          'Unsupported Windows Registry value type: {0:s} for '
          'artifact: {1:s}.'.format(
              type(value_data), self.ARTIFACT_DEFINITION_NAME))

    environment_variable = artifacts.EnvironmentVariableArtifact(
        case_sensitive=False, name=self._NAME, value=value_data)

    try:
      logger.debug('setting environment variable: {0:s} to: "{1:s}"'.format(
          self._NAME, value_data))
      knowledge_base.AddEnvironmentVariable(environment_variable)
    except KeyError:
      # TODO: add and store preprocessing errors.
      pass
288,355
Collects values from the knowledge base. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. Raises: PreProcessFail: if the preprocessing fails.
def Collect(self, knowledge_base):
    environment_variable = knowledge_base.GetEnvironmentVariable('programdata')
    allusersappdata = getattr(environment_variable, 'value', None)

    if not allusersappdata:
      environment_variable = knowledge_base.GetEnvironmentVariable(
          'allusersprofile')
      allusersdata = getattr(environment_variable, 'value', None)

      if allusersdata:
        allusersappdata = '\\'.join([allusersdata, 'Application Data'])

    if allusersappdata:
      environment_variable = artifacts.EnvironmentVariableArtifact(
          case_sensitive=False, name='allusersappdata', value=allusersappdata)

      try:
        logger.debug('setting environment variable: {0:s} to: "{1:s}"'.format(
            'allusersappdata', allusersappdata))
        knowledge_base.AddEnvironmentVariable(environment_variable)
      except KeyError:
        # TODO: add and store preprocessing errors.
        pass
288,357
Parses Windows Registry value data for a preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. value_data (object): Windows Registry value data. Raises: errors.PreProcessFail: if the preprocessing fails.
def _ParseValueData(self, knowledge_base, value_data):
    if not isinstance(value_data, py2to3.UNICODE_TYPE):
      raise errors.PreProcessFail(
          'Unsupported Windows Registry value type: {0:s} for '
          'artifact: {1:s}.'.format(
              type(value_data), self.ARTIFACT_DEFINITION_NAME))

    # Map the Windows code page name to a Python equivalent name.
    codepage = 'cp{0:s}'.format(value_data)

    if not knowledge_base.codepage:
      try:
        knowledge_base.SetCodepage(codepage)
      except ValueError:
        # TODO: add and store preprocessing errors.
        pass
288,358
Parses Windows Registry value data for a preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. value_data (object): Windows Registry value data. Raises: errors.PreProcessFail: if the preprocessing fails.
def _ParseValueData(self, knowledge_base, value_data):
    if not isinstance(value_data, py2to3.UNICODE_TYPE):
      raise errors.PreProcessFail(
          'Unsupported Windows Registry value type: {0:s} for '
          'artifact: {1:s}.'.format(
              type(value_data), self.ARTIFACT_DEFINITION_NAME))

    if not knowledge_base.GetHostname():
      hostname_artifact = artifacts.HostnameArtifact(name=value_data)
      knowledge_base.SetHostname(hostname_artifact)
288,359
Collects values from the knowledge base. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. Raises: PreProcessFail: if the preprocessing fails.
def Collect(self, knowledge_base):
    environment_variable = knowledge_base.GetEnvironmentVariable(
        'programdata')
    allusersprofile = getattr(environment_variable, 'value', None)

    if not allusersprofile:
      environment_variable = knowledge_base.GetEnvironmentVariable(
          'allusersprofile')
      allusersprofile = getattr(environment_variable, 'value', None)

      if allusersprofile:
        environment_variable = artifacts.EnvironmentVariableArtifact(
            case_sensitive=False, name='programdata', value=allusersprofile)

        try:
          logger.debug(
              'setting environment variable: {0:s} to: "{1:s}"'.format(
                  'programdata', allusersprofile))
          knowledge_base.AddEnvironmentVariable(environment_variable)
        except KeyError:
          # TODO: add and store preprocessing errors.
          pass
288,360
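Both collectors implement the same fallback: prefer an already known value, fall back to %AllUsersProfile%, and only then derive the missing variable. A compact sketch of that resolution order using a plain dictionary in place of the knowledge base (the paths are hypothetical):

def ResolveAllUsersAppData(environment_variables):
  """Derives allusersappdata the way the collector above does."""
  allusersappdata = environment_variables.get('programdata')
  if not allusersappdata:
    allusersprofile = environment_variables.get('allusersprofile')
    if allusersprofile:
      allusersappdata = '\\'.join([allusersprofile, 'Application Data'])
  return allusersappdata

# Windows XP style configuration: no %ProgramData%, only %AllUsersProfile%.
print(ResolveAllUsersAppData(
    {'allusersprofile': 'C:\\Documents and Settings\\All Users'}))
# C:\Documents and Settings\All Users\Application Data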
Parses Windows Registry value data for a preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. value_data (object): Windows Registry value data. Raises: errors.PreProcessFail: if the preprocessing fails.
def _ParseValueData(self, knowledge_base, value_data):
    if not isinstance(value_data, py2to3.UNICODE_TYPE):
      raise errors.PreProcessFail(
          'Unsupported Windows Registry value type: {0:s} for '
          'artifact: {1:s}.'.format(
              type(value_data), self.ARTIFACT_DEFINITION_NAME))

    if not knowledge_base.GetValue('operating_system_product'):
      knowledge_base.SetValue('operating_system_product', value_data)
288,361
Parses Windows Registry value data for a preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. value_data (object): Windows Registry value data. Raises: errors.PreProcessFail: if the preprocessing fails.
def _ParseValueData(self, knowledge_base, value_data):
    if not isinstance(value_data, py2to3.UNICODE_TYPE):
      raise errors.PreProcessFail(
          'Unsupported Windows Registry value type: {0:s} for '
          'artifact: {1:s}.'.format(
              type(value_data), self.ARTIFACT_DEFINITION_NAME))

    # Map the Windows time zone name to a Python equivalent name.
    lookup_key = value_data.replace(' ', '')

    time_zone = time_zones.TIME_ZONES.get(lookup_key, value_data)

    # TODO: check if time zone is set in knowledge base.
    if time_zone:
      try:
        # Catch and warn about unsupported preprocessor plugin.
        knowledge_base.SetTimeZone(time_zone)
      except ValueError:
        # TODO: add and store preprocessing errors.
        time_zone = value_data
        logger.warning('Unable to map: "{0:s}" to time zone'.format(
            value_data))
288,362
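The mapping itself is a plain dictionary lookup keyed on the Windows time zone name with the spaces removed; the real table lives in plaso's time_zones module, so the entries below are only an illustrative subset.

# Illustrative subset of the Windows-to-IANA time zone mapping.
TIME_ZONES = {
    'PacificStandardTime': 'America/Los_Angeles',
    'W.EuropeStandardTime': 'Europe/Berlin'}

def MapWindowsTimeZone(value_data):
  lookup_key = value_data.replace(' ', '')
  return TIME_ZONES.get(lookup_key, value_data)

print(MapWindowsTimeZone('Pacific Standard Time'))  # America/Los_Angeles
print(MapWindowsTimeZone('Unknown Standard Time'))  # unchanged fallback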
Retrieves the username from a Windows profile path. Trailing path segment separators are ignored. Args: path (str): a Windows path with '\\' as path segment separator. Returns: str: basename which is the last path segment.
def _GetUsernameFromProfilePath(self, path):
    # Strip trailing path segment separators.
    while path and path[-1] == '\\':
      path = path[:-1]

    if path:
      _, _, path = path.rpartition('\\')

    return path
288,363
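The helper's behaviour on typical ProfileImagePath values, reproduced as a standalone function for quick experimentation:

def GetUsernameFromProfilePath(path):
  """Returns the last path segment, ignoring trailing separators."""
  while path and path[-1] == '\\':
    path = path[:-1]
  if path:
    _, _, path = path.rpartition('\\')
  return path

print(GetUsernameFromProfilePath('C:\\Users\\Administrator\\'))
# Administrator
print(GetUsernameFromProfilePath('%SystemDrive%\\Users\\rsydow'))
# rsydow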
Parses a Windows Registry key for a preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. value_name (str): name of the Windows Registry value. Raises: errors.PreProcessFail: if the preprocessing fails.
def _ParseKey(self, knowledge_base, registry_key, value_name):
    user_account = artifacts.UserAccountArtifact(
        identifier=registry_key.name, path_separator='\\')

    registry_value = registry_key.GetValueByName('ProfileImagePath')
    if registry_value:
      profile_path = registry_value.GetDataAsObject()
      username = self._GetUsernameFromProfilePath(profile_path)

      user_account.user_directory = profile_path or None
      user_account.username = username or None

    try:
      knowledge_base.AddUserAccount(user_account)
    except KeyError:
      # TODO: add and store preprocessing errors.
      pass
288,364
Adds command line arguments to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
def AddArguments(cls, argument_group):
    argument_group.add_argument(
        '--status_view', '--status-view', dest='status_view_mode',
        choices=['linear', 'none', 'window'], action='store',
        metavar='TYPE', default=status_view.StatusView.MODE_WINDOW, help=(
            'The processing status view mode: "linear", "none" or "window".'))
288,365
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type.
def ParseOptions(cls, options, configuration_object):
    if not isinstance(configuration_object, tools.CLITool):
      raise errors.BadConfigObject(
          'Configuration object is not an instance of CLITool')

    status_view_mode = cls._ParseStringOption(
        options, 'status_view_mode',
        default_value=status_view.StatusView.MODE_WINDOW)
    setattr(configuration_object, '_status_view_mode', status_view_mode)
288,366
Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
def AddArguments(cls, argument_group): argument_group.add_argument( '--append', dest='append', action='store_true', default=False, required=cls._DEFAULT_APPEND, help=( 'Defines whether the intention is to append to an already ' 'existing database or overwrite it. Defaults to overwrite.')) argument_group.add_argument( '--evidence', dest='evidence', type=str, default=cls._DEFAULT_EVIDENCE, action='store', required=False, help='Set the evidence field to a specific value, defaults to empty.') argument_group.add_argument( '--fields', dest='fields', type=str, action='store', default=cls._DEFAULT_FIELDS, help=( 'Defines which fields should be indexed in the database.')) argument_group.add_argument( '--additional_fields', dest='additional_fields', type=str, action='store', default='', help=( 'Defines extra fields to be included in the output, in addition to' ' the default fields, which are {0:s}.'.format( cls._DEFAULT_FIELDS)))
288,367
Parses and validates options. Args: options (argparse.Namespace): parser options. output_module (OutputModule): output module to configure. Raises: BadConfigObject: when the output module object is of the wrong type.
def ParseOptions(cls, options, output_module):
    if not isinstance(output_module, shared_4n6time.Shared4n6TimeOutputModule):
      raise errors.BadConfigObject(
          'Output module is not an instance of Shared4n6TimeOutputModule')

    append = getattr(options, 'append', cls._DEFAULT_APPEND)
    evidence = cls._ParseStringOption(
        options, 'evidence', default_value=cls._DEFAULT_EVIDENCE)
    fields = cls._ParseStringOption(
        options, 'fields', default_value=cls._DEFAULT_FIELDS)
    additional_fields = cls._ParseStringOption(
        options, 'additional_fields')

    if additional_fields:
      fields = '{0:s},{1:s}'.format(fields, additional_fields)

    output_module.SetAppendMode(append)
    output_module.SetEvidence(evidence)
    output_module.SetFields([
        field_name.strip() for field_name in fields.split(',')])
288,368
Initializes the line reader. Args: file_object (FileIO): a file-like object to read from. end_of_line (Optional[bytes]): end of line indicator.
def __init__(self, file_object, end_of_line=b'\n'):
    super(BinaryLineReader, self).__init__()
    self._file_object = file_object
    self._file_object_size = file_object.get_size()
    self.end_of_line = end_of_line
    self._end_of_line_length = len(self.end_of_line)
    self._lines = []
    self._lines_buffer = b''
    self._lines_buffer_offset = 0
    self._current_offset = 0
288,371
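A rough standalone sketch of the buffered line reading this class provides, using io.BytesIO instead of a dfvfs file-like object; the real BinaryLineReader also enforces a maximum line size and tracks the current offset.

import io

def ReadBinaryLines(file_object, end_of_line=b'\n', read_size=16):
  """Yields binary lines, buffering reads the way a line reader would."""
  lines_buffer = b''
  while True:
    data = file_object.read(read_size)
    if not data:
      if lines_buffer:
        yield lines_buffer
      return
    lines_buffer += data
    while end_of_line in lines_buffer:
      line, _, lines_buffer = lines_buffer.partition(end_of_line)
      yield line + end_of_line

file_object = io.BytesIO(b'first line\nsecond line\nno newline at end')
for line in ReadBinaryLines(file_object):
  print(line)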
Initializes the delimited separated values reader. Args: binary_line_reader (BinaryLineReader): a binary file reader delimiter (bytes): field delimiter.
def __init__(self, binary_line_reader, delimiter):
    super(BinaryDSVReader, self).__init__()
    self._line_reader = binary_line_reader
    self._delimiter = delimiter
288,374
Parses and validates options. Args: options (argparse.Namespace): parser options. analysis_plugin (OutputModule): analysis_plugin to configure. Raises: BadConfigObject: when the output module object is of the wrong type. BadConfigOption: when a configuration parameter fails validation.
def ParseOptions(cls, options, analysis_plugin):
    if not isinstance(analysis_plugin, sessionize.SessionizeAnalysisPlugin):
      raise errors.BadConfigObject(
          'Analysis plugin is not an instance of SessionizeAnalysisPlugin')

    maximum_pause = cls._ParseNumericOption(
        options, 'sessionize_maximumpause', default_value=10)

    if maximum_pause <= 0:
      raise errors.BadConfigOption(
          'Maximum pause value {0:d} is not supported. '
          'Value must be greater than 0.'.format(maximum_pause))
    analysis_plugin.SetMaximumPause(maximum_pause)
288,376
Opens a RPC communication channel to the server. Args: hostname (str): hostname or IP address to connect to for requests. port (int): port to connect to for requests. Returns: bool: True if the communication channel was established.
def Open(self, hostname, port):
    server_url = 'http://{0:s}:{1:d}'.format(hostname, port)

    try:
      self._xmlrpc_proxy = xmlrpclib.ServerProxy(
          server_url, allow_none=True)
    except SocketServer.socket.error as exception:
      logger.warning((
          'Unable to connect to RPC server on {0:s}:{1:d} with error: '
          '{2!s}').format(hostname, port, exception))
      return False

    return True
288,378
Initialize a threaded RPC server. Args: callback (function): callback function to invoke on get status RPC request.
def __init__(self, callback):
    super(ThreadedXMLRPCServer, self).__init__(callback)
    self._rpc_thread = None
    self._xmlrpc_server = None
288,379
Opens the RPC communication channel for clients. Args: hostname (str): hostname or IP address to connect to for requests. port (int): port to connect to for requests. Returns: bool: True if the communication channel was successfully opened.
def _Open(self, hostname, port):
    try:
      self._xmlrpc_server = SimpleXMLRPCServer.SimpleXMLRPCServer(
          (hostname, port), logRequests=False, allow_none=True)
    except SocketServer.socket.error as exception:
      logger.warning((
          'Unable to bind a RPC server on {0:s}:{1:d} with error: '
          '{2!s}').format(hostname, port, exception))
      return False

    self._xmlrpc_server.register_function(
        self._callback, self._RPC_FUNCTION_NAME)
    return True
288,380
Starts the process status RPC server. Args: hostname (str): hostname or IP address to connect to for requests. port (int): port to connect to for requests. Returns: bool: True if the RPC server was successfully started.
def Start(self, hostname, port):
    if not self._Open(hostname, port):
      return False

    self._rpc_thread = threading.Thread(
        name=self._THREAD_NAME, target=self._xmlrpc_server.serve_forever)
    self._rpc_thread.start()
    return True
288,381
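The same client/server pattern in current Python module names (xmlrpc.server and xmlrpc.client rather than SimpleXMLRPCServer and xmlrpclib); a minimal sketch that serves a status callback on a thread and queries it once, using an arbitrary local port.

import threading

from xmlrpc.client import ServerProxy
from xmlrpc.server import SimpleXMLRPCServer

def GetStatus():
  # Stand-in for the process status callback.
  return {'identifier': 'worker', 'status': 'running'}

server = SimpleXMLRPCServer(
    ('localhost', 8125), logRequests=False, allow_none=True)
server.register_function(GetStatus, 'status')

rpc_thread = threading.Thread(target=server.serve_forever)
rpc_thread.start()

proxy = ServerProxy('http://localhost:8125', allow_none=True)
print(proxy.status())

server.shutdown()
rpc_thread.join()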
Retrieves a list of the registered analysis plugins. Args: show_all (Optional[bool]): True if all analysis plugin names should be listed. Returns: list[tuple[str, str, str]]: the name, docstring and type string of each analysis plugin in alphabetical order.
def GetAllPluginInformation(cls, show_all=True):
    results = []
    for plugin_class in iter(cls._plugin_classes.values()):
      plugin_object = plugin_class()

      if not show_all and not plugin_class.ENABLE_IN_EXTRACTION:
        continue

      # TODO: Use a specific description variable, not the docstring.
      doc_string, _, _ = plugin_class.__doc__.partition('\n')

      type_string = cls._PLUGIN_TYPE_STRINGS.get(plugin_object.plugin_type)
      information_tuple = (plugin_object.plugin_name, doc_string, type_string)
      results.append(information_tuple)

    return sorted(results)
288,383
Retrieves the plugin objects. Args: plugin_names (list[str]): names of plugins that should be retrieved. Returns: dict[str, AnalysisPlugin]: analysis plugins per name.
def GetPluginObjects(cls, plugin_names):
    plugin_objects = {}
    for plugin_name, plugin_class in iter(cls._plugin_classes.items()):
      if plugin_name not in plugin_names:
        continue

      plugin_objects[plugin_name] = plugin_class()

    return plugin_objects
288,384
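These class methods sit on a register-by-name plugin manager; a minimal sketch of that pattern with a hypothetical plugin class (not the plaso analysis plugin manager itself):

class PluginManager(object):
  """Minimal register-by-name plugin manager."""

  _plugin_classes = {}

  @classmethod
  def RegisterPlugin(cls, plugin_class):
    cls._plugin_classes[plugin_class.NAME] = plugin_class

  @classmethod
  def GetPluginObjects(cls, plugin_names):
    return {
        plugin_name: plugin_class()
        for plugin_name, plugin_class in cls._plugin_classes.items()
        if plugin_name in plugin_names}


class TaggingPlugin(object):
  """Tags events based on a rules file."""

  NAME = 'tagging'


PluginManager.RegisterPlugin(TaggingPlugin)
print(PluginManager.GetPluginObjects(['tagging']))
# {'tagging': <__main__.TaggingPlugin object at ...>}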
Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
def AddArguments(cls, argument_group): argument_group.add_argument( '--name', '--timeline_name', '--timeline-name', dest='timeline_name', type=str, action='store', default=cls._DEFAULT_NAME, required=False, help=( 'The name of the timeline in Timesketch. Default: ' 'hostname if present in the storage file. If no hostname ' 'is found then manual input is used.')) argument_group.add_argument( '--index', dest='index', type=str, action='store', default=cls._DEFAULT_UUID, required=False, help=( 'The name of the Elasticsearch index. Default: Generate a random ' 'UUID')) argument_group.add_argument( '--flush_interval', '--flush-interval', dest='flush_interval', type=int, action='store', default=cls._DEFAULT_FLUSH_INTERVAL, required=False, help=( 'The number of events to queue up before sent in bulk ' 'to Elasticsearch.')) argument_group.add_argument( '--doc_type', dest='document_type', type=str, action='store', default=cls._DEFAULT_DOCUMENT_TYPE, help=( 'Name of the document type that will be used in ElasticSearch.')) argument_group.add_argument( '--username', dest='username', type=str, action='store', default=cls._DEFAULT_USERNAME, help=( 'Username of a Timesketch user that will own the timeline.'))
288,385
Parses and validates options. Args: options (argparse.Namespace): parser options. output_module (TimesketchOutputModule): output module to configure. Raises: BadConfigObject: when the output module object is of the wrong type. BadConfigOption: when a configuration parameter fails validation.
def ParseOptions(cls, options, output_module):
    if not isinstance(output_module, timesketch_out.TimesketchOutputModule):
      raise errors.BadConfigObject(
          'Output module is not an instance of TimesketchOutputModule')

    document_type = cls._ParseStringOption(
        options, 'document_type', default_value=cls._DEFAULT_DOCUMENT_TYPE)
    output_module.SetDocumentType(document_type)

    flush_interval = cls._ParseNumericOption(
        options, 'flush_interval', default_value=cls._DEFAULT_FLUSH_INTERVAL)
    output_module.SetFlushInterval(flush_interval)

    index = cls._ParseStringOption(
        options, 'index', default_value=cls._DEFAULT_UUID)
    output_module.SetIndexName(index)

    name = cls._ParseStringOption(
        options, 'timeline_name', default_value=cls._DEFAULT_NAME)
    output_module.SetTimelineName(name)

    username = cls._ParseStringOption(
        options, 'username', default_value=cls._DEFAULT_USERNAME)
    output_module.SetTimelineOwner(username)
288,386
Parses a message row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
def ParseNotificationcenterRow(
      self, parser_mediator, query, row, **unused_kwargs):
    query_hash = hash(query)

    event_data = MacNotificationCenterEventData()
    event_data.bundle_name = self._GetRowValue(query_hash, row, 'bundle_name')
    event_data.presented = self._GetRowValue(query_hash, row, 'presented')

    blob = self._GetRowValue(query_hash, row, 'dataBlob')

    try:
      full_biplist = biplist.readPlistFromString(blob)
      # req is the 'req' dictionary from the plist containing extra
      # information about the notification entry.
      req = full_biplist['req']
    except (biplist.InvalidPlistException, KeyError) as exception:
      parser_mediator.ProduceExtractionWarning(
          'unable to read plist from database with error: {0!s}'.format(
              exception))
      return

    event_data.title = req.get('titl', None)
    event_data.subtitle = req.get('subt', None)
    event_data.body = req.get('body', None)

    timestamp = self._GetRowValue(query_hash, row, 'timestamp')
    date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
288,388
Initializes the CLI tool object. Args: input_reader (Optional[InputReader]): input reader, where None indicates that the stdin input reader should be used. output_writer (Optional[OutputWriter]): output writer, where None indicates that the stdout output writer should be used.
def __init__(self, input_reader=None, output_writer=None):
    super(PinfoTool, self).__init__(
        input_reader=input_reader, output_writer=output_writer)
    self._compare_storage_file_path = None
    self._output_filename = None
    self._output_format = None
    self._process_memory_limit = None
    self._storage_file_path = None
    self._verbose = False
    self.compare_storage_information = False
288,390
Calculates the counters of the entire storage. Args: storage_reader (StorageReader): storage reader. Returns: dict[str,collections.Counter]: storage counters.
def _CalculateStorageCounters(self, storage_reader): analysis_reports_counter = collections.Counter() analysis_reports_counter_error = False event_labels_counter = collections.Counter() event_labels_counter_error = False parsers_counter = collections.Counter() parsers_counter_error = False for session in storage_reader.GetSessions(): # Check for a dict for backwards compatibility. if isinstance(session.analysis_reports_counter, dict): analysis_reports_counter += collections.Counter( session.analysis_reports_counter) elif isinstance(session.analysis_reports_counter, collections.Counter): analysis_reports_counter += session.analysis_reports_counter else: analysis_reports_counter_error = True # Check for a dict for backwards compatibility. if isinstance(session.event_labels_counter, dict): event_labels_counter += collections.Counter( session.event_labels_counter) elif isinstance(session.event_labels_counter, collections.Counter): event_labels_counter += session.event_labels_counter else: event_labels_counter_error = True # Check for a dict for backwards compatibility. if isinstance(session.parsers_counter, dict): parsers_counter += collections.Counter(session.parsers_counter) elif isinstance(session.parsers_counter, collections.Counter): parsers_counter += session.parsers_counter else: parsers_counter_error = True storage_counters = {} warnings_by_path_spec = collections.Counter() warnings_by_parser_chain = collections.Counter() for warning in list(storage_reader.GetWarnings()): warnings_by_path_spec[warning.path_spec.comparable] += 1 warnings_by_parser_chain[warning.parser_chain] += 1 storage_counters['warnings_by_path_spec'] = warnings_by_path_spec storage_counters['warnings_by_parser_chain'] = warnings_by_parser_chain if not analysis_reports_counter_error: storage_counters['analysis_reports'] = analysis_reports_counter if not event_labels_counter_error: storage_counters['event_labels'] = event_labels_counter if not parsers_counter_error: storage_counters['parsers'] = parsers_counter return storage_counters
288,391
Compares the contents of two stores. Args: storage_reader (StorageReader): storage reader. compare_storage_reader (StorageReader): storage to compare against. Returns: bool: True if the content of the stores is identical.
def _CompareStores(self, storage_reader, compare_storage_reader):
    storage_counters = self._CalculateStorageCounters(storage_reader)
    compare_storage_counters = self._CalculateStorageCounters(
        compare_storage_reader)

    # TODO: improve comparison, currently only total numbers are compared.
    return storage_counters == compare_storage_counters
288,392
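Because the comparison reduces to equality of the per-category Counter objects, two stores with identical totals compare equal even if the underlying events differ; a small illustration with collections.Counter and made-up counts:

import collections

storage_counters = {
    'parsers': collections.Counter({'filestat': 10, 'total': 10})}
compare_storage_counters = {
    'parsers': collections.Counter({'filestat': 10, 'total': 10})}

# Equivalent to what _CompareStores returns for these two stores.
print(storage_counters == compare_storage_counters)  # True

compare_storage_counters['parsers'] = collections.Counter(
    {'filestat': 9, 'total': 9})
print(storage_counters == compare_storage_counters)  # False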
Prints the details of the analysis reports. Args: storage_reader (StorageReader): storage reader.
def _PrintAnalysisReportsDetails(self, storage_reader):
    if not storage_reader.HasAnalysisReports():
      self._output_writer.Write('No analysis reports stored.\n\n')
      return

    for index, analysis_report in enumerate(
        storage_reader.GetAnalysisReports()):
      title = 'Analysis report: {0:d}'.format(index)
      table_view = views.ViewsFactory.GetTableView(
          self._views_format_type, title=title)

      table_view.AddRow(['String', analysis_report.GetString()])

      table_view.Write(self._output_writer)
288,393
Prints a summary of the warnings. Args: storage_counters (dict): storage counters.
def _PrintWarningCounters(self, storage_counters): warnings_by_pathspec = storage_counters.get('warnings_by_path_spec', {}) warnings_by_parser_chain = storage_counters.get( 'warnings_by_parser_chain', {}) if not warnings_by_parser_chain: self._output_writer.Write('No warnings stored.\n\n') return table_view = views.ViewsFactory.GetTableView( self._views_format_type, title='Warnings generated per parser', column_names=['Parser (plugin) name', 'Number of warnings']) for parser_chain, count in warnings_by_parser_chain.items(): parser_chain = parser_chain or '<No parser>' table_view.AddRow([parser_chain, '{0:d}'.format(count)]) table_view.Write(self._output_writer) table_view = views.ViewsFactory.GetTableView( self._views_format_type, title='Pathspecs with most warnings', column_names=['Number of warnings', 'Pathspec']) top_pathspecs = warnings_by_pathspec.most_common(10) for pathspec, count in top_pathspecs: for path_index, line in enumerate(pathspec.split('\n')): if not line: continue if path_index == 0: table_view.AddRow(['{0:d}'.format(count), line]) else: table_view.AddRow(['', line]) table_view.Write(self._output_writer)
288,394
Prints the details of the warnings. Args: storage (BaseStore): storage.
def _PrintWarningsDetails(self, storage):
    if not storage.HasWarnings():
      self._output_writer.Write('No warnings stored.\n\n')
      return

    for index, warning in enumerate(storage.GetWarnings()):
      title = 'Warning: {0:d}'.format(index)
      table_view = views.ViewsFactory.GetTableView(
          self._views_format_type, title=title)

      table_view.AddRow(['Message', warning.message])
      table_view.AddRow(['Parser chain', warning.parser_chain])

      path_specification = warning.path_spec.comparable
      for path_index, line in enumerate(path_specification.split('\n')):
        if not line:
          continue

        if path_index == 0:
          table_view.AddRow(['Path specification', line])
        else:
          table_view.AddRow(['', line])

      table_view.Write(self._output_writer)
288,395
Prints the event labels counter. Args: event_labels_counter (collections.Counter): number of event tags per label. session_identifier (Optional[str]): session identifier.
def _PrintEventLabelsCounter(
      self, event_labels_counter, session_identifier=None):
    if not event_labels_counter:
      return

    title = 'Event tags generated per label'
    if session_identifier:
      title = '{0:s}: {1:s}'.format(title, session_identifier)

    table_view = views.ViewsFactory.GetTableView(
        self._views_format_type,
        column_names=['Label', 'Number of event tags'], title=title)

    for key, value in sorted(event_labels_counter.items()):
      if key == 'total':
        continue
      table_view.AddRow([key, value])

    try:
      total = event_labels_counter['total']
    except KeyError:
      total = 'N/A'

    table_view.AddRow(['Total', total])

    table_view.Write(self._output_writer)
288,396
Prints the parsers counter. Args: parsers_counter (collections.Counter): number of events per parser or parser plugin. session_identifier (Optional[str]): session identifier.
def _PrintParsersCounter(self, parsers_counter, session_identifier=None):
    if not parsers_counter:
      return

    title = 'Events generated per parser'
    if session_identifier:
      title = '{0:s}: {1:s}'.format(title, session_identifier)

    table_view = views.ViewsFactory.GetTableView(
        self._views_format_type,
        column_names=['Parser (plugin) name', 'Number of events'],
        title=title)

    for key, value in sorted(parsers_counter.items()):
      if key == 'total':
        continue
      table_view.AddRow([key, value])

    table_view.AddRow(['Total', parsers_counter['total']])

    table_view.Write(self._output_writer)
288,397
Prints the details of the preprocessing information. Args: storage_reader (StorageReader): storage reader. session_number (Optional[int]): session number.
def _PrintPreprocessingInformation(self, storage_reader, session_number=None): knowledge_base_object = knowledge_base.KnowledgeBase() storage_reader.ReadPreprocessingInformation(knowledge_base_object) # TODO: replace session_number by session_identifier. system_configuration = knowledge_base_object.GetSystemConfigurationArtifact( session_identifier=session_number) if not system_configuration: return title = 'System configuration' table_view = views.ViewsFactory.GetTableView( self._views_format_type, title=title) hostname = 'N/A' if system_configuration.hostname: hostname = system_configuration.hostname.name operating_system = system_configuration.operating_system or 'N/A' operating_system_product = ( system_configuration.operating_system_product or 'N/A') operating_system_version = ( system_configuration.operating_system_version or 'N/A') code_page = system_configuration.code_page or 'N/A' keyboard_layout = system_configuration.keyboard_layout or 'N/A' time_zone = system_configuration.time_zone or 'N/A' table_view.AddRow(['Hostname', hostname]) table_view.AddRow(['Operating system', operating_system]) table_view.AddRow(['Operating system product', operating_system_product]) table_view.AddRow(['Operating system version', operating_system_version]) table_view.AddRow(['Code page', code_page]) table_view.AddRow(['Keyboard layout', keyboard_layout]) table_view.AddRow(['Time zone', time_zone]) table_view.Write(self._output_writer) title = 'User accounts' table_view = views.ViewsFactory.GetTableView( self._views_format_type, column_names=['Username', 'User directory'], title=title) for user_account in system_configuration.user_accounts: table_view.AddRow([ user_account.username, user_account.user_directory]) table_view.Write(self._output_writer)
288,398
Prints the details of the sessions. Args: storage_reader (StorageReader): storage reader.
def _PrintSessionsDetails(self, storage_reader): for session_number, session in enumerate(storage_reader.GetSessions()): session_identifier = uuid.UUID(hex=session.identifier) session_identifier = '{0!s}'.format(session_identifier) start_time = 'N/A' if session.start_time is not None: start_time = timelib.Timestamp.CopyToIsoFormat(session.start_time) completion_time = 'N/A' if session.completion_time is not None: completion_time = timelib.Timestamp.CopyToIsoFormat( session.completion_time) enabled_parser_names = 'N/A' if session.enabled_parser_names: enabled_parser_names = ', '.join(sorted(session.enabled_parser_names)) command_line_arguments = session.command_line_arguments or 'N/A' parser_filter_expression = session.parser_filter_expression or 'N/A' preferred_encoding = session.preferred_encoding or 'N/A' # Workaround for some older Plaso releases writing preferred encoding as # bytes. if isinstance(preferred_encoding, py2to3.BYTES_TYPE): preferred_encoding = preferred_encoding.decode('utf-8') if session.artifact_filters: artifact_filters_string = ', '.join(session.artifact_filters) else: artifact_filters_string = 'N/A' filter_file = session.filter_file or 'N/A' title = 'Session: {0:s}'.format(session_identifier) table_view = views.ViewsFactory.GetTableView( self._views_format_type, title=title) table_view.AddRow(['Start time', start_time]) table_view.AddRow(['Completion time', completion_time]) table_view.AddRow(['Product name', session.product_name]) table_view.AddRow(['Product version', session.product_version]) table_view.AddRow(['Command line arguments', command_line_arguments]) table_view.AddRow(['Parser filter expression', parser_filter_expression]) table_view.AddRow(['Enabled parser and plugins', enabled_parser_names]) table_view.AddRow(['Preferred encoding', preferred_encoding]) table_view.AddRow(['Debug mode', session.debug_mode]) table_view.AddRow(['Artifact filters', artifact_filters_string]) table_view.AddRow(['Filter file', filter_file]) table_view.Write(self._output_writer) if self._verbose: self._PrintPreprocessingInformation(storage_reader, session_number + 1) self._PrintParsersCounter( session.parsers_counter, session_identifier=session_identifier) self._PrintAnalysisReportCounter( session.analysis_reports_counter, session_identifier=session_identifier) self._PrintEventLabelsCounter( session.event_labels_counter, session_identifier=session_identifier)
288,399
Prints a sessions overview. Args: storage_reader (StorageReader): storage reader.
def _PrintSessionsOverview(self, storage_reader):
    table_view = views.ViewsFactory.GetTableView(
        self._views_format_type, title='Sessions')

    for session in storage_reader.GetSessions():
      start_time = timelib.Timestamp.CopyToIsoFormat(
          session.start_time)
      session_identifier = uuid.UUID(hex=session.identifier)
      session_identifier = '{0!s}'.format(session_identifier)
      table_view.AddRow([session_identifier, start_time])

    table_view.Write(self._output_writer)
288,400
Prints information about the store as human-readable text. Args: storage_reader (StorageReader): storage reader.
def _PrintStorageInformationAsText(self, storage_reader): table_view = views.ViewsFactory.GetTableView( self._views_format_type, title='Plaso Storage Information') table_view.AddRow(['Filename', os.path.basename(self._storage_file_path)]) table_view.AddRow(['Format version', storage_reader.format_version]) table_view.AddRow( ['Serialization format', storage_reader.serialization_format]) table_view.Write(self._output_writer) if storage_reader.storage_type == definitions.STORAGE_TYPE_SESSION: self._PrintSessionsOverview(storage_reader) self._PrintSessionsDetails(storage_reader) storage_counters = self._CalculateStorageCounters(storage_reader) if 'parsers' not in storage_counters: self._output_writer.Write( 'Unable to determine number of events generated per parser.\n') else: self._PrintParsersCounter(storage_counters['parsers']) if 'analysis_reports' not in storage_counters: self._output_writer.Write( 'Unable to determine number of reports generated per plugin.\n') else: self._PrintAnalysisReportCounter(storage_counters['analysis_reports']) if 'event_labels' not in storage_counters: self._output_writer.Write( 'Unable to determine number of event tags generated per label.\n') else: self._PrintEventLabelsCounter(storage_counters['event_labels']) self._PrintWarningCounters(storage_counters) if self._verbose: self._PrintWarningsDetails(storage_reader) self._PrintAnalysisReportsDetails(storage_reader) elif storage_reader.storage_type == definitions.STORAGE_TYPE_TASK: self._PrintTasksInformation(storage_reader)
288,401
Writes a summary of sessions as machine-readable JSON. Args: storage_reader (StorageReader): storage reader.
def _PrintStorageInformationAsJSON(self, storage_reader):
    serializer = json_serializer.JSONAttributeContainerSerializer
    storage_counters = self._CalculateStorageCounters(storage_reader)
    storage_counters_json = json.dumps(storage_counters)
    self._output_writer.Write('{')
    self._output_writer.Write('"storage_counters": {0:s}'.format(
        storage_counters_json))
    self._output_writer.Write(',\n')
    self._output_writer.Write(' "sessions": {')
    for index, session in enumerate(storage_reader.GetSessions()):
      json_string = serializer.WriteSerialized(session)
      if index != 0:
        self._output_writer.Write(',\n')
      self._output_writer.Write('"session_{0:s}": {1:s} '.format(
          session.identifier, json_string))
    self._output_writer.Write('}}')
288,402