docstring | function | __index_level_0__
---|---|---|
Creates an object that reads lines from a text file.
The line reader is advanced to the beginning of the DSV content, skipping
any header lines.
Args:
file_object (dfvfs.FileIO): file-like object.
Returns:
TextFile|BinaryLineReader: an object that implements an iterator
over lines in a text file.
Raises:
UnicodeDecodeError: if the file cannot be read with the specified
encoding.
|
def _CreateLineReader(self, file_object):
# The Python 2 csv module reads byte strings and the Python 3 csv module
# reads Unicode strings.
if py2to3.PY_3:
line_reader = text_file.TextFile(
file_object, encoding=self._encoding, end_of_line=self._end_of_line)
# pylint: disable=protected-access
maximum_read_buffer_size = line_reader._MAXIMUM_READ_BUFFER_SIZE
else:
line_reader = line_reader_file.BinaryLineReader(
file_object, end_of_line=self._end_of_line)
maximum_read_buffer_size = line_reader.MAXIMUM_READ_BUFFER_SIZE
# The maximum line length is one less than the maximum read buffer size so
# that we can tell if there is a line that does not end before the end of
# the file.
if self._maximum_line_length > maximum_read_buffer_size:
self._maximum_line_length = maximum_read_buffer_size - 1
# If we specifically define a number of lines we should skip, do that here.
for _ in range(0, self.NUMBER_OF_HEADER_LINES):
line_reader.readline(self._maximum_line_length)
return line_reader
| 288,678 |
Determines if a file begins with lines of the expected length.
Since the maximum length of valid lines in the DSV file is known, the
presence of longer lines indicates that the file cannot be parsed
successfully; this check avoids reading excessive data from a large file.
Args:
file_object (dfvfs.FileIO): file-like object.
Returns:
bool: True if the file has lines of the expected length.
|
def _HasExpectedLineLength(self, file_object):
original_file_position = file_object.tell()
line_reader = self._CreateLineReader(file_object)
for _ in range(0, 20):
# Attempt to read a line that is longer than any line that should be in
# the file.
sample_line = line_reader.readline(self._maximum_line_length + 1)
if len(sample_line) > self._maximum_line_length:
file_object.seek(original_file_position)
return False
file_object.seek(original_file_position)
return True
| 288,679 |
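A standalone sketch of the sampling idea used by _HasExpectedLineLength, written against a plain file-like object rather than plaso's line reader; the function name and the 20-line sample size are illustrative assumptions.

import io

def has_expected_line_length(file_object, maximum_line_length, sample_lines=20):
  """Returns True if the first sampled lines fit within the length limit."""
  original_position = file_object.tell()
  try:
    for _ in range(sample_lines):
      # Read one byte more than the limit; a longer result means the line
      # exceeds the expected maximum length.
      sample_line = file_object.readline(maximum_line_length + 1)
      if len(sample_line) > maximum_line_length:
        return False
      if not sample_line:
        break
  finally:
    file_object.seek(original_position)
  return True

print(has_expected_line_length(io.BytesIO(b'short\n' + b'x' * 100 + b'\n'), 16))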
Parses a DSV text file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
def ParseFileObject(self, parser_mediator, file_object):
# TODO: Replace this with detection of the file encoding via byte-order
# marks. Also see: https://github.com/log2timeline/plaso/issues/1971
if not self._encoding:
self._encoding = parser_mediator.codepage
try:
if not self._HasExpectedLineLength(file_object):
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile((
'[{0:s}] Unable to parse DSV file: {1:s} with error: '
'unexpected line length.').format(self.NAME, display_name))
except UnicodeDecodeError as exception:
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile(
'[{0:s}] Unable to parse DSV file: {1:s} with error: {2!s}.'.format(
self.NAME, display_name, exception))
try:
line_reader = self._CreateLineReader(file_object)
reader = self._CreateDictReader(line_reader)
row_offset = line_reader.tell()
row = next(reader)
except (StopIteration, csv.Error, UnicodeDecodeError) as exception:
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile(
'[{0:s}] Unable to parse DSV file: {1:s} with error: {2!s}.'.format(
self.NAME, display_name, exception))
number_of_columns = len(self.COLUMNS)
number_of_records = len(row)
if number_of_records != number_of_columns:
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile((
'[{0:s}] Unable to parse DSV file: {1:s}. Wrong number of '
'records (expected: {2:d}, got: {3:d})').format(
self.NAME, display_name, number_of_columns,
number_of_records))
for key, value in row.items():
if self._MAGIC_TEST_STRING in (key, value):
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile((
'[{0:s}] Unable to parse DSV file: {1:s}. Signature '
'mismatch.').format(self.NAME, display_name))
row = self._ConvertRowToUnicode(parser_mediator, row)
if not self.VerifyRow(parser_mediator, row):
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile((
'[{0:s}] Unable to parse DSV file: {1:s}. Verification '
'failed.').format(self.NAME, display_name))
self.ParseRow(parser_mediator, row_offset, row)
row_offset = line_reader.tell()
for row in reader:
if parser_mediator.abort:
break
row = self._ConvertRowToUnicode(parser_mediator, row)
self.ParseRow(parser_mediator, row_offset, row)
row_offset = line_reader.tell()
| 288,680 |
Parses an Amcache.hve file for events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
|
def ParseFileObject(self, parser_mediator, file_object):
regf_file = pyregf.file() # pylint: disable=no-member
try:
regf_file.open_file_object(file_object)
except IOError:
# The error is currently ignored -> see TODO above related to the
# fixing of handling multiple parsers for the same file format.
return
root_key = regf_file.get_root_key()
if root_key is None:
regf_file.close()
return
root_file_key = root_key.get_sub_key_by_path(self._AMCACHE_ROOT_FILE_KEY)
if root_file_key is None:
regf_file.close()
return
for volume_key in root_file_key.sub_keys:
for am_entry in volume_key.sub_keys:
self._ProcessAMCacheFileKey(am_entry, parser_mediator)
root_program_key = root_key.get_sub_key_by_path(
self._AMCACHE_ROOT_PROGRAM_KEY)
if root_program_key is None:
regf_file.close()
return
for am_entry in root_program_key.sub_keys:
self._ProcessAMCacheProgramKey(am_entry, parser_mediator)
regf_file.close()
| 288,683 |
Parses an Amcache Root/Programs key for events.
Args:
am_entry (pyregf.key): amcache Programs key.
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
|
def _ProcessAMCacheProgramKey(self, am_entry, parser_mediator):
amcache_datetime = am_entry.get_value_by_name(
self._AMCACHE_P_INSTALLDATE).get_data_as_integer()
event_data = AmcacheProgramEventData()
name = am_entry.get_value_by_name(self._AMCACHE_P_NAME)
if name:
event_data.name = name.get_data_as_string()
version = am_entry.get_value_by_name(self._AMCACHE_P_VERSION)
if version:
event_data.version = version.get_data_as_string()
publisher = am_entry.get_value_by_name(self._AMCACHE_P_PUBLISHER)
if publisher:
event_data.publisher = publisher.get_data_as_string()
languagecode = am_entry.get_value_by_name(self._AMCACHE_P_LANGUAGECODE)
if languagecode:
event_data.languagecode = languagecode.get_data_as_string()
entrytype = am_entry.get_value_by_name(self._AMCACHE_P_ENTRYTYPE)
if entrytype:
event_data.entrytype = entrytype.get_data_as_string()
uninstallkey = am_entry.get_value_by_name(self._AMCACHE_P_UNINSTALLKEY)
if uninstallkey:
uninstallkey = uninstallkey.get_data()
uninstallkey = uninstallkey.decode('utf-16-LE')
event_data.uninstallkey = uninstallkey
filepaths = am_entry.get_value_by_name(self._AMCACHE_P_FILEPATHS)
if filepaths:
filepaths = filepaths.get_data()
filepaths = filepaths.decode('utf-16-LE')
event_data.filepaths = filepaths
productcode = am_entry.get_value_by_name(self._AMCACHE_P_PRODUCTCODE)
if productcode:
event_data.productcode = productcode.get_data_as_string()
packagecode = am_entry.get_value_by_name(self._AMCACHE_P_PACKAGECODE)
if packagecode:
event_data.packagecode = packagecode.get_data_as_string()
msiproductcode = am_entry.get_value_by_name(self._AMCACHE_P_MSIPRODUCTCODE)
if msiproductcode:
msiproductcode = msiproductcode.get_data()
msiproductcode = msiproductcode.decode('utf-16-LE')
event_data.msiproductcode = msiproductcode
msipackagecode = am_entry.get_value_by_name(self._AMCACHE_P_MSIPACKAGECODE)
if msipackagecode:
msipackagecode = msipackagecode.get_data()
msipackagecode = msipackagecode.decode('utf-16-LE')
event_data.msipackagecode = msipackagecode
files = am_entry.get_value_by_name(self._AMCACHE_P_FILES)
if files:
files = files.get_data()
files = files.decode('utf-16-LE')
event_data.files = files
event = time_events.DateTimeValuesEvent(
posix_time.PosixTime(amcache_datetime),
definitions.TIME_DESCRIPTION_INSTALLATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,684 |
Parses an Amcache Root/File key for events.
Args:
am_entry (pyregf.key): amcache File key.
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
|
def _ProcessAMCacheFileKey(self, am_entry, parser_mediator):
amcache_datetime = am_entry.get_value_by_name(
self._AMCACHE_DATETIME).get_data_as_integer()
event_data = AmcacheEventData()
event_data.full_path = am_entry.get_value_by_name(
self._AMCACHE_FULL_PATH).get_data_as_string()
# Strip off the 4 leading zeros from the SHA-1 hash.
event_data.sha1 = am_entry.get_value_by_name(
self._AMCACHE_SHA1).get_data_as_string()[4:]
productname = am_entry.get_value_by_name(self._AMCACHE_PRODUCTNAME)
if productname:
event_data.productname = productname.get_data_as_string()
companyname = am_entry.get_value_by_name(self._AMCACHE_COMPANYNAME)
if companyname:
event_data.companyname = companyname.get_data_as_string()
fileversion = am_entry.get_value_by_name(self._AMCACHE_FILEVERSION)
if fileversion:
event_data.fileversion = fileversion.get_data_as_string()
languagecode = am_entry.get_value_by_name(self._AMCACHE_LANGUAGECODE)
if languagecode:
event_data.languagecode = languagecode.get_data_as_integer()
filesize = am_entry.get_value_by_name(self._AMCACHE_FILESIZE)
if filesize:
event_data.filesize = filesize.get_data_as_integer()
filedescription = am_entry.get_value_by_name(self._AMCACHE_FILEDESCRIPTION)
if filedescription:
event_data.filedescription = filedescription.get_data_as_string()
linkerts = am_entry.get_value_by_name(self._AMCACHE_LINKERTS)
if linkerts:
event_data.linkerts = linkerts.get_data_as_integer()
lastmodifiedts = am_entry.get_value_by_name(self._AMCACHE_LASTMODIFIEDTS)
if lastmodifiedts:
event_data.lastmodifiedts = lastmodifiedts.get_data_as_integer()
createdts = am_entry.get_value_by_name(self._AMCACHE_CREATEDTS)
if createdts:
event_data.createdts = createdts.get_data_as_integer()
programid = am_entry.get_value_by_name(self._AMCACHE_PROGRAMID)
if programid:
event_data.programid = programid.get_data_as_string()
event = time_events.DateTimeValuesEvent(
filetime.Filetime(amcache_datetime),
definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
if event_data.createdts:
event = time_events.DateTimeValuesEvent(
filetime.Filetime(event_data.createdts),
definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
if event_data.lastmodifiedts:
event = time_events.DateTimeValuesEvent(
filetime.Filetime(event_data.lastmodifiedts),
definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
if event_data.linkerts:
event = time_events.DateTimeValuesEvent(
posix_time.PosixTime(event_data.linkerts),
definitions.TIME_DESCRIPTION_CHANGE)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,685 |
Initializes an nsrlsvr analyzer thread.
Args:
hash_queue (Queue.queue): contains hashes to be analyzed.
hash_analysis_queue (Queue.queue): queue to which the analyzer will append
HashAnalysis objects.
|
def __init__(self, hash_queue, hash_analysis_queue, **kwargs):
super(NsrlsvrAnalyzer, self).__init__(
hash_queue, hash_analysis_queue, **kwargs)
self._host = None
self._port = None
self.hashes_per_batch = 100
| 288,686 |
Queries nsrlsvr for a specific hash.
Args:
nsrl_socket (socket._socketobject): socket of connection to nsrlsvr.
digest (str): hash to look up.
Returns:
bool: True if the hash was found, False if not or None on error.
|
def _QueryHash(self, nsrl_socket, digest):
try:
query = 'QUERY {0:s}\n'.format(digest).encode('ascii')
except UnicodeDecodeError:
logger.error('Unable to encode digest: {0!s} to ASCII.'.format(digest))
return False
response = None
try:
nsrl_socket.sendall(query)
response = nsrl_socket.recv(self._RECEIVE_BUFFER_SIZE)
except socket.error as exception:
logger.error('Unable to query nsrlsvr with error: {0!s}.'.format(
exception))
if not response:
return False
# Strip end-of-line characters since they can differ per platform on which
# nsrlsvr is running.
response = response.strip()
# nsrlsvr returns "OK 1" if the hash was found or "OK 0" if not.
return response == b'OK 1'
| 288,688 |
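A minimal sketch of the nsrlsvr query exchange performed by _QueryHash above, using only the standard library socket module; the host, port and reduced error handling are illustrative assumptions, not plaso's implementation.

import socket

def query_nsrlsvr(host, port, digest, timeout=5):
  """Returns True if nsrlsvr reports the hash as known, False otherwise."""
  query = 'QUERY {0:s}\n'.format(digest).encode('ascii')
  nsrl_socket = socket.create_connection((host, port), timeout=timeout)
  try:
    nsrl_socket.sendall(query)
    response = nsrl_socket.recv(4096)
  finally:
    nsrl_socket.close()
  # Strip end-of-line characters, which differ per platform, before comparing.
  return response.strip() == b'OK 1'

# Example usage against a hypothetical local nsrlsvr instance:
# print(query_nsrlsvr('127.0.0.1', 9120, 'd41d8cd98f00b204e9800998ecf8427e'))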
Looks up hashes in nsrlsvr.
Args:
hashes (list[str]): hash values to look up.
Returns:
list[HashAnalysis]: analysis results, or an empty list on error.
|
def Analyze(self, hashes):
logger.debug(
'Opening connection to {0:s}:{1:d}'.format(self._host, self._port))
nsrl_socket = self._GetSocket()
if not nsrl_socket:
self.SignalAbort()
return []
hash_analyses = []
for digest in hashes:
response = self._QueryHash(nsrl_socket, digest)
if response is None:
continue
hash_analysis = interface.HashAnalysis(digest, response)
hash_analyses.append(hash_analysis)
nsrl_socket.close()
logger.debug(
'Closed connection to {0:s}:{1:d}'.format(self._host, self._port))
return hash_analyses
| 288,689 |
Extracts relevant TimeMachine entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
|
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
backup_alias_map = self._GetDataTypeMap('timemachine_backup_alias')
destinations = match.get('Destinations', [])
for destination in destinations:
backup_alias_data = destination.get('BackupAlias', b'')
try:
backup_alias = self._ReadStructureFromByteStream(
backup_alias_data, 0, backup_alias_map)
alias = backup_alias.string
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse backup alias value with error: {0!s}'.format(
exception))
alias = 'Unknown alias'
destination_identifier = (
destination.get('DestinationID', None) or 'Unknown device')
event_data = plist_event.PlistTimeEventData()
event_data.desc = 'TimeMachine Backup in {0:s} ({1:s})'.format(
alias, destination_identifier)
event_data.key = 'item/SnapshotDates'
event_data.root = '/Destinations'
snapshot_dates = destination.get('SnapshotDates', [])
for datetime_value in snapshot_dates:
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,691 |
Retrieves the Id value from the Task Cache Tree key.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Yields:
tuple: containing:
dfwinreg.WinRegistryKey: Windows Registry key.
dfwinreg.WinRegistryValue: Windows Registry value.
|
def _GetIdValue(self, registry_key):
id_value = registry_key.GetValueByName('Id')
if id_value:
yield registry_key, id_value
for sub_key in registry_key.GetSubkeys():
for value_key, id_value in self._GetIdValue(sub_key):
yield value_key, id_value
| 288,693 |
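The recursive generator pattern used by _GetIdValue, restated over a plain nested dictionary so it can be run without dfwinreg; the key names and 'Id' entries are made-up stand-ins for Registry keys and values.

def iterate_id_values(key_name, key):
  """Yields (key name, Id value) for every nested key that has an 'Id' entry."""
  id_value = key.get('Id')
  if id_value is not None:
    yield key_name, id_value
  for sub_key_name, sub_key in key.items():
    if isinstance(sub_key, dict):
      for value_key, id_value in iterate_id_values(sub_key_name, sub_key):
        yield value_key, id_value

tree = {'TaskA': {'Id': '{guid-a}'}, 'Folder': {'TaskB': {'Id': '{guid-b}'}}}
for name, identifier in iterate_id_values('Tree', tree):
  print(name, identifier)  # prints: TaskA {guid-a}, then TaskB {guid-b}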
Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
|
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
dynamic_info_size_error_reported = False
tasks_key = registry_key.GetSubkeyByName('Tasks')
tree_key = registry_key.GetSubkeyByName('Tree')
if not tasks_key or not tree_key:
parser_mediator.ProduceExtractionWarning(
'Task Cache is missing a Tasks or Tree sub key.')
return
task_guids = {}
for sub_key in tree_key.GetSubkeys():
for value_key, id_value in self._GetIdValue(sub_key):
# TODO: improve this check to a regex.
# The GUID is in the form {%GUID%}, stored as a UTF-16 little-endian
# string and should be 78 bytes in size.
id_value_data_size = len(id_value.data)
if id_value_data_size != 78:
parser_mediator.ProduceExtractionWarning(
'unsupported Id value data size: {0:d}.'.format(
id_value_data_size))
continue
guid_string = id_value.GetDataAsObject()
task_guids[guid_string] = value_key.name
dynamic_info_map = self._GetDataTypeMap('dynamic_info_record')
dynamic_info2_map = self._GetDataTypeMap('dynamic_info2_record')
dynamic_info_size = dynamic_info_map.GetByteSize()
dynamic_info2_size = dynamic_info2_map.GetByteSize()
for sub_key in tasks_key.GetSubkeys():
dynamic_info_value = sub_key.GetValueByName('DynamicInfo')
if not dynamic_info_value:
continue
dynamic_info_record_map = None
dynamic_info_value_data_size = len(dynamic_info_value.data)
if dynamic_info_value_data_size == dynamic_info_size:
dynamic_info_record_map = dynamic_info_map
elif dynamic_info_value_data_size == dynamic_info2_size:
dynamic_info_record_map = dynamic_info2_map
else:
if not dynamic_info_size_error_reported:
parser_mediator.ProduceExtractionWarning(
'unsupported DynamicInfo value data size: {0:d}.'.format(
dynamic_info_value_data_size))
dynamic_info_size_error_reported = True
continue
try:
dynamic_info_record = self._ReadStructureFromByteStream(
dynamic_info_value.data, 0, dynamic_info_record_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse DynamicInfo record with error: {0!s}.'.format(
exception))
continue
name = task_guids.get(sub_key.name, sub_key.name)
values_dict = {}
values_dict['Task: {0:s}'.format(name)] = '[ID: {0:s}]'.format(
sub_key.name)
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = registry_key.path
event_data.offset = registry_key.offset
event_data.regvalue = values_dict
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
event_data = TaskCacheEventData()
event_data.task_name = name
event_data.task_identifier = sub_key.name
last_registered_time = dynamic_info_record.last_registered_time
if last_registered_time:
# Note this is likely either the last registered time or
# the update time.
date_time = dfdatetime_filetime.Filetime(timestamp=last_registered_time)
event = time_events.DateTimeValuesEvent(
date_time, 'Last registered time')
parser_mediator.ProduceEventWithEventData(event, event_data)
launch_time = dynamic_info_record.launch_time
if launch_time:
# Note this is likely the launch time.
date_time = dfdatetime_filetime.Filetime(timestamp=launch_time)
event = time_events.DateTimeValuesEvent(
date_time, 'Launch time')
parser_mediator.ProduceEventWithEventData(event, event_data)
unknown_time = getattr(dynamic_info_record, 'unknown_time', None)
if unknown_time:
date_time = dfdatetime_filetime.Filetime(timestamp=unknown_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_UNKNOWN)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,694 |
Verifies a PLS Recall record.
Args:
pls_record (pls_recall_record): a PLS Recall record to verify.
Returns:
bool: True if this is a valid PLS Recall record, False otherwise.
|
def _VerifyRecord(self, pls_record):
# Verify that the timestamp is no more than six years into the future.
# Six years is an arbitrary time length just to evaluate the timestamp
# against some value. There is no guarantee that this will catch everything.
# TODO: Add a check for a similarly valid value back in time. Maybe if the
# timestamp is before 1980 we are pretty sure it is invalid?
# TODO: This is a very flaky assumption. Find a better one.
future_timestamp = (
timelib.Timestamp.GetNow() + self._SIX_YEARS_IN_MICRO_SECONDS)
if pls_record.last_written_time > future_timestamp:
return False
# Take the first word from the query field and attempt to match that against
# known query keywords.
first_word, _, _ = pls_record.query.partition(' ')
if first_word.lower() not in self._PLS_KEYWORD:
return False
return True
| 288,696 |
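A self-contained sketch of the six-years-in-the-future plausibility check applied by _VerifyRecord; the constant, the keyword set and the flattened record fields are illustrative stand-ins for pls_recall_record and plaso's internals.

import time

SIX_YEARS_IN_MICROSECONDS = 6 * 365 * 24 * 60 * 60 * 1000000
PLS_KEYWORDS = frozenset(['select', 'insert', 'update', 'delete', 'commit'])

def verify_record(last_written_time, query):
  """Returns True if the timestamp and the query's first word look plausible."""
  # Reject timestamps more than roughly six years in the future.
  future_timestamp = int(time.time() * 1000000) + SIX_YEARS_IN_MICROSECONDS
  if last_written_time > future_timestamp:
    return False
  # The first word of the query must be a known SQL keyword.
  first_word, _, _ = query.partition(' ')
  return first_word.lower() in PLS_KEYWORDS

print(verify_record(int(time.time() * 1000000), 'SELECT * FROM dual'))  # True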
Parses a PLSRecall.dat file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
def ParseFileObject(self, parser_mediator, file_object):
file_offset = 0
file_size = file_object.get_size()
record_map = self._GetDataTypeMap('pls_recall_record')
while file_offset < file_size:
try:
pls_record, record_data_size = self._ReadStructureFromFileObject(
file_object, file_offset, record_map)
except (ValueError, errors.ParseError) as exception:
if file_offset == 0:
raise errors.UnableToParseFile('Unable to parse first record.')
parser_mediator.ProduceExtractionWarning((
'unable to parse record at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
break
if file_offset == 0 and not self._VerifyRecord(pls_record):
raise errors.UnableToParseFile('Verification of first record failed.')
event_data = PlsRecallEventData()
event_data.database_name = pls_record.database_name.rstrip('\x00')
event_data.sequence_number = pls_record.sequence_number
event_data.offset = file_offset
event_data.query = pls_record.query.rstrip('\x00')
event_data.username = pls_record.username.rstrip('\x00')
date_time = dfdatetime_delphi_date_time.DelphiDateTime(
timestamp=pls_record.last_written_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
file_offset += record_data_size
| 288,697 |
Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
|
def GetMessages(self, formatter_mediator, event):
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
regvalue = event_values.get('regvalue', {})
string_parts = []
for key, value in sorted(regvalue.items()):
string_parts.append('{0:s}: {1!s}'.format(key, value))
event_values['text'] = ' '.join(string_parts)
urls = event_values.get('urls', [])
if urls:
event_values['urls'] = ' - '.join(urls)
if 'key_path' in event_values:
format_string = self.FORMAT_STRING
else:
format_string = self.FORMAT_STRING_ALTERNATIVE
return self._FormatMessages(
format_string, self.FORMAT_STRING_SHORT, event_values)
| 288,698 |
Determines the short and long source for an event object.
Args:
event (EventObject): event.
Returns:
tuple(str, str): short and long source string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
|
def GetSources(self, event):
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
source_long = getattr(event, 'source_long', 'UNKNOWN')
source_append = getattr(event, 'source_append', None)
if source_append:
source_long = '{0:s} {1:s}'.format(source_long, source_append)
return self.SOURCE_SHORT, source_long
| 288,699 |
Analyzes an event and extracts domains from it.
We only evaluate straightforward web history events, not visits which can
be inferred by TypedURLs, cookies or other means.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event to examine.
|
def ExamineEvent(self, mediator, event):
if event.data_type not in self._DATATYPES:
return
url = getattr(event, 'url', None)
if url is None:
return
parsed_url = urlparse.urlparse(url)
domain = getattr(parsed_url, 'netloc', None)
if domain in self._domains:
# We've already found an event containing this domain.
return
self._domains.append(domain)
| 288,700 |
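A sketch of the domain-extraction step on its own, using the standard library URL parser; the event attribute access is replaced by plain URL strings and the helper name is made up.

try:
  from urllib.parse import urlparse  # Python 3
except ImportError:
  from urlparse import urlparse  # Python 2

def extract_unique_domains(urls):
  """Returns the unique domains (netloc) seen in the given URLs, in order."""
  domains = []
  for url in urls:
    domain = urlparse(url).netloc
    if domain and domain not in domains:
      domains.append(domain)
  return domains

print(extract_unique_domains([
    'https://example.com/a', 'https://example.com/b', 'http://test.invalid/']))
# ['example.com', 'test.invalid']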
Compiles an analysis report.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
Returns:
AnalysisReport: the analysis report.
|
def CompileReport(self, mediator):
lines_of_text = ['Listing domains visited by all users']
for domain in sorted(self._domains):
lines_of_text.append(domain)
lines_of_text.append('')
report_text = '\n'.join(lines_of_text)
return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)
| 288,701 |
Determines if a Windows Registry key matches the filter.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Returns:
bool: True if the Windows Registry key matches the filter.
|
def Match(self, registry_key):
key_path_upper = registry_key.path.upper()
# Prevent this filter matching non-string MRUListEx values.
for ignore_key_path_suffix in self._IGNORE_KEY_PATH_SUFFIXES:
if key_path_upper.endswith(ignore_key_path_suffix):
return False
for ignore_key_path_segment in self._IGNORE_KEY_PATH_SEGMENTS:
if ignore_key_path_segment in key_path_upper:
return False
return super(MRUListExStringRegistryKeyFilter, self).Match(registry_key)
| 288,702 |
Extract event objects from a MRUListEx Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage.
|
def _ParseMRUListExKey(
self, parser_mediator, registry_key, codepage='cp1252'):
try:
mrulistex = self._ParseMRUListExValue(registry_key)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse MRUListEx value with error: {0!s}'.format(exception))
return
if not mrulistex:
return
values_dict = {}
found_terminator = False
for entry_index, entry_number in enumerate(mrulistex):
# The MRU list is terminated with -1 (0xffffffff).
if entry_number == -1:
break
if found_terminator:
parser_mediator.ProduceExtractionWarning((
'found additional MRUListEx entries after terminator in key: '
'{0:s}.').format(registry_key.path))
# Only create one parser error per terminator.
found_terminator = False
value_string = self._ParseMRUListExEntryValue(
parser_mediator, registry_key, entry_index, entry_number,
codepage=codepage)
value_text = 'Index: {0:d} [MRU Value {1:d}]'.format(
entry_index + 1, entry_number)
values_dict[value_text] = value_string
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = registry_key.path
event_data.offset = registry_key.offset
event_data.regvalue = values_dict
event_data.source_append = self._SOURCE_APPEND
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,703 |
Parses the MRUListEx entry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
entry_index (int): MRUListEx entry index.
entry_number (int): entry number.
Returns:
str: MRUList entry value.
|
def _ParseMRUListExEntryValue(
self, parser_mediator, registry_key, entry_index, entry_number, **kwargs):
value_string = ''
value = registry_key.GetValueByName('{0:d}'.format(entry_number))
if value is None:
parser_mediator.ProduceExtractionWarning(
'missing MRUListEx value: {0:d} in key: {1:s}.'.format(
entry_number, registry_key.path))
elif value.DataIsString():
value_string = value.GetDataAsObject()
elif value.DataIsBinaryData():
utf16le_string_map = self._GetDataTypeMap('utf16le_string')
try:
value_string = self._ReadStructureFromByteStream(
value.data, 0, utf16le_string_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse MRUListEx entry value: {0:d} with error: '
'{1!s}').format(entry_number, exception))
value_string = value_string.rstrip('\x00')
return value_string
| 288,704 |
Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage.
|
def ExtractEvents(
self, parser_mediator, registry_key, codepage='cp1252', **kwargs):
self._ParseMRUListExKey(parser_mediator, registry_key, codepage=codepage)
| 288,705 |
Parses the MRUListEx entry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
entry_index (int): MRUListEx entry index.
entry_number (int): entry number.
codepage (Optional[str]): extended ASCII string codepage.
Returns:
str: MRUList entry value.
|
def _ParseMRUListExEntryValue(
self, parser_mediator, registry_key, entry_index, entry_number,
codepage='cp1252', **kwargs):
value_string = ''
value = registry_key.GetValueByName('{0:d}'.format(entry_number))
if value is None:
parser_mediator.ProduceExtractionWarning(
'missing MRUListEx value: {0:d} in key: {1:s}.'.format(
entry_number, registry_key.path))
elif not value.DataIsBinaryData():
logger.debug((
'[{0:s}] Non-binary MRUListEx entry value: {1:d} in key: '
'{2:s}.').format(self.NAME, entry_number, registry_key.path))
elif value.data:
shell_items_parser = shell_items.ShellItemsParser(registry_key.path)
shell_items_parser.ParseByteStream(
parser_mediator, value.data, codepage=codepage)
value_string = 'Shell item path: {0:s}'.format(
shell_items_parser.CopyToPath())
return value_string
| 288,706 |
Parses the MRUListEx entry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
entry_index (int): MRUListEx entry index.
entry_number (int): entry number.
codepage (Optional[str]): extended ASCII string codepage.
Returns:
str: MRUList entry value.
|
def _ParseMRUListExEntryValue(
self, parser_mediator, registry_key, entry_index, entry_number,
codepage='cp1252', **kwargs):
value_string = ''
value = registry_key.GetValueByName('{0:d}'.format(entry_number))
if value is None:
parser_mediator.ProduceExtractionWarning(
'missing MRUListEx value: {0:d} in key: {1:s}.'.format(
entry_number, registry_key.path))
elif not value.DataIsBinaryData():
logger.debug((
'[{0:s}] Non-binary MRUListEx entry value: {1:d} in key: '
'{2:s}.').format(self.NAME, entry_number, registry_key.path))
elif value.data:
utf16le_string_map = self._GetDataTypeMap('utf16le_string')
context = dtfabric_data_maps.DataTypeMapContext()
try:
path = self._ReadStructureFromByteStream(
value.data, 0, utf16le_string_map, context=context)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse MRUListEx entry value: {0:d} with error: '
'{1!s}').format(entry_number, exception))
return value_string
path = path.rstrip('\x00')
shell_item_data = value.data[context.byte_size:]
if not shell_item_data:
parser_mediator.ProduceExtractionWarning((
'missing shell item in MRUListEx value: {0:d} in key: '
'{1:s}.').format(entry_number, registry_key.path))
value_string = 'Path: {0:s}'.format(path)
else:
shell_items_parser = shell_items.ShellItemsParser(registry_key.path)
shell_items_parser.ParseByteStream(
parser_mediator, shell_item_data, codepage=codepage)
value_string = 'Path: {0:s}, Shell item: [{1:s}]'.format(
path, shell_items_parser.CopyToPath())
return value_string
| 288,707 |
Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage.
|
def ExtractEvents(
self, parser_mediator, registry_key, codepage='cp1252', **kwargs):
self._ParseMRUListExKey(parser_mediator, registry_key, codepage=codepage)
if registry_key.name == 'RecentDocs':
# For the RecentDocs MRUListEx we also need to parse its subkeys
# since the Registry key path does not support wildcards yet.
for subkey in registry_key.GetSubkeys():
self._ParseMRUListExKey(parser_mediator, subkey, codepage=codepage)
| 288,708 |
Parses a comment.
Args:
structure (pyparsing.ParseResults): structure parsed from the log file.
|
def _ParseComment(self, structure):
if structure[1] == 'Date:':
self._year, self._month, self._day_of_month, _, _, _ = structure.date_time
elif structure[1] == 'Fields:':
self._ParseFieldsMetadata(structure)
| 288,710 |
Parses the fields metadata and updates the log line definition to match.
Args:
structure (pyparsing.ParseResults): structure parsed from the log file.
|
def _ParseFieldsMetadata(self, structure):
fields = structure.fields.split(' ')
log_line_structure = pyparsing.Empty()
if fields[0] == 'date' and fields[1] == 'time':
log_line_structure += self.DATE_TIME.setResultsName('date_time')
fields = fields[2:]
for member in fields:
log_line_structure += self._LOG_LINE_STRUCTURES.get(member, self.URI)
updated_structures = []
for line_structure in self._line_structures:
if line_structure[0] != 'logline':
updated_structures.append(line_structure)
updated_structures.append(('logline', log_line_structure))
# TODO: self._line_structures is a work-around and this needs
# a structural fix.
self._line_structures = updated_structures
| 288,711 |
Parse a single log line and produce an event object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure parsed from the log file.
|
def _ParseLogLine(self, parser_mediator, structure):
if structure.date_time:
time_elements_tuple = structure.date_time
elif structure.date and structure.time:
year, month, day_of_month = structure.date
hours, minutes, seconds = structure.time
time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)
elif structure.time:
hours, minutes, seconds = structure.time
time_elements_tuple = (
self._year, self._month, self._day_of_month, hours, minutes, seconds)
else:
parser_mediator.ProduceExtractionWarning('missing date and time values')
return
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(time_elements_tuple))
return
event_data = IISEventData()
for key, value in iter(structure.items()):
if key in ('date', 'date_time', 'time') or value == '-':
continue
if isinstance(value, pyparsing.ParseResults):
value = ''.join(value)
setattr(event_data, key, value)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,712 |
Verify that this file is an IIS log file.
Args:
parser_mediator (ParserMediator): mediates interactions between
parsers and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line was successfully parsed.
|
def VerifyStructure(self, parser_mediator, line):
# TODO: self._line_structures is a work-around and this needs
# a structural fix.
self._line_structures = self.LINE_STRUCTURES
self._day_of_month = None
self._month = None
self._year = None
# TODO: Examine other versions of the file format and if this parser should
# support them. For now just checking if it contains the IIS header.
if self._SIGNATURE in line:
return True
return False
| 288,713 |
Extracts a container or a graph ID from a JSON file's path.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
Returns:
str: container or graph identifier.
|
def _GetIdentifierFromPath(self, parser_mediator):
file_entry = parser_mediator.GetFileEntry()
path = file_entry.path_spec.location
file_system = file_entry.GetFileSystem()
path_segments = file_system.SplitPath(path)
return path_segments[-2]
| 288,717 |
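The path-splitting idea behind _GetIdentifierFromPath, sketched with plain string splitting instead of a dfvfs file system object; the example path is illustrative.

def get_identifier_from_path(path):
  """Returns the second-to-last path segment, e.g. a container or graph ID."""
  path_segments = [segment for segment in path.split('/') if segment]
  return path_segments[-2]

print(get_identifier_from_path(
    '/var/lib/docker/containers/3a1b2c4d5e6f/config.json'))  # 3a1b2c4d5e6f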
Extracts events from a Docker filesystem layer configuration file.
The path of each filesystem layer config file is:
DOCKER_DIR/graph/<layer_id>/json
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file is not a valid layer config file.
|
def _ParseLayerConfigJSON(self, parser_mediator, file_object):
file_content = file_object.read()
file_content = codecs.decode(file_content, self._ENCODING)
json_dict = json.loads(file_content)
if 'docker_version' not in json_dict:
raise errors.UnableToParseFile(
'not a valid Docker layer configuration file, missing '
'\'docker_version\' key.')
if 'created' in json_dict:
layer_creation_command_array = [
x.strip() for x in json_dict['container_config']['Cmd']]
layer_creation_command = ' '.join(layer_creation_command_array).replace(
'\t', '')
event_data = DockerJSONLayerEventData()
event_data.command = layer_creation_command
event_data.layer_id = self._GetIdentifierFromPath(parser_mediator)
timestamp = timelib.Timestamp.FromTimeString(json_dict['created'])
event = time_events.TimestampEvent(
timestamp, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,718 |
Extracts events from a Docker container configuration file.
The path of each container config file is:
DOCKER_DIR/containers/<container_id>/config.json
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file is not a valid container config file.
|
def _ParseContainerConfigJSON(self, parser_mediator, file_object):
file_content = file_object.read()
file_content = codecs.decode(file_content, self._ENCODING)
json_dict = json.loads(file_content)
if 'Driver' not in json_dict:
raise errors.UnableToParseFile(
'not a valid Docker container configuration file, ' 'missing '
'\'Driver\' key.')
container_id_from_path = self._GetIdentifierFromPath(parser_mediator)
container_id_from_json = json_dict.get('ID', None)
if not container_id_from_json:
raise errors.UnableToParseFile(
'not a valid Docker container configuration file, the \'ID\' key is '
'missing from the JSON dict (should be {0:s})'.format(
container_id_from_path))
if container_id_from_json != container_id_from_path:
raise errors.UnableToParseFile(
'not a valid Docker container configuration file. The \'ID\' key of '
'the JSON dict ({0:s}) is different from the container ID taken from '
'the path to the JSON file ({1:s}).'.format(
container_id_from_json, container_id_from_path))
if 'Config' in json_dict and 'Hostname' in json_dict['Config']:
container_name = json_dict['Config']['Hostname']
else:
container_name = 'Unknown container name'
event_data = DockerJSONContainerEventData()
event_data.container_id = container_id_from_path
event_data.container_name = container_name
if 'State' in json_dict:
if 'StartedAt' in json_dict['State']:
event_data.action = 'Container Started'
timestamp = timelib.Timestamp.FromTimeString(
json_dict['State']['StartedAt'])
event = time_events.TimestampEvent(
timestamp, definitions.TIME_DESCRIPTION_START)
parser_mediator.ProduceEventWithEventData(event, event_data)
if 'FinishedAt' in json_dict['State']:
if json_dict['State']['FinishedAt'] != '0001-01-01T00:00:00Z':
event_data.action = 'Container Finished'
# If the timestamp is 0001-01-01T00:00:00Z, the container
# is still running, so we don't generate a Finished event
timestamp = timelib.Timestamp.FromTimeString(
json_dict['State']['FinishedAt'])
event = time_events.TimestampEvent(
timestamp, definitions.TIME_DESCRIPTION_END)
parser_mediator.ProduceEventWithEventData(event, event_data)
created_time = json_dict.get('Created', None)
if created_time:
event_data.action = 'Container Created'
timestamp = timelib.Timestamp.FromTimeString(created_time)
event = time_events.TimestampEvent(
timestamp, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,719 |
Extracts events from a Docker container log file.
The format is one JSON formatted log message per line.
The path of each container log file (which logs the container stdout and
stderr) is:
DOCKER_DIR/containers/<container_id>/<container_id>-json.log
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
|
def _ParseContainerLogJSON(self, parser_mediator, file_object):
container_id = self._GetIdentifierFromPath(parser_mediator)
text_file_object = text_file.TextFile(file_object)
for log_line in text_file_object:
json_log_line = json.loads(log_line)
time = json_log_line.get('time', None)
if not time:
continue
event_data = DockerJSONContainerLogEventData()
event_data.container_id = container_id
event_data.log_line = json_log_line.get('log', None)
event_data.log_source = json_log_line.get('stream', None)
# TODO: pass line number to offset or remove.
event_data.offset = 0
timestamp = timelib.Timestamp.FromTimeString(time)
event = time_events.TimestampEvent(
timestamp, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,720 |
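A sketch of reading a Docker JSON-lines container log with only the standard library, mirroring the loop above; the sample log content and the function name are made up.

import io
import json

def parse_container_log(file_object):
  """Yields (time, stream, log line) tuples from a <id>-json.log style file."""
  for log_line in io.TextIOWrapper(file_object, encoding='utf-8'):
    json_log_line = json.loads(log_line)
    time_string = json_log_line.get('time')
    if not time_string:
      # Skip entries without a timestamp, as the parser above does.
      continue
    yield time_string, json_log_line.get('stream'), json_log_line.get('log')

sample = io.BytesIO(
    b'{"log":"hello\\n","stream":"stdout","time":"2019-01-01T00:00:00.0Z"}\n')
for entry in parse_container_log(sample):
  print(entry)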
Parses a message row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
|
def ParseMessageRow(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
event_data = IMessageEventData()
event_data.attachment_location = self._GetRowValue(
query_hash, row, 'attachment_location')
event_data.imessage_id = self._GetRowValue(query_hash, row, 'imessage_id')
event_data.message_type = self._GetRowValue(query_hash, row, 'message_type')
event_data.offset = self._GetRowValue(query_hash, row, 'ROWID')
event_data.query = query
event_data.read_receipt = self._GetRowValue(query_hash, row, 'read_receipt')
event_data.service = self._GetRowValue(query_hash, row, 'service')
event_data.text = self._GetRowValue(query_hash, row, 'text')
timestamp = self._GetRowValue(query_hash, row, 'date')
date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,723 |
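The parser above treats the 'date' column as a Cocoa timestamp, i.e. seconds relative to the Cocoa epoch of 2001-01-01 00:00:00 UTC. A small sketch of converting such a value with only the standard library, independent of dfdatetime:

import datetime

COCOA_EPOCH = datetime.datetime(2001, 1, 1, 0, 0, 0)

def cocoa_time_to_datetime(timestamp):
  """Converts a Cocoa timestamp (seconds since 2001-01-01) to a datetime."""
  return COCOA_EPOCH + datetime.timedelta(seconds=timestamp)

print(cocoa_time_to_datetime(0))          # 2001-01-01 00:00:00
print(cocoa_time_to_datetime(536543400))  # 2018-01-01 23:50:00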
Converts a binary data value into an integer.
Args:
value (bytes): binary data value containing an unsigned 64-bit big-endian
integer.
Returns:
int: integer representation of binary data value or None if value is
not set.
Raises:
ParseError: if the integer value cannot be parsed.
|
def _ConvertValueBinaryDataToUBInt64(self, value):
if not value:
return None
integer_map = self._GetDataTypeMap('uint64be')
try:
return self._ReadStructureFromByteStream(value, 0, integer_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse integer value with error: {0!s}'.format(
exception))
| 288,725 |
Retrieves a specific value from the record.
Args:
record (pyesedb.record): ESE record.
value_entry (int): value entry.
Returns:
object: value.
Raises:
ValueError: if the value is not supported.
|
def _GetRecordValue(self, record, value_entry):
column_type = record.get_column_type(value_entry)
long_value = None
if record.is_long_value(value_entry):
long_value = record.get_value_data_as_long_value(value_entry)
if record.is_multi_value(value_entry):
# TODO: implement
raise ValueError('Multi value support not implemented yet.')
if column_type == pyesedb.column_types.NULL:
return None
if column_type == pyesedb.column_types.BOOLEAN:
# TODO: implement
raise ValueError('Boolean value support not implemented yet.')
if column_type in self.INTEGER_COLUMN_TYPES:
if long_value:
raise ValueError('Long integer value not supported.')
return record.get_value_data_as_integer(value_entry)
if column_type in self.FLOATING_POINT_COLUMN_TYPES:
if long_value:
raise ValueError('Long floating point value not supported.')
return record.get_value_data_as_floating_point(value_entry)
if column_type in self.STRING_COLUMN_TYPES:
if long_value:
return long_value.get_data_as_string()
return record.get_value_data_as_string(value_entry)
if column_type == pyesedb.column_types.GUID:
# TODO: implement
raise ValueError('GUID value support not implemented yet.')
if long_value:
return long_value.get_data()
return record.get_value_data(value_entry)
| 288,726 |
Retrieves the values from the record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
table_name (str): name of the table.
record (pyesedb.record): ESE record.
value_mappings (Optional[dict[str, str]): value mappings, which map
the column name to a callback method.
Returns:
dict[str,object]: values per column name.
|
def _GetRecordValues(
self, parser_mediator, table_name, record, value_mappings=None):
record_values = {}
for value_entry in range(0, record.number_of_values):
if parser_mediator.abort:
break
column_name = record.get_column_name(value_entry)
if column_name in record_values:
logger.warning(
'[{0:s}] duplicate column: {1:s} in table: {2:s}'.format(
self.NAME, column_name, table_name))
continue
value_callback = None
if value_mappings and column_name in value_mappings:
value_callback_method = value_mappings.get(column_name)
if value_callback_method:
value_callback = getattr(self, value_callback_method, None)
if value_callback is None:
logger.warning((
'[{0:s}] missing value callback method: {1:s} for column: '
'{2:s} in table: {3:s}').format(
self.NAME, value_callback_method, column_name, table_name))
if value_callback:
try:
value_data = record.get_value_data(value_entry)
value = value_callback(value_data)
except Exception as exception: # pylint: disable=broad-except
logger.error(exception)
value = None
parser_mediator.ProduceExtractionWarning((
'unable to parse value: {0:s} with callback: {1:s} with error: '
'{2!s}').format(column_name, value_callback_method, exception))
else:
try:
value = self._GetRecordValue(record, value_entry)
except ValueError as exception:
value = None
parser_mediator.ProduceExtractionWarning(
'unable to parse value: {0:s} with error: {1!s}'.format(
column_name, exception))
record_values[column_name] = value
return record_values
| 288,727 |
Extracts event objects from the database.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache (Optional[ESEDBCache]): cache.
database (Optional[pyesedb.file]): ESE database.
Raises:
ValueError: If the database attribute is not valid.
|
def GetEntries(self, parser_mediator, cache=None, database=None, **kwargs):
if database is None:
raise ValueError('Invalid database.')
for table_name, callback_method in iter(self._tables.items()):
if parser_mediator.abort:
break
if not callback_method:
# Table names without a callback method are allowed to improve
# the detection of a database based on its table names.
continue
callback = getattr(self, callback_method, None)
if callback is None:
logger.warning(
'[{0:s}] missing callback method: {1:s} for table: {2:s}'.format(
self.NAME, callback_method, table_name))
continue
esedb_table = database.get_table_by_name(table_name)
if not esedb_table:
logger.warning('[{0:s}] missing table: {1:s}'.format(
self.NAME, table_name))
continue
# The database is passed in case the database contains table names
# that are assigned dynamically and cannot be defined by
# the table name-callback mechanism.
callback(
parser_mediator, cache=cache, database=database, table=esedb_table,
**kwargs)
| 288,728 |
Determines if this is the appropriate plugin for the database.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache (Optional[ESEDBCache]): cache.
database (Optional[pyesedb.file]): ESE database.
Raises:
ValueError: If the database attribute is not valid.
|
def Process(self, parser_mediator, cache=None, database=None, **kwargs):
if database is None:
raise ValueError('Invalid database.')
# This will raise if unhandled keyword arguments are passed.
super(ESEDBPlugin, self).Process(parser_mediator)
self.GetEntries(
parser_mediator, cache=cache, database=database, **kwargs)
| 288,729 |
Simple method to extract date values from a Plist.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
top_level (dict[str, object]): plist top-level key.
|
def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
for root, key, datetime_value in interface.RecurseKey(top_level):
if not isinstance(datetime_value, datetime.datetime):
continue
event_data = plist_event.PlistTimeEventData()
event_data.key = key
event_data.root = root
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,730 |
Parses a record and produces a Bash history event.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): elements parsed from the file.
Raises:
ParseError: when the structure type is unknown.
|
def ParseRecord(self, parser_mediator, key, structure):
if key != 'log_entry':
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
event_data = BashHistoryEventData()
event_data.command = structure.command
date_time = dfdatetime_posix_time.PosixTime(timestamp=structure.timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,733 |
Verifies that this is a bash history file.
Args:
parser_mediator (ParserMediator): mediates interactions between
parsers and other components, such as storage and dfvfs.
lines (str): one or more lines from the text file.
Returns:
bool: True if this is the correct parser, False otherwise.
|
def VerifyStructure(self, parser_mediator, lines):
match_generator = self._VERIFICATION_GRAMMAR.scanString(lines, maxMatches=1)
return bool(list(match_generator))
| 288,734 |
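The verification above relies on pyparsing's scanString with maxMatches=1 to cheaply probe the sampled lines for a single match. A self-contained sketch of the same pattern with a made-up grammar (the real _VERIFICATION_GRAMMAR is defined elsewhere in the parser):

import pyparsing

# Hypothetical grammar mirroring the bash history layout:
# a '#<posix timestamp>' line followed by a command line.
VERIFICATION_GRAMMAR = (
    pyparsing.Literal('#') +
    pyparsing.Word(pyparsing.nums, min=9, max=10) +
    pyparsing.LineEnd() +
    pyparsing.CharsNotIn('\n'))

def looks_like_bash_history(lines):
  """Returns True if the grammar matches anywhere in the sampled lines."""
  # scanString yields matches lazily; maxMatches=1 stops after the first hit.
  match_generator = VERIFICATION_GRAMMAR.scanString(lines, maxMatches=1)
  return bool(list(match_generator))

print(looks_like_bash_history('#1622505600\nls -la /tmp\n'))  # True
print(looks_like_bash_history('just some free text\n'))       # False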
Retrieves the network info within the signatures subkey.
Args:
signatures_key (dfwinreg.WinRegistryKey): a Windows Registry key.
Returns:
dict[str, tuple]: a tuple of default_gateway_mac and dns_suffix per
profile identifier (GUID).
|
def _GetNetworkInfo(self, signatures_key):
network_info = {}
for category in signatures_key.GetSubkeys():
for signature in category.GetSubkeys():
profile_guid_value = signature.GetValueByName('ProfileGuid')
if profile_guid_value:
profile_guid = profile_guid_value.GetDataAsObject()
else:
continue
default_gateway_mac_value = signature.GetValueByName(
'DefaultGatewayMac')
if default_gateway_mac_value:
default_gateway_mac = ':'.join([
'{0:02x}'.format(octet)
for octet in bytearray(default_gateway_mac_value.data)])
else:
default_gateway_mac = None
dns_suffix_value = signature.GetValueByName('DnsSuffix')
if dns_suffix_value:
dns_suffix = dns_suffix_value.GetDataAsObject()
else:
dns_suffix = None
network_info[profile_guid] = (default_gateway_mac, dns_suffix)
return network_info
| 288,736 |
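The DefaultGatewayMac value is raw bytes; a sketch of the same octet formatting outside of dfwinreg, with a made-up MAC address value.

def format_mac_address(mac_address_bytes):
  """Formats raw MAC address bytes as colon-separated lowercase hex octets."""
  return ':'.join(
      '{0:02x}'.format(octet) for octet in bytearray(mac_address_bytes))

print(format_mac_address(b'\x00\x1a\x2b\x3c\x4d\x5e'))  # 00:1a:2b:3c:4d:5e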
Parses a SYSTEMTIME date and time value from a byte stream.
Args:
byte_stream (bytes): byte stream.
Returns:
dfdatetime.Systemtime: SYSTEMTIME date and time value or None if no
value is set.
Raises:
ParseError: if the SYSTEMTIME could not be parsed.
|
def _ParseSystemTime(self, byte_stream):
systemtime_map = self._GetDataTypeMap('systemtime')
try:
systemtime = self._ReadStructureFromByteStream(
byte_stream, 0, systemtime_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse SYSTEMTIME value with error: {0!s}'.format(
exception))
system_time_tuple = (
systemtime.year, systemtime.month, systemtime.weekday,
systemtime.day_of_month, systemtime.hours, systemtime.minutes,
systemtime.seconds, systemtime.milliseconds)
if system_time_tuple == self._EMPTY_SYSTEM_TIME_TUPLE:
return None
try:
return dfdatetime_systemtime.Systemtime(
system_time_tuple=system_time_tuple)
except ValueError:
raise errors.ParseError(
'Invalid SYSTEMTIME value: {0!s}'.format(system_time_tuple))
| 288,737 |
Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
|
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
network_info = {}
signatures = registry_key.GetSubkeyByName('Signatures')
if signatures:
network_info = self._GetNetworkInfo(signatures)
profiles = registry_key.GetSubkeyByName('Profiles')
if not profiles:
return
for subkey in profiles.GetSubkeys():
default_gateway_mac, dns_suffix = network_info.get(
subkey.name, (None, None))
event_data = WindowsRegistryNetworkEventData()
event_data.default_gateway_mac = default_gateway_mac
event_data.dns_suffix = dns_suffix
ssid_value = subkey.GetValueByName('ProfileName')
if ssid_value:
event_data.ssid = ssid_value.GetDataAsObject()
description_value = subkey.GetValueByName('Description')
if description_value:
event_data.description = description_value.GetDataAsObject()
connection_type_value = subkey.GetValueByName('NameType')
if connection_type_value:
connection_type = connection_type_value.GetDataAsObject()
# TODO: move to formatter.
connection_type = self._CONNECTION_TYPE.get(
connection_type, 'unknown')
event_data.connection_type = connection_type
date_created_value = subkey.GetValueByName('DateCreated')
if date_created_value:
try:
date_time = self._ParseSystemTime(date_created_value.data)
except errors.ParseError as exception:
date_time = None
parser_mediator.ProduceExtractionWarning(
'unable to parse date created with error: {0!s}'.format(
exception))
if date_time:
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
date_last_connected_value = subkey.GetValueByName('DateLastConnected')
if date_last_connected_value:
try:
date_time = self._ParseSystemTime(date_last_connected_value.data)
except errors.ParseError as exception:
date_time = None
parser_mediator.ProduceExtractionWarning(
'unable to parse date last connected with error: {0!s}'.format(
exception))
if date_time:
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_CONNECTED)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,738 |
Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
|
def GetMessages(self, formatter_mediator, event):
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
message_type = event_values.get('message_type', None)
if message_type is not None:
event_values['message_type'] = (
self._MESSAGE_TYPE.get(message_type, 'UNKNOWN'))
message_status = event_values.get('message_status', None)
if message_status is not None:
event_values['message_status'] = (
self._MESSAGE_STATUS.get(message_status, 'UNKNOWN'))
return self._ConditionalFormatMessages(event_values)
| 288,739 |
Initializes a task attribute container.
Args:
session_identifier (Optional[str]): identifier of the session the task
is part of.
|
def __init__(self, session_identifier=None):
super(Task, self).__init__()
self.aborted = False
self.completion_time = None
self.file_entry_type = None
self.has_retry = False
self.identifier = '{0:s}'.format(uuid.uuid4().hex)
self.last_processing_time = None
self.merge_priority = None
self.path_spec = None
self.session_identifier = session_identifier
self.start_time = int(time.time() * definitions.MICROSECONDS_PER_SECOND)
self.storage_file_size = None
| 288,740 |
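A minimal, self-contained sketch of how the task identifier and start time above can be generated. It assumes definitions.MICROSECONDS_PER_SECOND equals 1000000, which is not confirmed by this excerpt.

import time
import uuid

MICROSECONDS_PER_SECOND = 1000000  # assumed value of the constant

# A 32-character hexadecimal identifier, unique per task.
identifier = '{0:s}'.format(uuid.uuid4().hex)

# Start time expressed as microseconds since the POSIX epoch.
start_time = int(time.time() * MICROSECONDS_PER_SECOND)

print(identifier, start_time)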
Initializes a task completion attribute container.
Args:
identifier (Optional[str]): unique identifier of the task.
The identifier should match that of the corresponding
task start information.
session_identifier (Optional[str]): identifier of the session the task
is part of.
|
def __init__(self, identifier=None, session_identifier=None):
super(TaskCompletion, self).__init__()
self.aborted = False
self.identifier = identifier
self.session_identifier = session_identifier
self.timestamp = None
| 288,744 |
Initializes a task start attribute container.
Args:
identifier (Optional[str]): unique identifier of the task.
The identifier should match that of the corresponding
task completion information.
session_identifier (Optional[str]): identifier of the session the task
is part of.
|
def __init__(self, identifier=None, session_identifier=None):
super(TaskStart, self).__init__()
self.identifier = identifier
self.session_identifier = session_identifier
self.timestamp = None
| 288,745 |
Get the Service DLL for a service, if it exists.
Checks for a ServiceDll value in the Parameters subkey of a service key in
the Registry.
Args:
key (dfwinreg.WinRegistryKey): a Windows Registry key.
Returns:
str: path of the service DLL or None.
|
def GetServiceDll(self, key):
parameters_key = key.GetSubkeyByName('Parameters')
if not parameters_key:
return None
service_dll = parameters_key.GetValueByName('ServiceDll')
if not service_dll:
return None
return service_dll.GetDataAsObject()
| 288,746 |
Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
|
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
values_dict = {}
service_type_value = registry_key.GetValueByName('Type')
service_start_value = registry_key.GetValueByName('Start')
    # Retrieve the ServiceDll value if it exists.
if service_type_value and service_start_value:
service_dll = self.GetServiceDll(registry_key)
if service_dll:
values_dict['ServiceDll'] = service_dll
    # Gather all the other string, integer and multi-string values and
    # insert them as-is.
for value in registry_key.GetValues():
if not value.name:
continue
if value.name not in values_dict:
if value.DataIsString() or value.DataIsInteger():
values_dict[value.name] = value.GetDataAsObject()
elif value.DataIsMultiString():
values_dict[value.name] = ', '.join(value.GetDataAsObject())
# Create a specific service event, so that we can recognize and expand
# certain values when we're outputting the event.
event_data = windows_events.WindowsRegistryServiceEventData()
event_data.key_path = registry_key.path
event_data.offset = registry_key.offset
event_data.regvalue = values_dict
event_data.urls = self.URLS
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,747 |
Initializes a format specification.
Args:
identifier (str): unique name for the format.
text_format (Optional[bool]): True if the format is a text format,
False otherwise.
|
def __init__(self, identifier, text_format=False):
super(FormatSpecification, self).__init__()
self._text_format = text_format
self.identifier = identifier
self.signatures = []
| 288,749 |
Adds a signature.
Args:
pattern (bytes): pattern of the signature.
offset (int): offset of the signature. None is used to indicate
    the signature has no offset. A positive offset is relative to
    the start of the data; a negative offset is relative to the end
    of the data.
|
def AddNewSignature(self, pattern, offset=None):
self.signatures.append(Signature(pattern, offset=offset))
| 288,750 |
Adds a new format specification.
Args:
identifier (str): format identifier, which should be unique for the store.
Returns:
FormatSpecification: format specification.
Raises:
KeyError: if the store already contains a specification with
the same identifier.
|
def AddNewSpecification(self, identifier):
if identifier in self._format_specifications:
raise KeyError(
'Format specification {0:s} is already defined in store.'.format(
identifier))
self._format_specifications[identifier] = FormatSpecification(identifier)
return self._format_specifications[identifier]
| 288,752 |
Adds a format specification.
Args:
specification (FormatSpecification): format specification.
Raises:
KeyError: if the store already contains a specification with
the same identifier.
|
def AddSpecification(self, specification):
if specification.identifier in self._format_specifications:
raise KeyError(
'Format specification {0:s} is already defined in store.'.format(
specification.identifier))
self._format_specifications[specification.identifier] = specification
for signature in specification.signatures:
signature_index = len(self._signature_map)
signature_identifier = '{0:s}:{1:d}'.format(
specification.identifier, signature_index)
if signature_identifier in self._signature_map:
raise KeyError('Signature {0:s} is already defined in map.'.format(
signature_identifier))
signature.SetIdentifier(signature_identifier)
self._signature_map[signature_identifier] = specification
| 288,753 |
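The signature map above keys every signature by "<specification identifier>:<running index>", where the index is the current size of the map. A reduced sketch with stand-in classes; Spec and Sig are illustrative, not the store's real API.

class Sig(object):
  def __init__(self, pattern):
    self.pattern = pattern
    self.identifier = None

class Spec(object):
  def __init__(self, identifier):
    self.identifier = identifier
    self.signatures = []

signature_map = {}
specification = Spec('olecf')
specification.signatures.append(Sig(b'\xd0\xcf\x11\xe0'))

for signature in specification.signatures:
  signature_index = len(signature_map)
  signature_identifier = '{0:s}:{1:d}'.format(
      specification.identifier, signature_index)
  signature.identifier = signature_identifier
  signature_map[signature_identifier] = specification

print(sorted(signature_map))  # ['olecf:0']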
Parses an OLE Compound File (OLECF) file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
|
def ParseFileObject(self, parser_mediator, file_object):
olecf_file = pyolecf.file()
olecf_file.set_ascii_codepage(parser_mediator.codepage)
try:
olecf_file.open_file_object(file_object)
except IOError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to open file with error: {0!s}'.format(exception))
return
root_item = olecf_file.root_item
if not root_item:
return
# Get a list of all items in the root item from the OLECF file.
item_names = [item.name for item in root_item.sub_items]
    # Compare the required items of each plugin against the item names
    # available in the file and run every plugin whose required items
    # are present. The default plugin, if set, is run afterwards against
    # the root item.
item_names = frozenset(item_names)
try:
for plugin in self._plugins:
if parser_mediator.abort:
break
if not plugin.REQUIRED_ITEMS.issubset(item_names):
continue
try:
plugin.UpdateChainAndProcess(parser_mediator, root_item=root_item)
except Exception as exception: # pylint: disable=broad-except
parser_mediator.ProduceExtractionWarning((
'plugin: {0:s} unable to parse OLECF file with error: '
'{1!s}').format(plugin.NAME, exception))
if self._default_plugin and not parser_mediator.abort:
try:
self._default_plugin.UpdateChainAndProcess(
parser_mediator, root_item=root_item)
except Exception as exception: # pylint: disable=broad-except
parser_mediator.ProduceExtractionWarning((
'plugin: {0:s} unable to parse OLECF file with error: '
'{1!s}').format(self._default_plugin.NAME, exception))
finally:
olecf_file.close()
| 288,755 |
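The plugin selection above reduces to a set containment test: a plugin is tried only when all of its REQUIRED_ITEMS are present among the root item names. A hedged, standalone sketch; the plugin names and required item sets below are invented.

item_names = frozenset(['\x05SummaryInformation', 'WordDocument'])

plugins = {
    'summary_plugin': frozenset(['\x05SummaryInformation']),
    'destination_list_plugin': frozenset(['DestList']),
}

for plugin_name, required_items in plugins.items():
  if required_items.issubset(item_names):
    print('running plugin:', plugin_name)
  else:
    print('skipping plugin:', plugin_name)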
Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
|
def GetMessages(self, formatter_mediator, event):
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
event = event_values.get('event', None)
if event:
event_values['event_map'] = self.EVENT_NAMES.get(event, 'Unknown')
category = event_values.get('cat', None)
if category:
event_values['category_map'] = self.CATEGORY_NAMES.get(
category, 'Unknown')
action = event_values.get('action0', None)
if action:
event_values['action0_map'] = self.ACTION_0_NAMES.get(action, 'Unknown')
action = event_values.get('action1', None)
if action:
event_values['action1_map'] = self.ACTION_1_2_NAMES.get(
action, 'Unknown')
action = event_values.get('action2', None)
if action:
event_values['action2_map'] = self.ACTION_1_2_NAMES.get(
action, 'Unknown')
return self._ConditionalFormatMessages(event_values)
| 288,756 |
Extract device information from the iPod plist.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
|
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
devices = match.get('Devices', {})
for device_identifier, device_information in iter(devices.items()):
datetime_value = device_information.get('Connected', None)
if not datetime_value:
continue
event_data = IPodPlistEventData()
event_data.device_id = device_identifier
# TODO: refactor.
for key, value in iter(device_information.items()):
if key == 'Connected':
continue
attribute_name = key.lower().replace(' ', '_')
setattr(event_data, attribute_name, value)
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_LAST_CONNECTED)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,758 |
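The attribute copying above normalizes plist keys such as "Firmware Version" to lower-case, underscore-separated attribute names before assigning them with setattr. A small sketch; the keys and values are invented examples.

class EventData(object):
  pass

device_information = {
    'Connected': '2012-01-01 12:00:00',
    'Firmware Version': 256,
    'Use Count': 4,
}

event_data = EventData()
for key, value in device_information.items():
  if key == 'Connected':
    continue
  attribute_name = key.lower().replace(' ', '_')
  setattr(event_data, attribute_name, value)

print(event_data.firmware_version, event_data.use_count)  # 256 4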
Initializes a credential configuration object.
Args:
credential_data (Optional[bytes]): credential data.
credential_type (Optional[str]): credential type.
path_spec (Optional[dfvfs.PathSpec]): path specification.
|
def __init__(
self, credential_data=None, credential_type=None, path_spec=None):
super(CredentialConfiguration, self).__init__()
self.credential_data = credential_data
self.credential_type = credential_type
self.path_spec = path_spec
| 288,759 |
Initializes an output mediator.
Args:
knowledge_base (KnowledgeBase): knowledge base.
formatter_mediator (FormatterMediator): formatter mediator.
fields_filter (Optional[FilterObject]): filter object that indicates
which fields to output.
preferred_encoding (Optional[str]): preferred encoding to output.
|
def __init__(
self, knowledge_base, formatter_mediator, fields_filter=None,
preferred_encoding='utf-8'):
super(OutputMediator, self).__init__()
self._formatter_mediator = formatter_mediator
self._knowledge_base = knowledge_base
self._preferred_encoding = preferred_encoding
self._timezone = pytz.UTC
self.fields_filter = fields_filter
| 288,764 |
Retrieves the event formatter for a specific event type.
Args:
event (EventObject): event.
Returns:
EventFormatter: event formatter or None.
|
def GetEventFormatter(self, event):
data_type = getattr(event, 'data_type', None)
if not data_type:
return None
return formatters_manager.FormattersManager.GetFormatterObject(
event.data_type)
| 288,765 |
Retrieves the formatted messages related to the event.
Args:
event (EventObject): event.
Returns:
tuple: containing:
str: full message string or None if no event formatter was found.
str: short message string or None if no event formatter was found.
|
def GetFormattedMessages(self, event):
event_formatter = self.GetEventFormatter(event)
if not event_formatter:
return None, None
return event_formatter.GetMessages(self._formatter_mediator, event)
| 288,766 |
Retrieves the formatted sources related to the event.
Args:
event (EventObject): event.
Returns:
tuple: containing:
str: full source string or None if no event formatter was found.
str: short source string or None if no event formatter was found.
|
def GetFormattedSources(self, event):
event_formatter = self.GetEventFormatter(event)
if not event_formatter:
return None, None
return event_formatter.GetSources(event)
| 288,767 |
Retrieves the attribute names in the format string.
Args:
event (EventObject): event.
Returns:
  list[str]: list containing the attribute names or None if no event
    formatter can be found that matches the event.
|
def GetFormatStringAttributeNames(self, event):
event_formatter = self.GetEventFormatter(event)
if not event_formatter:
return None
return event_formatter.GetFormatStringAttributeNames()
| 288,768 |
Retrieves the hostname related to the event.
Args:
event (EventObject): event.
default_hostname (Optional[str]): default hostname.
Returns:
str: hostname.
|
def GetHostname(self, event, default_hostname='-'):
hostname = getattr(event, 'hostname', None)
if hostname:
return hostname
session_identifier = event.GetSessionIdentifier()
if session_identifier is None:
return default_hostname
hostname = self._knowledge_base.GetHostname(
session_identifier=session_identifier)
return hostname or default_hostname
| 288,769 |
Retrieves the MACB representation.
Args:
event (EventObject): event.
Returns:
str: MACB representation.
|
def GetMACBRepresentation(self, event):
data_type = getattr(event, 'data_type', None)
if not data_type:
return '....'
    # The filestat parser can store multiple timestamp descriptions,
    # separated by ';', in a single event.
if data_type == 'fs:stat':
descriptions = event.timestamp_desc.split(';')
return_characters = ['.', '.', '.', '.']
for description in descriptions:
if description in (
'mtime', definitions.TIME_DESCRIPTION_MODIFICATION):
return_characters[0] = 'M'
elif description in (
'atime', definitions.TIME_DESCRIPTION_LAST_ACCESS):
return_characters[1] = 'A'
elif description in (
'ctime', definitions.TIME_DESCRIPTION_CHANGE):
return_characters[2] = 'C'
elif description in (
'crtime', definitions.TIME_DESCRIPTION_CREATION):
return_characters[3] = 'B'
return ''.join(return_characters)
# Access time.
if event.timestamp_desc in [
definitions.TIME_DESCRIPTION_LAST_ACCESS,
definitions.TIME_DESCRIPTION_ACCOUNT_CREATED,
definitions.TIME_DESCRIPTION_LAST_VISITED,
definitions.TIME_DESCRIPTION_START,
definitions.TIME_DESCRIPTION_LAST_SHUTDOWN,
definitions.TIME_DESCRIPTION_LAST_LOGIN,
definitions.TIME_DESCRIPTION_LAST_PASSWORD_RESET,
definitions.TIME_DESCRIPTION_LAST_CONNECTED,
definitions.TIME_DESCRIPTION_LAST_RUN,
definitions.TIME_DESCRIPTION_LAST_PRINTED]:
return '.A..'
# Content modification.
if event.timestamp_desc in [
definitions.TIME_DESCRIPTION_MODIFICATION,
definitions.TIME_DESCRIPTION_WRITTEN,
definitions.TIME_DESCRIPTION_DELETED]:
return 'M...'
# Content creation time.
if event.timestamp_desc in [
definitions.TIME_DESCRIPTION_CREATION,
definitions.TIME_DESCRIPTION_ADDED,
definitions.TIME_DESCRIPTION_FILE_DOWNLOADED,
definitions.TIME_DESCRIPTION_FIRST_CONNECTED]:
return '...B'
# Metadata modification.
if event.timestamp_desc in [
definitions.TIME_DESCRIPTION_CHANGE,
definitions.TIME_DESCRIPTION_ENTRY_MODIFICATION]:
return '..C.'
return '....'
| 288,770 |
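For fs:stat events the MACB string is assembled positionally from the semicolon-separated timestamp descriptions. A standalone sketch of that assembly which only handles the short description forms shown above.

def macb_from_descriptions(timestamp_desc):
  return_characters = ['.', '.', '.', '.']
  for description in timestamp_desc.split(';'):
    if description == 'mtime':
      return_characters[0] = 'M'
    elif description == 'atime':
      return_characters[1] = 'A'
    elif description == 'ctime':
      return_characters[2] = 'C'
    elif description == 'crtime':
      return_characters[3] = 'B'
  return ''.join(return_characters)

print(macb_from_descriptions('mtime;atime;crtime'))  # 'MA.B'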
Retrieves the username related to the event.
Args:
event (EventObject): event.
default_username (Optional[str]): default username.
Returns:
str: username.
|
def GetUsername(self, event, default_username='-'):
username = getattr(event, 'username', None)
if username and username != '-':
return username
session_identifier = event.GetSessionIdentifier()
if session_identifier is None:
return default_username
user_sid = getattr(event, 'user_sid', None)
username = self._knowledge_base.GetUsernameByIdentifier(
user_sid, session_identifier=session_identifier)
return username or default_username
| 288,772 |
Sets the timezone.
Args:
timezone (str): timezone.
Raises:
ValueError: if the timezone is not supported.
|
def SetTimezone(self, timezone):
if not timezone:
return
try:
self._timezone = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise ValueError('Unsupported timezone: {0:s}'.format(timezone))
| 288,773 |
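A minimal sketch of the same timezone validation, assuming the pytz package is installed; an unknown timezone name is converted into a ValueError exactly as above.

import pytz

def set_timezone(timezone):
  # Returns a pytz timezone object or raises ValueError.
  try:
    return pytz.timezone(timezone)
  except pytz.UnknownTimeZoneError:
    raise ValueError('Unsupported timezone: {0:s}'.format(timezone))

print(set_timezone('Europe/Amsterdam'))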
Deregisters an attribute container class.
The attribute container classes are identified based on their lower case
container type.
Args:
attribute_container_class (type): attribute container class.
Raises:
KeyError: if attribute container class is not set for
the corresponding container type.
|
def DeregisterAttributeContainer(cls, attribute_container_class):
container_type = attribute_container_class.CONTAINER_TYPE.lower()
if container_type not in cls._attribute_container_classes:
raise KeyError(
'Attribute container class not set for container type: '
'{0:s}.'.format(attribute_container_class.CONTAINER_TYPE))
del cls._attribute_container_classes[container_type]
| 288,774 |
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when the output filename was not provided.
|
def ParseOptions(cls, options, output_module):
if not isinstance(output_module, sqlite_4n6time.SQLite4n6TimeOutputModule):
raise errors.BadConfigObject(
'Output module is not an instance of SQLite4n6TimeOutputModule')
shared_4n6time_output.Shared4n6TimeOutputArgumentsHelper.ParseOptions(
options, output_module)
filename = getattr(options, 'write', None)
if not filename:
raise errors.BadConfigOption(
          'Output filename was not provided. Use "-w filename" to specify.')
output_module.SetFilename(filename)
| 288,776 |
Determines which events are indicated by a set of fsevents flags.
Args:
flags (int): fsevents record flags.
Returns:
str: a comma separated string containing descriptions of the flag values
stored in an fsevents record.
|
def _GetFlagValues(self, flags):
event_types = []
for event_flag, description in self._FLAG_VALUES.items():
if event_flag & flags:
event_types.append(description)
return ', '.join(event_types)
| 288,777 |
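The flag decoding above is a plain bitmask-to-description join. A self-contained sketch; the flag values below are invented and do not reflect the real fsevents _FLAG_VALUES table.

FLAG_VALUES = {
    0x01: 'Created',
    0x02: 'Removed',
    0x10: 'Renamed',
}

def get_flag_values(flags):
  event_types = []
  for event_flag, description in FLAG_VALUES.items():
    if event_flag & flags:
      event_types.append(description)
  return ', '.join(event_types)

print(get_flag_values(0x11))  # 'Created, Renamed'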
Parses an OLECF item.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
olecf_item (pyolecf.item): OLECF item.
Returns:
bool: True if an event was produced.
|
def _ParseItem(self, parser_mediator, olecf_item):
result = False
event_data = OLECFItemEventData()
event_data.name = olecf_item.name
event_data.offset = 0
event_data.size = olecf_item.size
creation_time, modification_time = self._GetTimestamps(olecf_item)
if creation_time:
date_time = dfdatetime_filetime.Filetime(timestamp=creation_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
result = True
if modification_time:
date_time = dfdatetime_filetime.Filetime(timestamp=modification_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
result = True
for sub_item in olecf_item.sub_items:
if self._ParseItem(parser_mediator, sub_item):
result = True
return result
| 288,779 |
Parses an OLECF file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
root_item (Optional[pyolecf.item]): root item of the OLECF file.
Raises:
  ValueError: if the root item is not set.
|
def Process(self, parser_mediator, root_item=None, **kwargs):
# This will raise if unhandled keyword arguments are passed.
super(DefaultOLECFPlugin, self).Process(parser_mediator, **kwargs)
if not root_item:
raise ValueError('Root item not set.')
if not self._ParseItem(parser_mediator, root_item):
event_data = OLECFItemEventData()
event_data.name = root_item.name
event_data.offset = 0
event_data.size = root_item.size
# If no event was produced, produce at least one for the root item.
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,780 |
Initializes a tagging file.
Args:
path (str): path to a file that contains one or more event tagging rules.
|
def __init__(self, path):
super(TaggingFile, self).__init__()
self._path = path
| 288,781 |
Retrieves a specific string value from the data dict.
Args:
  data_dict (dict[str, list[str]]): values per name.
name (str): name of the value to retrieve.
default_value (Optional[object]): value to return if the name has no value
set in data_dict.
Returns:
str: value represented as a string.
|
def _GetStringValue(self, data_dict, name, default_value=None):
values = data_dict.get(name, None)
if not values:
return default_value
for index, value in enumerate(values):
if ',' in value:
values[index] = '"{0:s}"'.format(value)
return ', '.join(values)
| 288,785 |
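The helper above joins multi-valued attributes into a single string and quotes any value that itself contains a comma. A short standalone sketch with illustrative data; unlike the original it copies the list instead of modifying it in place.

def get_string_value(data_dict, name, default_value=None):
  values = data_dict.get(name, None)
  if not values:
    return default_value
  values = list(values)
  for index, value in enumerate(values):
    if ',' in value:
      values[index] = '"{0:s}"'.format(value)
  return ', '.join(values)

data_dict = {'doc_type': ['application/pdf', 'letter, draft']}
print(get_string_value(data_dict, 'doc_type'))
# application/pdf, "letter, draft"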
Parses a CUPS IPP attribute from a file-like object.
Args:
file_object (dfvfs.FileIO): file-like object.
Returns:
tuple[str, object]: attribute name and value.
Raises:
ParseError: if the attribute cannot be parsed.
|
def _ParseAttribute(self, file_object):
file_offset = file_object.tell()
attribute_map = self._GetDataTypeMap('cups_ipp_attribute')
try:
attribute, _ = self._ReadStructureFromFileObject(
file_object, file_offset, attribute_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse attribute with error: {0!s}'.format(exception))
value = None
if attribute.tag_value in self._INTEGER_TAG_VALUES:
# TODO: correct file offset to point to the start of value_data.
value = self._ParseIntegerValue(attribute.value_data, file_offset)
elif attribute.tag_value == self._TAG_VALUE_BOOLEAN:
value = self._ParseBooleanValue(attribute.value_data)
elif attribute.tag_value == self._TAG_VALUE_DATE_TIME:
# TODO: correct file offset to point to the start of value_data.
value = self._ParseDateTimeValue(attribute.value_data, file_offset)
elif attribute.tag_value in self._STRING_WITHOUT_LANGUAGE_VALUES:
value = attribute.value_data.decode(self._last_charset_attribute)
elif attribute.tag_value in self._ASCII_STRING_VALUES:
value = attribute.value_data.decode('ascii')
if attribute.tag_value == self._TAG_VALUE_CHARSET:
self._last_charset_attribute = value
else:
value = attribute.value_data
return attribute.name, value
| 288,786 |
Parses a CUPS IPP attributes group from a file-like object.
Args:
file_object (dfvfs.FileIO): file-like object.
Yields:
tuple[str, object]: attribute name and value.
Raises:
ParseError: if the attributes group cannot be parsed.
|
def _ParseAttributesGroup(self, file_object):
tag_value_map = self._GetDataTypeMap('int8')
tag_value = 0
while tag_value != self._DELIMITER_TAG_END_OF_ATTRIBUTES:
file_offset = file_object.tell()
tag_value, _ = self._ReadStructureFromFileObject(
file_object, file_offset, tag_value_map)
if tag_value >= 0x10:
file_object.seek(file_offset, os.SEEK_SET)
yield self._ParseAttribute(file_object)
elif (tag_value != self._DELIMITER_TAG_END_OF_ATTRIBUTES and
tag_value not in self._DELIMITER_TAGS):
raise errors.ParseError((
'Unsupported attributes groups start tag value: '
'0x{0:02x}.').format(tag_value))
| 288,787 |
Parses a boolean value.
Args:
byte_stream (bytes): byte stream.
Returns:
bool: boolean value.
Raises:
ParseError: when the boolean value cannot be parsed.
|
def _ParseBooleanValue(self, byte_stream):
if byte_stream == b'\x00':
return False
if byte_stream == b'\x01':
return True
raise errors.ParseError('Unsupported boolean value.')
| 288,788 |
Parses a CUPS IPP RFC2579 date-time value from a byte stream.
Args:
byte_stream (bytes): byte stream.
file_offset (int): offset of the attribute data relative to the start of
the file-like object.
Returns:
dfdatetime.RFC2579DateTime: RFC2579 date-time stored in the value.
Raises:
ParseError: when the RFC2579 date-time value cannot be parsed.
|
def _ParseDateTimeValue(self, byte_stream, file_offset):
datetime_value_map = self._GetDataTypeMap('cups_ipp_datetime_value')
try:
value = self._ReadStructureFromByteStream(
byte_stream, file_offset, datetime_value_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse datetime value with error: {0!s}'.format(exception))
direction_from_utc = chr(value.direction_from_utc)
rfc2579_date_time_tuple = (
value.year, value.month, value.day_of_month,
value.hours, value.minutes, value.seconds, value.deciseconds,
direction_from_utc, value.hours_from_utc, value.minutes_from_utc)
return dfdatetime_rfc2579_date_time.RFC2579DateTime(
rfc2579_date_time_tuple=rfc2579_date_time_tuple)
| 288,789 |
Parses an integer value.
Args:
byte_stream (bytes): byte stream.
file_offset (int): offset of the attribute data relative to the start of
the file-like object.
Returns:
int: integer value.
Raises:
ParseError: when the integer value cannot be parsed.
|
def _ParseIntegerValue(self, byte_stream, file_offset):
data_type_map = self._GetDataTypeMap('int32be')
try:
return self._ReadStructureFromByteStream(
byte_stream, file_offset, data_type_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse integer value with error: {0!s}'.format(exception))
| 288,790 |
Parses a CUPS IPP header from a file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the header cannot be parsed.
|
def _ParseHeader(self, parser_mediator, file_object):
header_map = self._GetDataTypeMap('cups_ipp_header')
try:
header, _ = self._ReadStructureFromFileObject(file_object, 0, header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile(
'[{0:s}] Unable to parse header with error: {1!s}'.format(
self.NAME, exception))
format_version = '{0:d}.{1:d}'.format(
header.major_version, header.minor_version)
if format_version not in self._SUPPORTED_FORMAT_VERSIONS:
raise errors.UnableToParseFile(
'[{0:s}] Unsupported format version {1:s}.'.format(
self.NAME, format_version))
if header.operation_identifier != 5:
# TODO: generate ExtractionWarning instead of printing debug output.
display_name = parser_mediator.GetDisplayName()
logger.debug((
'[{0:s}] Non-standard operation identifier: 0x{1:08x} in file header '
'of: {2:s}.').format(
self.NAME, header.operation_identifier, display_name))
| 288,791 |
Parses a CUPS IPP file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
def ParseFileObject(self, parser_mediator, file_object):
self._last_charset_attribute = 'ascii'
self._ParseHeader(parser_mediator, file_object)
data_dict = {}
time_dict = {}
try:
for name, value in self._ParseAttributesGroup(file_object):
name = self._ATTRIBUTE_NAME_TRANSLATION.get(name, name)
if name in self._DATE_TIME_VALUE_NAMES:
time_dict.setdefault(name, []).append(value)
else:
data_dict.setdefault(name, []).append(value)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse attributes with error: {0!s}'.format(exception))
return
event_data = CupsIppEventData()
event_data.application = self._GetStringValue(data_dict, 'application')
event_data.computer_name = self._GetStringValue(data_dict, 'computer_name')
event_data.copies = data_dict.get('copies', [0])[0]
event_data.data_dict = data_dict
event_data.doc_type = self._GetStringValue(data_dict, 'doc_type')
event_data.job_id = self._GetStringValue(data_dict, 'job_id')
event_data.job_name = self._GetStringValue(data_dict, 'job_name')
event_data.user = self._GetStringValue(data_dict, 'user')
event_data.owner = self._GetStringValue(data_dict, 'owner')
event_data.printer_id = self._GetStringValue(data_dict, 'printer_id')
event_data.uri = self._GetStringValue(data_dict, 'uri')
for name, usage in iter(self._DATE_TIME_VALUES.items()):
for date_time in time_dict.get(name, []):
event = time_events.DateTimeValuesEvent(date_time, usage)
parser_mediator.ProduceEventWithEventData(event, event_data)
for name, usage in iter(self._POSIX_TIME_VALUES.items()):
for time_value in time_dict.get(name, []):
date_time = dfdatetime_posix_time.PosixTime(timestamp=time_value)
event = time_events.DateTimeValuesEvent(date_time, usage)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,792 |
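Attribute values above are grouped into per-name lists with dict.setdefault, with the date and time names routed to a separate dictionary. A reduced sketch; the attribute stream below is invented.

DATE_TIME_VALUE_NAMES = ('date-time-at-creation',)

attributes = [
    ('printer-uri', 'ipp://localhost/printers/test'),
    ('copies', 2),
    ('date-time-at-creation', '2013-03-27T20:45:00'),
    ('copies', 3),
]

data_dict = {}
time_dict = {}
for name, value in attributes:
  if name in DATE_TIME_VALUE_NAMES:
    time_dict.setdefault(name, []).append(value)
  else:
    data_dict.setdefault(name, []).append(value)

print(data_dict['copies'])  # [2, 3]
print(time_dict)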
Creates an event tag.
Args:
event (EventObject): event to tag.
comment (str): event tag comment.
labels (list[str]): event tag labels.
Returns:
EventTag: the event tag.
|
def _CreateEventTag(self, event, comment, labels):
event_identifier = event.GetIdentifier()
event_tag = events.EventTag(comment=comment)
event_tag.SetEventIdentifier(event_identifier)
event_tag.AddLabels(labels)
event_identifier_string = event_identifier.CopyToString()
logger.debug('Created event tag: {0:s} for event: {1:s}'.format(
comment, event_identifier_string))
return event_tag
| 288,794 |
Initializes a hash tagging analysis plugin.
Args:
analyzer_class (type): a subclass of HashAnalyzer that will be
instantiated by the plugin.
|
def __init__(self, analyzer_class):
super(HashTaggingAnalysisPlugin, self).__init__()
self._analysis_queue_timeout = self.DEFAULT_QUEUE_TIMEOUT
self._analyzer_started = False
self._comment = 'Tag applied by {0:s} analysis plugin'.format(self.NAME)
self._event_identifiers_by_pathspec = collections.defaultdict(list)
self._hash_pathspecs = collections.defaultdict(list)
self._requester_class = None
self._time_of_last_status_log = time.time()
self.hash_analysis_queue = Queue.Queue()
self.hash_queue = Queue.Queue()
self._analyzer = analyzer_class(self.hash_queue, self.hash_analysis_queue)
| 288,795 |
Evaluates whether an event contains the right data for a hash lookup.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event.
|
def ExamineEvent(self, mediator, event):
self._EnsureRequesterStarted()
path_spec = event.pathspec
event_identifiers = self._event_identifiers_by_pathspec[path_spec]
event_identifier = event.GetIdentifier()
event_identifiers.append(event_identifier)
if event.data_type not in self.DATA_TYPES or not self._analyzer.lookup_hash:
return
lookup_hash = '{0:s}_hash'.format(self._analyzer.lookup_hash)
lookup_hash = getattr(event, lookup_hash, None)
if not lookup_hash:
display_name = mediator.GetDisplayNameForPathSpec(path_spec)
logger.warning((
'Lookup hash attribute: {0:s}_hash missing from event that '
'originated from: {1:s}.').format(
self._analyzer.lookup_hash, display_name))
return
path_specs = self._hash_pathspecs[lookup_hash]
path_specs.append(path_spec)
    # There may be multiple path specifications that have the same hash.
    # We only want to look them up once.
if len(path_specs) == 1:
self.hash_queue.put(lookup_hash)
| 288,798 |
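The deduplication above enqueues a hash only the first time a path specification is recorded for it, so shared hashes are looked up once. A standalone sketch using the Python 3 queue module; the hashes and path specifications are invented.

import collections
import queue

hash_pathspecs = collections.defaultdict(list)
hash_queue = queue.Queue()

events = [
    ('pathspec-1', 'aa' * 32),
    ('pathspec-2', 'aa' * 32),  # same hash, different file
    ('pathspec-3', 'bb' * 32),
]

for path_spec, lookup_hash in events:
  path_specs = hash_pathspecs[lookup_hash]
  path_specs.append(path_spec)
  # Only enqueue the hash the first time it is seen.
  if len(path_specs) == 1:
    hash_queue.put(lookup_hash)

print(hash_queue.qsize())  # 2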
Compiles an analysis report.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
Returns:
AnalysisReport: report.
|
def CompileReport(self, mediator):
# TODO: refactor to update the counter on demand instead of
# during reporting.
path_specs_per_labels_counter = collections.Counter()
tags = []
while self._ContinueReportCompilation():
try:
self._LogProgressUpdateIfReasonable()
hash_analysis = self.hash_analysis_queue.get(
timeout=self._analysis_queue_timeout)
except Queue.Empty:
# The result queue is empty, but there could still be items that need
# to be processed by the analyzer.
continue
pathspecs, labels, new_tags = self._HandleHashAnalysis(
hash_analysis)
tags.extend(new_tags)
for label in labels:
path_specs_per_labels_counter[label] += len(pathspecs)
self._analyzer.SignalAbort()
lines_of_text = ['{0:s} hash tagging results'.format(self.NAME)]
for label, count in sorted(path_specs_per_labels_counter.items()):
line_of_text = (
'{0:d} path specifications tagged with label: {1:s}'.format(
count, label))
lines_of_text.append(line_of_text)
lines_of_text.append('')
report_text = '\n'.join(lines_of_text)
for event_tag in tags:
mediator.ProduceEventTag(event_tag)
return reports.AnalysisReport(
plugin_name=self.NAME, text=report_text)
| 288,801 |
Initializes a hash analyzer.
Args:
hash_queue (Queue.queue): contains hashes to be analyzed.
hash_analysis_queue (Queue.queue): queue that the analyzer will append
HashAnalysis objects to.
hashes_per_batch (Optional[int]): number of hashes to analyze at once.
lookup_hash (Optional[str]): name of the hash attribute to look up.
wait_after_analysis (Optional[int]): number of seconds to wait after each
batch is analyzed.
|
def __init__(
self, hash_queue, hash_analysis_queue, hashes_per_batch=1,
lookup_hash='sha256', wait_after_analysis=0):
super(HashAnalyzer, self).__init__()
self._abort = False
self._hash_queue = hash_queue
self._hash_analysis_queue = hash_analysis_queue
self.analyses_performed = 0
self.hashes_per_batch = hashes_per_batch
self.lookup_hash = lookup_hash
self.seconds_spent_analyzing = 0
self.wait_after_analysis = wait_after_analysis
| 288,803 |
Retrieves a list of items from a queue.
Args:
target_queue (Queue.queue): queue to retrieve hashes from.
max_hashes (int): maximum number of items to retrieve from the
target_queue.
Returns:
list[object]: list of at most max_hashes elements from the target_queue.
The list may have no elements if the target_queue is empty.
|
def _GetHashes(self, target_queue, max_hashes):
hashes = []
for _ in range(0, max_hashes):
try:
item = target_queue.get_nowait()
except Queue.Empty:
continue
hashes.append(item)
return hashes
| 288,804 |
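The batching helper above drains up to max_hashes items without blocking. A runnable sketch of the same pattern using the Python 3 queue module, whereas the original imports the Python 2 Queue module.

import queue

def get_hashes(target_queue, max_hashes):
  hashes = []
  for _ in range(0, max_hashes):
    try:
      item = target_queue.get_nowait()
    except queue.Empty:
      continue
    hashes.append(item)
  return hashes

hash_queue = queue.Queue()
for value in ('hash1', 'hash2', 'hash3'):
  hash_queue.put(value)

print(get_hashes(hash_queue, 5))  # ['hash1', 'hash2', 'hash3']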
Sets the hash to query.
Args:
lookup_hash (str): name of the hash attribute to look up.
Raises:
ValueError: if the lookup hash is not supported.
|
def SetLookupHash(self, lookup_hash):
if lookup_hash not in self.SUPPORTED_HASHES:
raise ValueError('Unsupported lookup hash: {0!s}'.format(lookup_hash))
self.lookup_hash = lookup_hash
| 288,806 |
Initializes a HTTP hash analyzer.
Args:
hash_queue (Queue.queue): a queue that contains hashes to be analyzed.
hash_analysis_queue (Queue.queue): queue that the analyzer will append
HashAnalysis objects to.
|
def __init__(self, hash_queue, hash_analysis_queue, **kwargs):
super(HTTPHashAnalyzer, self).__init__(
hash_queue, hash_analysis_queue, **kwargs)
self._checked_for_old_python_version = False
| 288,807 |
Initializes analysis information about a hash.
Args:
subject_hash (str): hash that the hash_information relates to.
hash_information (object): information about the hash. This object will be
used by the GenerateLabels method in the HashTaggingAnalysisPlugin
to tag events that relate to the hash.
|
def __init__(self, subject_hash, hash_information):
self.hash_information = hash_information
self.subject_hash = subject_hash
| 288,810 |
Parses an application usage row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
|
def ParseApplicationUsageRow(
self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
# TODO: replace usage by definition(s) in eventdata. Not sure which values
# it will hold here.
application_name = self._GetRowValue(query_hash, row, 'event')
usage = 'Application {0:s}'.format(application_name)
event_data = MacOSApplicationUsageEventData()
event_data.application = self._GetRowValue(query_hash, row, 'app_path')
event_data.app_version = self._GetRowValue(query_hash, row, 'app_version')
event_data.bundle_id = self._GetRowValue(query_hash, row, 'bundle_id')
event_data.count = self._GetRowValue(query_hash, row, 'number_times')
event_data.query = query
timestamp = self._GetRowValue(query_hash, row, 'last_time')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, usage)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,812 |
Initializes the time slice.
Args:
event_timestamp (int): event timestamp of the time slice or None.
duration (Optional[int]): duration of the time slice in minutes.
    The default is 5, which represents 2.5 minutes before and 2.5 minutes
after the event timestamp.
|
def __init__(self, event_timestamp, duration=5):
super(TimeSlice, self).__init__()
self.duration = duration
self.event_timestamp = event_timestamp
| 288,813 |
Processes a file-like object with analyzers.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_object (dfvfs.FileIO): file-like object to process.
|
def _AnalyzeFileObject(self, mediator, file_object):
maximum_read_size = max([
analyzer_object.SIZE_LIMIT for analyzer_object in self._analyzers])
hashers_only = True
for analyzer_object in self._analyzers:
if not isinstance(analyzer_object, hashing_analyzer.HashingAnalyzer):
hashers_only = False
break
file_size = file_object.get_size()
if (hashers_only and self._hasher_file_size_limit and
file_size > self._hasher_file_size_limit):
return
file_object.seek(0, os.SEEK_SET)
data = file_object.read(maximum_read_size)
while data:
if self._abort:
break
for analyzer_object in self._analyzers:
if self._abort:
break
if (not analyzer_object.INCREMENTAL_ANALYZER and
file_size > analyzer_object.SIZE_LIMIT):
continue
if (isinstance(analyzer_object, hashing_analyzer.HashingAnalyzer) and
self._hasher_file_size_limit and
file_size > self._hasher_file_size_limit):
continue
self.processing_status = analyzer_object.PROCESSING_STATUS_HINT
analyzer_object.Analyze(data)
self.last_activity_timestamp = time.time()
data = file_object.read(maximum_read_size)
display_name = mediator.GetDisplayName()
for analyzer_object in self._analyzers:
if self._abort:
break
for result in analyzer_object.GetResults():
logger.debug((
'[AnalyzeFileObject] attribute {0:s}:{1:s} calculated for '
'file: {2:s}.').format(
result.attribute_name, result.attribute_value, display_name))
mediator.AddEventAttribute(
result.attribute_name, result.attribute_value)
analyzer_object.Reset()
self.processing_status = definitions.STATUS_INDICATOR_RUNNING
| 288,818 |
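The analysis loop above reads the data stream in chunks sized to the largest analyzer read size and feeds every chunk to every analyzer before reading the next one. A much reduced, standalone sketch with hashlib objects standing in for incremental analyzers.

import hashlib
import io

analyzers = {'md5': hashlib.md5(), 'sha256': hashlib.sha256()}
maximum_read_size = 16 * 1024 * 1024

file_object = io.BytesIO(b'example file content')
file_object.seek(0, io.SEEK_SET)

data = file_object.read(maximum_read_size)
while data:
  for analyzer_object in analyzers.values():
    analyzer_object.update(data)
  data = file_object.read(maximum_read_size)

for name, analyzer_object in analyzers.items():
  print(name, analyzer_object.hexdigest())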
Determines if analysis and extraction of a data stream can be skipped.
This is used to prevent Plaso from trying to run analyzers or extract
content from a pipe or socket it encounters while processing a mounted
file system.
Args:
file_entry (dfvfs.FileEntry): file entry to consider for skipping.
data_stream (dfvfs.DataStream): data stream to consider for skipping.
Returns:
bool: True if the data stream can be skipped.
|
def _CanSkipDataStream(self, file_entry, data_stream):
if file_entry.IsFile():
return False
if data_stream.IsDefault():
return True
return False
| 288,819 |
Determines if content extraction of a file entry can be skipped.
Args:
file_entry (dfvfs.FileEntry): file entry of which to determine content
extraction can be skipped.
Returns:
bool: True if content extraction can be skipped.
|
def _CanSkipContentExtraction(self, file_entry):
# TODO: make this filtering solution more generic. Also see:
# https://github.com/log2timeline/plaso/issues/467
location = getattr(file_entry.path_spec, 'location', None)
if not location:
return False
data_stream_name = getattr(file_entry.path_spec, 'data_stream', None)
if data_stream_name:
return False
file_system = file_entry.GetFileSystem()
path_segments = file_system.SplitPath(location)
if not path_segments:
return False
if self._CHROME_CACHE_DATA_FILE_RE.match(path_segments[-1]):
location_segments = path_segments[:-1]
location_segments.append('index')
location = file_system.JoinPath(location_segments)
index_path_spec = path_spec_factory.Factory.NewPathSpec(
file_entry.type_indicator, location=location,
parent=file_entry.path_spec.parent)
if file_system.FileEntryExistsByPathSpec(index_path_spec):
        # TODO: improve this check to determine if "index" is a Chrome
        # Cache index file.
return True
elif self._FIREFOX_CACHE_DATA_FILE_RE.match(path_segments[-1]):
location_segments = path_segments[:-4]
location_segments.append('_CACHE_MAP_')
location = file_system.JoinPath(location_segments)
cache_map_path_spec = path_spec_factory.Factory.NewPathSpec(
file_entry.type_indicator, location=location,
parent=file_entry.path_spec.parent)
if file_system.FileEntryExistsByPathSpec(cache_map_path_spec):
        # TODO: improve this check to determine if "_CACHE_MAP_" is a
        # Firefox Cache version 1 cache map file.
return True
elif self._FIREFOX_CACHE2_DATA_FILE_RE.match(path_segments[-1]):
location_segments = path_segments[:-2]
location_segments.append('index')
location = file_system.JoinPath(location_segments)
index_path_spec = path_spec_factory.Factory.NewPathSpec(
file_entry.type_indicator, location=location,
parent=file_entry.path_spec.parent)
if file_system.FileEntryExistsByPathSpec(index_path_spec):
        # TODO: improve this check to determine if "index" is a Firefox
        # Cache version 2 index file.
return True
elif len(path_segments) == 1 and path_segments[0].lower() in (
'hiberfil.sys', 'pagefile.sys', 'swapfile.sys'):
return True
return False
| 288,820 |
Extracts content from a data stream.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry to extract its content.
data_stream_name (str): name of the data stream whose content is to be
extracted.
|
def _ExtractContentFromDataStream(
self, mediator, file_entry, data_stream_name):
self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING
if self._processing_profiler:
self._processing_profiler.StartTiming('extracting')
self._event_extractor.ParseDataStream(
mediator, file_entry, data_stream_name)
if self._processing_profiler:
self._processing_profiler.StopTiming('extracting')
self.processing_status = definitions.STATUS_INDICATOR_RUNNING
self.last_activity_timestamp = time.time()
| 288,821 |
Extracts metadata from a file entry.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry to extract metadata from.
data_stream (dfvfs.DataStream): data stream or None if the file entry
has no data stream.
|
def _ExtractMetadataFromFileEntry(self, mediator, file_entry, data_stream):
# Do not extract metadata from the root file entry when it is virtual.
if file_entry.IsRoot() and file_entry.type_indicator not in (
self._TYPES_WITH_ROOT_METADATA):
return
# We always want to extract the file entry metadata but we only want
# to parse it once per file entry, so we only use it if we are
# processing the default data stream of regular files.
if data_stream and not data_stream.IsDefault():
return
display_name = mediator.GetDisplayName()
logger.debug(
'[ExtractMetadataFromFileEntry] processing file entry: {0:s}'.format(
display_name))
self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING
if self._processing_profiler:
self._processing_profiler.StartTiming('extracting')
self._event_extractor.ParseFileEntryMetadata(mediator, file_entry)
if self._processing_profiler:
self._processing_profiler.StopTiming('extracting')
self.processing_status = definitions.STATUS_INDICATOR_RUNNING
| 288,822 |
Determines if a data stream contains an archive such as TAR or ZIP.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
path_spec (dfvfs.PathSpec): path specification of the data stream.
Returns:
list[str]: dfVFS archive type indicators found in the data stream.
|
def _GetArchiveTypes(self, mediator, path_spec):
try:
type_indicators = analyzer.Analyzer.GetArchiveTypeIndicators(
path_spec, resolver_context=mediator.resolver_context)
except IOError as exception:
type_indicators = []
warning_message = (
'analyzer failed to determine archive type indicators '
'with error: {0!s}').format(exception)
mediator.ProduceExtractionWarning(warning_message, path_spec=path_spec)
return type_indicators
| 288,823 |
Determines if a data stream contains a compressed stream such as gzip.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
path_spec (dfvfs.PathSpec): path specification of the data stream.
Returns:
list[str]: dfVFS compressed stream type indicators found in
the data stream.
|
def _GetCompressedStreamTypes(self, mediator, path_spec):
try:
type_indicators = analyzer.Analyzer.GetCompressedStreamTypeIndicators(
path_spec, resolver_context=mediator.resolver_context)
except IOError as exception:
type_indicators = []
warning_message = (
'analyzer failed to determine compressed stream type indicators '
'with error: {0!s}').format(exception)
mediator.ProduceExtractionWarning(warning_message, path_spec=path_spec)
return type_indicators
| 288,824 |