docstring (stringlengths 52 to 499) | function (stringlengths 67 to 35.2k) | __index_level_0__ (int64 52.6k to 1.16M)
---|---|---|
Prints information about the tasks.
Args:
storage_reader (StorageReader): storage reader.
|
def _PrintTasksInformation(self, storage_reader):
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, title='Tasks')
for task_start, _ in storage_reader.GetSessions():
start_time = timelib.Timestamp.CopyToIsoFormat(
task_start.timestamp)
task_identifier = uuid.UUID(hex=task_start.identifier)
task_identifier = '{0!s}'.format(task_identifier)
table_view.AddRow([task_identifier, start_time])
table_view.Write(self._output_writer)
| 288,403 |
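A small standalone illustration of the identifier formatting used above: the stored hex digest is turned into a canonical UUID string. The hex value here is made up:

import uuid

task_identifier = uuid.UUID(hex='3a1b2c3d4e5f40718293a4b5c6d7e8f9')
print('{0!s}'.format(task_identifier))  # 3a1b2c3d-4e5f-4071-8293-a4b5c6d7e8f9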
Parses the options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
|
def ParseOptions(self, options):
self._ParseInformationalOptions(options)
self._verbose = getattr(options, 'verbose', False)
self._output_filename = getattr(options, 'write', None)
argument_helper_names = ['process_resources', 'storage_file']
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=argument_helper_names)
# TODO: move check into _CheckStorageFile.
if not self._storage_file_path:
raise errors.BadConfigOption('Missing storage file option.')
if not os.path.isfile(self._storage_file_path):
raise errors.BadConfigOption(
'No such storage file: {0:s}.'.format(self._storage_file_path))
compare_storage_file_path = self.ParseStringOption(
options, 'compare_storage_file')
if compare_storage_file_path:
if not os.path.isfile(compare_storage_file_path):
raise errors.BadConfigOption(
'No such storage file: {0:s}.'.format(compare_storage_file_path))
self._compare_storage_file_path = compare_storage_file_path
self.compare_storage_information = True
self._output_format = self.ParseStringOption(options, 'output_format')
if self._output_filename:
if os.path.exists(self._output_filename):
raise errors.BadConfigOption(
'Output file already exists: {0:s}.'.format(self._output_filename))
output_file_object = open(self._output_filename, 'wb')
self._output_writer = tools.FileObjectOutputWriter(output_file_object)
self._EnforceProcessMemoryLimit(self._process_memory_limit)
| 288,406 |
Retrieves the file system type indicator of a file entry.
Args:
file_entry (dfvfs.FileEntry): a file entry.
Returns:
str: file system type.
|
def _GetFileSystemTypeFromFileEntry(self, file_entry):
if file_entry.type_indicator != dfvfs_definitions.TYPE_INDICATOR_TSK:
return file_entry.type_indicator
# TODO: Implement fs_type in dfVFS and remove this implementation
# once that is in place.
file_system = file_entry.GetFileSystem()
fs_info = file_system.GetFsInfo()
if fs_info.info:
type_string = '{0!s}'.format(fs_info.info.ftype)
if type_string.startswith('TSK_FS_TYPE_'):
type_string = type_string[12:]
if type_string.endswith('_DETECT'):
type_string = type_string[:-7]
return type_string
| 288,409 |
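To make the string manipulation above concrete, a minimal sketch that strips the pyTSK prefix and detection suffix from a type string; the example inputs are illustrative, not taken from dfVFS:

def StripTSKTypeString(type_string):
  # 'TSK_FS_TYPE_NTFS_DETECT' -> 'NTFS_DETECT' -> 'NTFS'
  if type_string.startswith('TSK_FS_TYPE_'):
    type_string = type_string[12:]
  if type_string.endswith('_DETECT'):
    type_string = type_string[:-7]
  return type_string

print(StripTSKTypeString('TSK_FS_TYPE_NTFS_DETECT'))  # NTFS
print(StripTSKTypeString('TSK_FS_TYPE_EXT4'))         # EXT4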
Parses a file entry.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_entry (dfvfs.FileEntry): a file entry.
|
def ParseFileEntry(self, parser_mediator, file_entry):
stat_object = file_entry.GetStat()
if not stat_object:
return
file_system_type = self._GetFileSystemTypeFromFileEntry(file_entry)
event_data = FileStatEventData()
event_data.file_entry_type = stat_object.type
event_data.file_size = getattr(stat_object, 'size', None)
event_data.file_system_type = file_system_type
event_data.is_allocated = file_entry.IsAllocated()
if file_entry.access_time:
event = time_events.DateTimeValuesEvent(
file_entry.access_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
parser_mediator.ProduceEventWithEventData(event, event_data)
if file_entry.creation_time:
event = time_events.DateTimeValuesEvent(
file_entry.creation_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
if file_entry.change_time:
event = time_events.DateTimeValuesEvent(
file_entry.change_time, definitions.TIME_DESCRIPTION_CHANGE)
parser_mediator.ProduceEventWithEventData(event, event_data)
if file_entry.modification_time:
event = time_events.DateTimeValuesEvent(
file_entry.modification_time,
definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
for time_attribute, usage in self._TIMESTAMP_DESCRIPTIONS.items():
posix_time = getattr(stat_object, time_attribute, None)
if posix_time is None:
continue
nano_time_attribute = '{0:s}_nano'.format(time_attribute)
nano_time_attribute = getattr(stat_object, nano_time_attribute, None)
timestamp = posix_time * 1000000
if nano_time_attribute is not None:
# Note that the _nano values are in intervals of 100 nanoseconds.
micro_time_attribute, _ = divmod(nano_time_attribute, 10)
timestamp += micro_time_attribute
# TSK will return 0 if the timestamp is not set.
if (file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK and
not timestamp):
continue
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, usage)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,410 |
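A minimal worked example of the timestamp arithmetic above: the POSIX time in seconds is scaled to microseconds and the '_nano' value, stored in 100-nanosecond intervals, contributes its whole microseconds. The input values are made up:

posix_time = 1325376000   # hypothetical: 2012-01-01 00:00:00 UTC
nano_time = 1234567       # stored in intervals of 100 nanoseconds

timestamp = posix_time * 1000000         # seconds -> microseconds
microseconds, _ = divmod(nano_time, 10)  # 100ns intervals -> whole microseconds
timestamp += microseconds

print(timestamp)  # 1325376000123456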
Formats an argument token as a dictionary of values.
Args:
token_data (bsm_token_data_arg32|bsm_token_data_arg64): AUT_ARG32 or
AUT_ARG64 token data.
Returns:
dict[str, str]: token values.
|
def _FormatArgToken(self, token_data):
return {
'string': token_data.argument_value.rstrip('\x00'),
'num_arg': token_data.argument_index,
'is': token_data.argument_name}
| 288,412 |
Formats an attribute token as a dictionary of values.
Args:
token_data (bsm_token_data_attr32|bsm_token_data_attr64): AUT_ATTR32 or
AUT_ATTR64 token data.
Returns:
dict[str, str]: token values.
|
def _FormatAttrToken(self, token_data):
return {
'mode': token_data.file_mode,
'uid': token_data.user_identifier,
'gid': token_data.group_identifier,
'system_id': token_data.file_system_identifier,
'node_id': token_data.file_identifier,
'device': token_data.device}
| 288,413 |
Formats a data token as a dictionary of values.
Args:
token_data (bsm_token_data_data): AUT_DATA token data.
Returns:
dict[str, str]: token values.
|
def _FormatDataToken(self, token_data):
format_string = bsmtoken.BSM_TOKEN_DATA_PRINT.get(
token_data.data_format, 'UNKNOWN')
if token_data.data_format == 4:
data = bytes(bytearray(token_data.data)).split(b'\x00')[0]
data = data.decode('utf-8')
else:
data = ''.join(['{0:02x}'.format(byte) for byte in token_data.data])
return {
'format': format_string,
'data': data}
| 288,414 |
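For reference, a tiny sketch of the two renderings used above, with made-up byte values: a format 4 payload is treated as a NUL-terminated UTF-8 string, anything else is rendered as a hexadecimal string:

string_payload = b'AUDIT\x00\x00pad'
print(string_payload.split(b'\x00')[0].decode('utf-8'))  # AUDIT

binary_payload = bytearray(b'\x0a\x1b\xff')
print(''.join(['{0:02x}'.format(byte) for byte in binary_payload]))  # 0a1bff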
Formats an extended IPv4 address token as a dictionary of values.
Args:
token_data (bsm_token_data_in_addr_ex): AUT_IN_ADDR_EX token data.
Returns:
dict[str, str]: token values.
|
def _FormatInAddrExToken(self, token_data):
protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.net_type, 'UNKNOWN')
if token_data.net_type == 4:
ip_address = self._FormatPackedIPv6Address(token_data.ip_address[:4])
elif token_data.net_type == 16:
ip_address = self._FormatPackedIPv6Address(token_data.ip_address)
return {
'protocols': protocol,
'net_type': token_data.net_type,
'address': ip_address}
| 288,415 |
Formats an IPC permissions token as a dictionary of values.
Args:
token_data (bsm_token_data_ipc_perm): AUT_IPC_PERM token data.
Returns:
dict[str, str]: token values.
|
def _FormatIPCPermToken(self, token_data):
return {
'user_id': token_data.user_identifier,
'group_id': token_data.group_identifier,
'creator_user_id': token_data.creator_user_identifier,
'creator_group_id': token_data.creator_group_identifier,
'access': token_data.access_mode}
| 288,416 |
Formats an IPv4 packet header token as a dictionary of values.
Args:
token_data (bsm_token_data_ip): AUT_IP token data.
Returns:
dict[str, str]: token values.
|
def _FormatIPToken(self, token_data):
data = ''.join(['{0:02x}'.format(byte) for byte in token_data.data])
return {'IPv4_Header': data}
| 288,417 |
Formats an opaque token as a dictionary of values.
Args:
token_data (bsm_token_data_opaque): AUT_OPAQUE token data.
Returns:
dict[str, str]: token values.
|
def _FormatOpaqueToken(self, token_data):
data = ''.join(['{0:02x}'.format(byte) for byte in token_data.data])
return {'data': data}
| 288,418 |
Formats an other file token as a dictionary of values.
Args:
token_data (bsm_token_data_other_file32): AUT_OTHER_FILE32 token data.
Returns:
dict[str, str]: token values.
|
def _FormatOtherFileToken(self, token_data):
# TODO: if this timestamp is useful, it must be extracted as a separate
# event object.
timestamp = token_data.microseconds + (
token_data.timestamp * definitions.MICROSECONDS_PER_SECOND)
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
date_time_string = date_time.CopyToDateTimeString()
return {
'string': token_data.name.rstrip('\x00'),
'timestamp': date_time_string}
| 288,419 |
Formats a return or exit token as a dictionary of values.
Args:
token_data (bsm_token_data_exit|bsm_token_data_return32|
bsm_token_data_return64): AUT_EXIT, AUT_RETURN32 or
AUT_RETURN64 token data.
Returns:
dict[str, str]: token values.
|
def _FormatReturnOrExitToken(self, token_data):
error_string = bsmtoken.BSM_ERRORS.get(token_data.status, 'UNKNOWN')
return {
'error': error_string,
'token_status': token_data.status,
'call_status': token_data.return_value}
| 288,420 |
Formats an extended socket token as a dictionary of values.
Args:
token_data (bsm_token_data_socket_ex): AUT_SOCKET_EX token data.
Returns:
dict[str, str]: token values.
|
def _FormatSocketExToken(self, token_data):
if token_data.socket_domain == 10:
local_ip_address = self._FormatPackedIPv6Address(
token_data.local_ip_address)
remote_ip_address = self._FormatPackedIPv6Address(
token_data.remote_ip_address)
else:
local_ip_address = self._FormatPackedIPv4Address(
token_data.local_ip_address)
remote_ip_address = self._FormatPackedIPv4Address(
token_data.remote_ip_address)
return {
'from': local_ip_address,
'from_port': token_data.local_port,
'to': remote_ip_address,
'to_port': token_data.remote_port}
| 288,421 |
Formats an Internet socket token as a dictionary of values.
Args:
token_data (bsm_token_data_sockinet32): AUT_SOCKINET32 token data.
Returns:
dict[str, str]: token values.
|
def _FormatSocketInet32Token(self, token_data):
protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.socket_family, 'UNKNOWN')
ip_address = self._FormatPackedIPv4Address(token_data.ip_addresss)
return {
'protocols': protocol,
'family': token_data.socket_family,
'port': token_data.port_number,
'address': ip_address}
| 288,422 |
Formats an Internet socket token as a dictionary of values.
Args:
token_data (bsm_token_data_sockinet64): AUT_SOCKINET128 token data.
Returns:
dict[str, str]: token values.
|
def _FormatSocketInet128Token(self, token_data):
protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.socket_family, 'UNKNOWN')
ip_address = self._FormatPackedIPv6Address(token_data.ip_addresss)
return {
'protocols': protocol,
'family': token_data.socket_family,
'port': token_data.port_number,
'address': ip_address}
| 288,423 |
Formats a Unix socket token as a dictionary of values.
Args:
token_data (bsm_token_data_sockunix): AUT_SOCKUNIX token data.
Returns:
dict[str, str]: token values.
|
def _FormatSocketUnixToken(self, token_data):
protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.socket_family, 'UNKNOWN')
return {
'protocols': protocol,
'family': token_data.socket_family,
'path': token_data.socket_path}
| 288,424 |
Formats a subject or process token as a dictionary of values.
Args:
token_data (bsm_token_data_subject32|bsm_token_data_subject64):
AUT_SUBJECT32, AUT_PROCESS32, AUT_SUBJECT64 or AUT_PROCESS64 token
data.
Returns:
dict[str, str]: token values.
|
def _FormatSubjectOrProcessToken(self, token_data):
ip_address = self._FormatPackedIPv4Address(token_data.ip_address)
return {
'aid': token_data.audit_user_identifier,
'euid': token_data.effective_user_identifier,
'egid': token_data.effective_group_identifier,
'uid': token_data.real_user_identifier,
'gid': token_data.real_group_identifier,
'pid': token_data.process_identifier,
'session_id': token_data.session_identifier,
'terminal_port': token_data.terminal_port,
'terminal_ip': ip_address}
| 288,425 |
Formats a subject or process token as a dictionary of values.
Args:
token_data (bsm_token_data_subject32_ex|bsm_token_data_subject64_ex):
AUT_SUBJECT32_EX, AUT_PROCESS32_EX, AUT_SUBJECT64_EX or
AUT_PROCESS64_EX token data.
Returns:
dict[str, str]: token values.
|
def _FormatSubjectExOrProcessExToken(self, token_data):
if token_data.net_type == 4:
ip_address = self._FormatPackedIPv4Address(token_data.ip_address)
elif token_data.net_type == 16:
ip_address = self._FormatPackedIPv6Address(token_data.ip_address)
else:
ip_address = 'unknown'
return {
'aid': token_data.audit_user_identifier,
'euid': token_data.effective_user_identifier,
'egid': token_data.effective_group_identifier,
'uid': token_data.real_user_identifier,
'gid': token_data.real_group_identifier,
'pid': token_data.process_identifier,
'session_id': token_data.session_identifier,
'terminal_port': token_data.terminal_port,
'terminal_ip': ip_address}
| 288,426 |
Formats the token data as a dictionary of values.
Args:
token_type (int): token type.
token_data (object): token data.
Returns:
dict[str, str]: formatted token values or an empty dictionary if no
formatted token values could be determined.
|
def _FormatTokenData(self, token_type, token_data):
token_data_format_function = self._TOKEN_DATA_FORMAT_FUNCTIONS.get(
token_type)
if token_data_format_function:
token_data_format_function = getattr(
self, token_data_format_function, None)
if not token_data_format_function:
return {}
return token_data_format_function(token_data)
| 288,427 |
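The lookup above follows a common dispatch-table pattern: token type to method name to bound method. A minimal self-contained sketch of that pattern; the class, token type values and formatters below are hypothetical, not the parser's real table:

class TokenFormatter(object):
  _TOKEN_DATA_FORMAT_FUNCTIONS = {
      0x2d: '_FormatArgToken',
      0x27: '_FormatReturnToken'}

  def _FormatArgToken(self, token_data):
    return {'num_arg': token_data['index']}

  def _FormatReturnToken(self, token_data):
    return {'call_status': token_data['return_value']}

  def FormatTokenData(self, token_type, token_data):
    function_name = self._TOKEN_DATA_FORMAT_FUNCTIONS.get(token_type)
    function = getattr(self, function_name, None) if function_name else None
    if not function:
      return {}
    return function(token_data)

formatter = TokenFormatter()
print(formatter.FormatTokenData(0x2d, {'index': 1}))  # {'num_arg': 1}
print(formatter.FormatTokenData(0xff, {}))            # {}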
Parses an event record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
ParseError: if the event record cannot be read.
|
def _ParseRecord(self, parser_mediator, file_object):
header_record_offset = file_object.tell()
# Check the header token type before reading the token data to prevent
# variable size tokens from consuming a large amount of memory.
token_type = self._ParseTokenType(file_object, header_record_offset)
if token_type not in self._HEADER_TOKEN_TYPES:
raise errors.ParseError(
'Unsupported header token type: 0x{0:02x}'.format(token_type))
token_type, token_data = self._ParseToken(file_object, header_record_offset)
if token_data.format_version != 11:
raise errors.ParseError('Unsupported format version type: {0:d}'.format(
token_data.format_version))
timestamp = token_data.microseconds + (
token_data.timestamp * definitions.MICROSECONDS_PER_SECOND)
event_type = token_data.event_type
header_record_size = token_data.record_size
record_end_offset = header_record_offset + header_record_size
event_tokens = []
return_token_values = None
file_offset = file_object.tell()
while file_offset < record_end_offset:
token_type, token_data = self._ParseToken(file_object, file_offset)
if not token_data:
raise errors.ParseError('Unsupported token type: 0x{0:02x}'.format(
token_type))
file_offset = file_object.tell()
if token_type == self._TOKEN_TYPE_AUT_TRAILER:
break
token_type_string = self._TOKEN_TYPES.get(token_type, 'UNKNOWN')
token_values = self._FormatTokenData(token_type, token_data)
event_tokens.append({token_type_string: token_values})
if token_type in (
self._TOKEN_TYPE_AUT_RETURN32, self._TOKEN_TYPE_AUT_RETURN64):
return_token_values = token_values
if token_data.signature != self._TRAILER_TOKEN_SIGNATURE:
raise errors.ParseError('Unsupported signature in trailer token.')
if token_data.record_size != header_record_size:
raise errors.ParseError(
'Mismatch of event record size between header and trailer token.')
event_data = BSMEventData()
event_data.event_type = event_type
event_data.extra_tokens = event_tokens
event_data.offset = header_record_offset
event_data.record_length = header_record_size
event_data.return_value = return_token_values
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,428 |
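To visualize the token loop above, a toy walk over a pre-parsed token stream: event tokens are collected until the trailer token is reached, and the values of a return token are kept separately. Token type constants and contents here are made up for illustration:

AUT_RETURN32 = 0x27  # hypothetical value
AUT_TRAILER = 0x13   # hypothetical value

tokens = [
    (0x2d, {'num_arg': 1, 'string': '/etc/passwd'}),
    (AUT_RETURN32, {'error': 'Success', 'call_status': 0}),
    (AUT_TRAILER, {'record_size': 97})]

event_tokens = []
return_token_values = None
for token_type, token_values in tokens:
  if token_type == AUT_TRAILER:
    break
  event_tokens.append({token_type: token_values})
  if token_type == AUT_RETURN32:
    return_token_values = token_values

print(return_token_values)  # {'error': 'Success', 'call_status': 0}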
Parses a token.
Args:
file_object (dfvfs.FileIO): file-like object.
file_offset (int): offset of the token relative to the start of
the file-like object.
Returns:
tuple: containing:
int: token type
object: token data or None if the token type is not supported.
|
def _ParseToken(self, file_object, file_offset):
token_type = self._ParseTokenType(file_object, file_offset)
token_data = None
token_data_map_name = self._DATA_TYPE_MAP_PER_TOKEN_TYPE.get(
token_type, None)
if token_data_map_name:
token_data_map = self._GetDataTypeMap(token_data_map_name)
token_data, _ = self._ReadStructureFromFileObject(
file_object, file_offset + 1, token_data_map)
return token_type, token_data
| 288,429 |
Parses a token type.
Args:
file_object (dfvfs.FileIO): file-like object.
file_offset (int): offset of the token relative to the start of
the file-like object.
Returns:
int: token type
|
def _ParseTokenType(self, file_object, file_offset):
token_type_map = self._GetDataTypeMap('uint8')
token_type, _ = self._ReadStructureFromFileObject(
file_object, file_offset, token_type_map)
return token_type
| 288,430 |
Parses a BSM file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
def ParseFileObject(self, parser_mediator, file_object):
file_offset = file_object.get_offset()
file_size = file_object.get_size()
while file_offset < file_size:
try:
self._ParseRecord(parser_mediator, file_object)
except errors.ParseError as exception:
if file_offset == 0:
raise errors.UnableToParseFile(
'Unable to parse first event record with error: {0!s}'.format(
exception))
# TODO: skip to next event record.
file_offset = file_object.get_offset()
| 288,431 |
Parses a line of the log file and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row_offset (int): line number of the row.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
|
def ParseRow(self, parser_mediator, row_offset, row):
time_elements_tuple = self._GetTimeElementsTuple(row['time'])
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
date_time.is_local_time = True
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(time_elements_tuple))
return
# TODO: remove unused attributes.
event_data = SymantecEventData()
event_data.access = row.get('access', None)
event_data.action0 = row.get('action0', None)
event_data.action1 = row.get('action1', None)
event_data.action1_status = row.get('action1_status', None)
event_data.action2 = row.get('action2', None)
event_data.action2_status = row.get('action2_status', None)
event_data.address = row.get('address', None)
event_data.backup_id = row.get('backup_id', None)
event_data.cat = row.get('cat', None)
event_data.cleaninfo = row.get('cleaninfo', None)
event_data.clientgroup = row.get('clientgroup', None)
event_data.compressed = row.get('compressed', None)
event_data.computer = row.get('computer', None)
event_data.definfo = row.get('definfo', None)
event_data.defseqnumber = row.get('defseqnumber', None)
event_data.deleteinfo = row.get('deleteinfo', None)
event_data.depth = row.get('depth', None)
event_data.description = row.get('description', None)
event_data.domain_guid = row.get('domain_guid', None)
event_data.domainname = row.get('domainname', None)
event_data.err_code = row.get('err_code', None)
event_data.event_data = row.get('event_data', None)
event_data.event = row.get('event', None)
event_data.extra = row.get('extra', None)
event_data.file = row.get('file', None)
event_data.flags = row.get('flags', None)
event_data.groupid = row.get('groupid', None)
event_data.guid = row.get('guid', None)
event_data.license_expiration_dt = row.get('license_expiration_dt', None)
event_data.license_feature_name = row.get('license_feature_name', None)
event_data.license_feature_ver = row.get('license_feature_ver', None)
event_data.license_fulfillment_id = row.get('license_fulfillment_id', None)
event_data.license_lifecycle = row.get('license_lifecycle', None)
event_data.license_seats_delta = row.get('license_seats_delta', None)
event_data.license_seats = row.get('license_seats', None)
event_data.license_seats_total = row.get('license_seats_total', None)
event_data.license_serial_num = row.get('license_serial_num', None)
event_data.license_start_dt = row.get('license_start_dt', None)
event_data.logger = row.get('logger', None)
event_data.login_domain = row.get('login_domain', None)
event_data.log_session_guid = row.get('log_session_guid', None)
event_data.macaddr = row.get('macaddr', None)
event_data.new_ext = row.get('new_ext', None)
event_data.ntdomain = row.get('ntdomain', None)
event_data.offset = row_offset
event_data.parent = row.get('parent', None)
event_data.quarfwd_status = row.get('quarfwd_status', None)
event_data.remote_machine_ip = row.get('remote_machine_ip', None)
event_data.remote_machine = row.get('remote_machine', None)
event_data.scanid = row.get('scanid', None)
event_data.snd_status = row.get('snd_status', None)
event_data.status = row.get('status', None)
event_data.still_infected = row.get('still_infected', None)
event_data.time = row.get('time', None)
event_data.user = row.get('user', None)
event_data.vbin_id = row.get('vbin_id', None)
event_data.vbin_session_id = row.get('vbin_session_id', None)
event_data.version = row.get('version', None)
event_data.virus_id = row.get('virus_id', None)
event_data.virus = row.get('virus', None)
event_data.virustype = row.get('virustype', None)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,434 |
Verifies if a line of the file is in the expected format.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
Returns:
bool: True if this is the correct parser, False otherwise.
|
def VerifyRow(self, parser_mediator, row):
try:
time_elements_tuple = self._GetTimeElementsTuple(row['time'])
except (TypeError, ValueError):
return False
try:
dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
return False
try:
my_event = int(row['event'], 10)
except (TypeError, ValueError):
return False
if my_event < 1 or my_event > 77:
return False
try:
category = int(row['cat'], 10)
except (TypeError, ValueError):
return False
if category < 1 or category > 4:
return False
return True
| 288,435 |
Extracts relevant Volume Configuration Spotlight entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
|
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
stores = match.get('Stores', {})
for volume_name, volume in iter(stores.items()):
datetime_value = volume.get('CreationDate', None)
if not datetime_value:
continue
partial_path = volume['PartialPath']
event_data = plist_event.PlistTimeEventData()
event_data.desc = 'Spotlight Volume {0:s} ({1:s}) activated.'.format(
volume_name, partial_path)
event_data.key = ''
event_data.root = '/Stores'
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,436 |
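A hypothetical example of the plist structure this plugin expects: a 'Stores' dictionary keyed by volume identifier, each entry carrying at least a 'CreationDate' and a 'PartialPath'. The values below are invented:

import datetime

match = {
    'Stores': {
        '4D4BFEB5-7FE6-4033-9A34-000000000000': {
            'CreationDate': datetime.datetime(2013, 6, 25, 5, 54, 49),
            'PartialPath': '/.MobileBackups'}}}

for volume_name, volume in match.get('Stores', {}).items():
  description = 'Spotlight Volume {0:s} ({1:s}) activated.'.format(
      volume_name, volume['PartialPath'])
  print(description, volume.get('CreationDate'))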
Initializes Windows Registry key filter.
Args:
user_assist_guid (str): UserAssist GUID.
|
def __init__(self, user_assist_guid):
key_path = self._KEY_PATH_FORMAT.format(user_assist_guid)
super(UserAssistWindowsRegistryKeyPathFilter, self).__init__(key_path)
| 288,438 |
Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
|
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
version_value = registry_key.GetValueByName('Version')
count_subkey = registry_key.GetSubkeyByName('Count')
if not version_value:
parser_mediator.ProduceExtractionWarning('missing version value')
return
if not version_value.DataIsInteger():
parser_mediator.ProduceExtractionWarning(
'unsupported version value data type')
return
format_version = version_value.GetDataAsObject()
if format_version not in (3, 5):
parser_mediator.ProduceExtractionWarning(
'unsupported format version: {0:d}'.format(format_version))
return
if not count_subkey:
parser_mediator.ProduceExtractionWarning('missing count subkey')
return
userassist_entry_index = 0
for registry_value in count_subkey.GetValues():
try:
# Note that Python 2 codecs.decode() does not support keyword arguments
# such as encoding='rot-13'.
value_name = codecs.decode(registry_value.name, 'rot-13')
except UnicodeEncodeError as exception:
logger.debug((
'Unable to decode UserAssist string: {0:s} with error: {1!s}.\n'
'Attempting piecewise decoding.').format(
registry_value.name, exception))
characters = []
for char in registry_value.name:
if ord(char) < 128:
try:
characters.append(char.decode('rot-13'))
except UnicodeEncodeError:
characters.append(char)
else:
characters.append(char)
value_name = ''.join(characters)
if format_version == 5:
path_segments = value_name.split('\\')
for segment_index, path_segment in enumerate(path_segments):
# Remove the { } from the path segment to get the GUID.
guid = path_segments[segment_index][1:-1]
path_segments[segment_index] = known_folder_ids.PATHS.get(
guid, path_segment)
value_name = '\\'.join(path_segments)
# Check if we might need to substitute values.
if '%' in value_name:
# TODO: fix missing self._knowledge_base
# pylint: disable=no-member
environment_variables = self._knowledge_base.GetEnvironmentVariables()
value_name = path_helper.PathHelper.ExpandWindowsPath(
value_name, environment_variables)
if value_name == 'UEME_CTLSESSION':
continue
if format_version == 3:
entry_map = self._GetDataTypeMap('user_assist_entry_v3')
elif format_version == 5:
entry_map = self._GetDataTypeMap('user_assist_entry_v5')
else:
parser_mediator.ProduceExtractionWarning(
'unsupported format version: {0:d}'.format(format_version))
continue
if not registry_value.DataIsBinaryData():
parser_mediator.ProduceExtractionWarning(
'unsupported value data type: {0:s}'.format(
registry_value.data_type_string))
continue
entry_data_size = entry_map.GetByteSize()
value_data_size = len(registry_value.data)
if entry_data_size != value_data_size:
parser_mediator.ProduceExtractionWarning(
'unsupported value data size: {0:d}'.format(value_data_size))
continue
try:
user_assist_entry = self._ReadStructureFromByteStream(
registry_value.data, 0, entry_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse UserAssist entry value with error: {0!s}'.format(
exception))
continue
event_data = UserAssistWindowsRegistryEventData()
event_data.key_path = count_subkey.path
event_data.number_of_executions = user_assist_entry.number_of_executions
event_data.value_name = value_name
if format_version == 3:
if event_data.number_of_executions > 5:
event_data.number_of_executions -= 5
elif format_version == 5:
userassist_entry_index += 1
event_data.application_focus_count = (
user_assist_entry.application_focus_count)
event_data.application_focus_duration = (
user_assist_entry.application_focus_duration)
event_data.entry_index = userassist_entry_index
timestamp = user_assist_entry.last_execution_time
if not timestamp:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
else:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_RUN)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,439 |
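The UserAssist value names are stored ROT-13 encoded, which is what the codecs.decode() call above reverses. A one-line illustration using the control-session marker the code later skips:

import codecs

value_name = codecs.decode('HRZR_PGYFRFFVBA', 'rot-13')
print(value_name)  # UEME_CTLSESSION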
Initializes a CLI tool.
Args:
input_reader (Optional[InputReader]): input reader, where None indicates
that the stdin input reader should be used.
output_writer (Optional[OutputWriter]): output writer, where None
indicates that the stdout output writer should be used.
|
def __init__(self, input_reader=None, output_writer=None):
super(ExtractionTool, self).__init__(
input_reader=input_reader, output_writer=output_writer)
self._artifacts_registry = None
self._buffer_size = 0
self._mount_path = None
self._operating_system = None
self._parser_filter_expression = None
self._preferred_year = None
self._presets_file = None
self._process_archives = False
self._process_compressed_streams = True
self._process_memory_limit = None
self._queue_size = self._DEFAULT_QUEUE_SIZE
self._resolver_context = dfvfs_context.Context()
self._single_process_mode = False
self._storage_file_path = None
self._storage_format = definitions.STORAGE_FORMAT_SQLITE
self._temporary_directory = None
self._text_prepend = None
self._use_zeromq = True
self._yara_rules_string = None
| 288,442 |
Creates a processing configuration.
Args:
knowledge_base (KnowledgeBase): contains information from the source
data needed for parsing.
Returns:
ProcessingConfiguration: processing configuration.
Raises:
BadConfigOption: if more than 1 parser and parser plugins preset
was found for the detected operating system.
|
def _CreateProcessingConfiguration(self, knowledge_base):
# TODO: pass preferred_encoding.
configuration = configurations.ProcessingConfiguration()
configuration.artifact_filters = self._artifact_filters
configuration.credentials = self._credential_configurations
configuration.debug_output = self._debug_mode
configuration.event_extraction.text_prepend = self._text_prepend
configuration.extraction.hasher_file_size_limit = (
self._hasher_file_size_limit)
configuration.extraction.hasher_names_string = self._hasher_names_string
configuration.extraction.process_archives = self._process_archives
configuration.extraction.process_compressed_streams = (
self._process_compressed_streams)
configuration.extraction.yara_rules_string = self._yara_rules_string
configuration.filter_file = self._filter_file
configuration.input_source.mount_path = self._mount_path
configuration.log_filename = self._log_file
configuration.parser_filter_expression = self._parser_filter_expression
configuration.preferred_year = self._preferred_year
configuration.profiling.directory = self._profiling_directory
configuration.profiling.sample_rate = self._profiling_sample_rate
configuration.profiling.profilers = self._profilers
configuration.temporary_directory = self._temporary_directory
if not configuration.parser_filter_expression:
operating_system = knowledge_base.GetValue('operating_system')
operating_system_product = knowledge_base.GetValue(
'operating_system_product')
operating_system_version = knowledge_base.GetValue(
'operating_system_version')
preset_definitions = (
parsers_manager.ParsersManager.GetPresetsForOperatingSystem(
operating_system, operating_system_product,
operating_system_version))
if preset_definitions:
preset_names = [
preset_definition.name for preset_definition in preset_definitions]
filter_expression = ','.join(preset_names)
logger.info('Parser filter expression set to: {0:s}'.format(
filter_expression))
configuration.parser_filter_expression = filter_expression
return configuration
| 288,443 |
Parses the performance options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
|
def _ParsePerformanceOptions(self, options):
self._buffer_size = getattr(options, 'buffer_size', 0)
if self._buffer_size:
# TODO: turn this into a generic function that supports more size
# suffixes, both MB and MiB, and that does not allow m as a valid
# indicator for MiB since m represents milli, not mega.
try:
if self._buffer_size[-1].lower() == 'm':
self._buffer_size = int(self._buffer_size[:-1], 10)
self._buffer_size *= self._BYTES_IN_A_MIB
else:
self._buffer_size = int(self._buffer_size, 10)
except ValueError:
raise errors.BadConfigOption(
'Invalid buffer size: {0!s}.'.format(self._buffer_size))
self._queue_size = self.ParseNumericOption(options, 'queue_size')
| 288,444 |
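The TODO above asks for a more general size parser. A possible sketch, not part of the tool, that understands a few binary and decimal suffixes and, per the TODO, rejects a bare 'm' (int() raises ValueError for it):

def ParseByteSize(size_string):
  """Parses a size string such as '512MiB' into a number of bytes."""
  units = {
      'KiB': 1024, 'MiB': 1024 ** 2, 'GiB': 1024 ** 3,
      'KB': 1000, 'MB': 1000 ** 2, 'GB': 1000 ** 3}
  for suffix, multiplier in units.items():
    if size_string.endswith(suffix):
      return int(size_string[:-len(suffix)], 10) * multiplier
  # No recognized suffix: interpret the value as a plain byte count.
  return int(size_string, 10)

print(ParseByteSize('512MiB'))  # 536870912
print(ParseByteSize('4096'))    # 4096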
Parses the processing options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
|
def _ParseProcessingOptions(self, options):
self._single_process_mode = getattr(options, 'single_process', False)
argument_helper_names = [
'process_resources', 'temporary_directory', 'workers', 'zeromq']
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=argument_helper_names)
| 288,445 |
Preprocesses the sources.
Args:
extraction_engine (BaseEngine): extraction engine to preprocess
the sources.
|
def _PreprocessSources(self, extraction_engine):
logger.debug('Starting preprocessing.')
try:
artifacts_registry = engine.BaseEngine.BuildArtifactsRegistry(
self._artifact_definitions_path, self._custom_artifacts_path)
extraction_engine.PreprocessSources(
artifacts_registry, self._source_path_specs,
resolver_context=self._resolver_context)
except IOError as exception:
logger.error('Unable to preprocess with error: {0!s}'.format(exception))
logger.debug('Preprocessing done.')
| 288,446 |
Sets the parsers and plugins before extraction.
Args:
configuration (ProcessingConfiguration): processing configuration.
session (Session): session.
|
def _SetExtractionParsersAndPlugins(self, configuration, session):
names_generator = parsers_manager.ParsersManager.GetParserAndPluginNames(
parser_filter_expression=configuration.parser_filter_expression)
session.enabled_parser_names = list(names_generator)
session.parser_filter_expression = configuration.parser_filter_expression
| 288,448 |
Sets the preferred time zone before extraction.
Args:
knowledge_base (KnowledgeBase): contains information from the source
data needed for parsing.
|
def _SetExtractionPreferredTimeZone(self, knowledge_base):
# Note session.preferred_time_zone will default to UTC but
# self._preferred_time_zone is None when not set.
if self._preferred_time_zone:
try:
knowledge_base.SetTimeZone(self._preferred_time_zone)
except ValueError:
# pylint: disable=protected-access
logger.warning(
'Unsupported time zone: {0:s}, defaulting to {1:s}'.format(
self._preferred_time_zone, knowledge_base._time_zone.zone))
| 288,449 |
Adds the performance options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
|
def AddPerformanceOptions(self, argument_group):
argument_group.add_argument(
'--buffer_size', '--buffer-size', '--bs', dest='buffer_size',
action='store', default=0, help=(
'The buffer size for the output (defaults to 196MiB).'))
argument_group.add_argument(
'--queue_size', '--queue-size', dest='queue_size', action='store',
default=0, help=(
'The maximum number of queued items per worker '
'(defaults to {0:d})').format(self._DEFAULT_QUEUE_SIZE))
| 288,450 |
Adds the processing options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
|
def AddProcessingOptions(self, argument_group):
argument_group.add_argument(
'--single_process', '--single-process', dest='single_process',
action='store_true', default=False, help=(
'Indicate that the tool should run in a single process.'))
argument_helper_names = ['temporary_directory', 'workers', 'zeromq']
if self._CanEnforceProcessMemoryLimit():
argument_helper_names.append('process_resources')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
argument_group, names=argument_helper_names)
| 288,451 |
Initializes event data.
Args:
cookie_identifier (str): unique identifier of the cookie.
|
def __init__(self, cookie_identifier):
data_type = '{0:s}:{1:s}'.format(self.DATA_TYPE, cookie_identifier)
super(GoogleAnalyticsEventData, self).__init__(data_type=data_type)
self.cookie_name = None
self.domain_hash = None
self.pages_viewed = None
self.sessions = None
self.sources = None
self.url = None
self.visitor_id = None
| 288,453 |
Extracts event objects from the cookie.
Args:
parser_mediator (ParserMediator): parser mediator.
cookie_data (str): cookie data.
url (str): URL or path where the cookie got set.
|
def GetEntries(
self, parser_mediator, cookie_data=None, url=None, **kwargs):
fields = cookie_data.split('.')
number_of_fields = len(fields)
if number_of_fields not in (1, 6):
parser_mediator.ProduceExtractionWarning(
'unsupported number of fields: {0:d} in cookie: {1:s}'.format(
number_of_fields, self.COOKIE_NAME))
return
if number_of_fields == 1:
domain_hash = None
visitor_identifier = None
first_visit_posix_time = None
previous_visit_posix_time = None
try:
# TODO: fix that we're losing precision here; use dfdatetime.
last_visit_posix_time = int(fields[0], 10) / 10000000
except ValueError:
last_visit_posix_time = None
number_of_sessions = None
elif number_of_fields == 6:
domain_hash = fields[0]
visitor_identifier = fields[1]
# TODO: Double check this time is stored in UTC and not local time.
try:
first_visit_posix_time = int(fields[2], 10)
except ValueError:
first_visit_posix_time = None
try:
previous_visit_posix_time = int(fields[3], 10)
except ValueError:
previous_visit_posix_time = None
try:
last_visit_posix_time = int(fields[4], 10)
except ValueError:
last_visit_posix_time = None
try:
number_of_sessions = int(fields[5], 10)
except ValueError:
number_of_sessions = None
event_data = GoogleAnalyticsEventData('utma')
event_data.cookie_name = self.COOKIE_NAME
event_data.domain_hash = domain_hash
event_data.sessions = number_of_sessions
event_data.url = url
event_data.visitor_id = visitor_identifier
if first_visit_posix_time is not None:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=first_visit_posix_time)
event = time_events.DateTimeValuesEvent(
date_time, 'Analytics Creation Time')
parser_mediator.ProduceEventWithEventData(event, event_data)
if previous_visit_posix_time is not None:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=previous_visit_posix_time)
event = time_events.DateTimeValuesEvent(
date_time, 'Analytics Previous Time')
parser_mediator.ProduceEventWithEventData(event, event_data)
date_time = None
if last_visit_posix_time is not None:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=last_visit_posix_time)
timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED
elif first_visit_posix_time is None and previous_visit_posix_time is None:
# If both creation_time and written_time are None produce an event
# object without a timestamp.
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
if date_time is not None:
event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,454 |
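For context, a made-up __utma value showing how the six dot-separated fields map onto the variables above (domain hash, visitor identifier, first, previous and last visit POSIX times, and the session count):

cookie_data = '137167072.1215918036.1383170166.1383170166.1383170166.1'
fields = cookie_data.split('.')

domain_hash = fields[0]                         # '137167072'
visitor_identifier = fields[1]                  # '1215918036'
first_visit_posix_time = int(fields[2], 10)     # 1383170166
previous_visit_posix_time = int(fields[3], 10)  # 1383170166
last_visit_posix_time = int(fields[4], 10)      # 1383170166
number_of_sessions = int(fields[5], 10)         # 1

print(domain_hash, visitor_identifier, number_of_sessions)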
Extracts event objects from the cookie.
Args:
parser_mediator (ParserMediator): parser mediator.
cookie_data (bytes): cookie data.
url (str): URL or path where the cookie got set.
|
def GetEntries(
self, parser_mediator, cookie_data=None, url=None, **kwargs):
fields = cookie_data.split('.')
number_of_fields = len(fields)
if number_of_fields not in (1, 4):
parser_mediator.ProduceExtractionWarning(
'unsupported number of fields: {0:d} in cookie: {1:s}'.format(
number_of_fields, self.COOKIE_NAME))
return
if number_of_fields == 1:
domain_hash = None
try:
# TODO: fix that we're losing precision here; use dfdatetime.
last_visit_posix_time = int(fields[0], 10) / 10000000
except ValueError:
last_visit_posix_time = None
number_of_pages_viewed = None
elif number_of_fields == 4:
domain_hash = fields[0]
try:
number_of_pages_viewed = int(fields[1], 10)
except ValueError:
number_of_pages_viewed = None
try:
if fields[2] in ('8', '9'):
# TODO: fix that we're losing precision here; use dfdatetime.
last_visit_posix_time = int(fields[3], 10) / 1000
else:
last_visit_posix_time = int(fields[3], 10)
except ValueError:
last_visit_posix_time = None
if last_visit_posix_time is not None:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=last_visit_posix_time)
timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED
else:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
event_data = GoogleAnalyticsEventData('utmb')
event_data.cookie_name = self.COOKIE_NAME
event_data.domain_hash = domain_hash
event_data.pages_viewed = number_of_pages_viewed
event_data.url = url
event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,455 |
Extracts event objects from the cookie.
Args:
parser_mediator (ParserMediator): parser mediator.
cookie_data (bytes): cookie data.
url (str): URL or path where the cookie got set.
|
def GetEntries(
self, parser_mediator, cookie_data=None, url=None, **kwargs):
fields = cookie_data.split('.')
number_of_fields = len(fields)
if number_of_fields != 1:
parser_mediator.ProduceExtractionWarning(
'unsupported number of fields: {0:d} in cookie: {1:s}'.format(
number_of_fields, self.COOKIE_NAME))
return
try:
# TODO: fix that we're losing precision here; use dfdatetime.
last_visit_posix_time = int(fields[0], 10) / 10000000
except ValueError:
last_visit_posix_time = None
if last_visit_posix_time is not None:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=last_visit_posix_time)
timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED
else:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
event_data = GoogleAnalyticsEventData('utmt')
event_data.cookie_name = self.COOKIE_NAME
event_data.url = url
event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,456 |
Extracts event objects from the cookie.
Args:
parser_mediator (ParserMediator): parser mediator.
cookie_data (str): cookie data.
url (str): URL or path where the cookie got set.
|
def GetEntries(
self, parser_mediator, cookie_data=None, url=None, **kwargs):
fields = cookie_data.split('.')
number_of_fields = len(fields)
if number_of_fields > 5:
variables = '.'.join(fields[4:])
fields = fields[0:4]
fields.append(variables)
number_of_fields = len(fields)
if number_of_fields not in (1, 5):
parser_mediator.ProduceExtractionWarning(
'unsupported number of fields: {0:d} in cookie: {1:s}'.format(
number_of_fields, self.COOKIE_NAME))
return
if number_of_fields == 1:
domain_hash = None
try:
# TODO: fix that we're losing precision here; use dfdatetime.
last_visit_posix_time = int(fields[0], 10) / 10000000
except ValueError:
last_visit_posix_time = None
number_of_sessions = None
number_of_sources = None
extra_attributes = {}
elif number_of_fields == 5:
domain_hash = fields[0]
try:
last_visit_posix_time = int(fields[1], 10)
except ValueError:
last_visit_posix_time = None
try:
number_of_sessions = int(fields[2], 10)
except ValueError:
number_of_sessions = None
try:
number_of_sources = int(fields[3], 10)
except ValueError:
number_of_sources = None
extra_variables = fields[4].split('|')
extra_attributes = {}
for variable in extra_variables:
key, _, value = variable.partition('=')
# Urllib2 in Python 2 requires a 'str' argument, not 'unicode'. We thus
# need to convert the value argument to 'str' and back again, but only
# in Python 2.
if isinstance(value, py2to3.UNICODE_TYPE) and py2to3.PY_2:
try:
value = codecs.decode(value, 'ascii')
except UnicodeEncodeError:
value = codecs.decode(value, 'ascii', errors='replace')
parser_mediator.ProduceExtractionWarning(
'Cookie contains non 7-bit ASCII characters, which have been '
'replaced with a "?".')
value = urlparse.unquote(value)
if py2to3.PY_2:
try:
value = codecs.encode(value, 'utf-8')
except UnicodeDecodeError:
value = codecs.encode(value, 'utf-8', errors='replace')
parser_mediator.ProduceExtractionWarning(
'Cookie value did not contain a Unicode string. Non UTF-8 '
'characters have been replaced.')
extra_attributes[key] = value
if last_visit_posix_time is not None:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=last_visit_posix_time)
timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED
else:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
event_data = GoogleAnalyticsEventData('utmz')
event_data.cookie_name = self.COOKIE_NAME
event_data.domain_hash = domain_hash
event_data.sessions = number_of_sessions
event_data.sources = number_of_sources
event_data.url = url
for key, value in iter(extra_attributes.items()):
setattr(event_data, key, value)
event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,457 |
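Similarly, an invented __utmz value: everything after the fourth dot is a '|'-separated list of 'key=value' campaign variables, which may themselves contain dots, hence the re-join in the code above before splitting on '|':

cookie_data = (
    '137167072.1383170166.1.1.'
    'utmcsr=forensicswiki.org|utmccn=(referral)|utmcmd=referral')
fields = cookie_data.split('.')

variables = '.'.join(fields[4:])   # repair the dot inside 'forensicswiki.org'
fields = fields[0:4] + [variables]

extra_attributes = {}
for variable in fields[4].split('|'):
  key, _, value = variable.partition('=')
  extra_attributes[key] = value

print(extra_attributes['utmcsr'])  # forensicswiki.org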
Parses a Java WebStart Cache IDX file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dvfvs.FileIO): a file-like object to parse.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
def ParseFileObject(self, parser_mediator, file_object):
file_header_map = self._GetDataTypeMap('java_idx_file_header')
try:
file_header, file_offset = self._ReadStructureFromFileObject(
file_object, 0, file_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile(
'Unable to parse file header with error: {0!s}'.format(
exception))
if file_header.format_version not in self._SUPPORTED_FORMAT_VERSIONS:
raise errors.UnableToParseFile('Unsupported format version.')
if file_header.format_version == 602:
section1_map = self._GetDataTypeMap('java_idx_602_section1')
elif file_header.format_version in (603, 604):
section1_map = self._GetDataTypeMap('java_idx_603_section1')
elif file_header.format_version == 605:
section1_map = self._GetDataTypeMap('java_idx_605_section1')
try:
section1, data_size = self._ReadStructureFromFileObject(
file_object, file_offset, section1_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile((
'Unable to parse section 1 (format version: {0:d}) with error: '
'{1!s}').format(file_header.format_version, exception))
file_offset += data_size
if file_header.format_version == 602:
section2_map = self._GetDataTypeMap('java_idx_602_section2')
elif file_header.format_version in (603, 604, 605):
file_offset = 128
section2_map = self._GetDataTypeMap('java_idx_603_section2')
try:
section2, data_size = self._ReadStructureFromFileObject(
file_object, file_offset, section2_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile((
'Unable to parse section 2 (format version: {0:d}) with error: '
'{1!s}').format(file_header.format_version, exception))
file_offset += data_size
if not section2.url:
raise errors.UnableToParseFile('URL not found in file.')
date_http_header = None
for _ in range(section2.number_of_http_headers):
http_header_map = self._GetDataTypeMap('java_idx_http_header')
try:
http_header, data_size = self._ReadStructureFromFileObject(
file_object, file_offset, http_header_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'Unable to parse HTTP header value at offset: 0x{0:08x}'.format(
file_offset))
break
file_offset += data_size
if http_header.name == 'date':
date_http_header = http_header
break
event_data = JavaIDXEventData()
event_data.idx_version = file_header.format_version
event_data.ip_address = getattr(section2, 'ip_address', None)
event_data.url = section2.url
date_time = dfdatetime_java_time.JavaTime(
timestamp=section1.modification_time)
# TODO: Move the timestamp description into definitions.
event = time_events.DateTimeValuesEvent(date_time, 'File Hosted Date')
parser_mediator.ProduceEventWithEventData(event, event_data)
if section1.expiration_time:
date_time = dfdatetime_java_time.JavaTime(
timestamp=section1.expiration_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_EXPIRATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
if date_http_header:
# An HTTP header date and time string "should" be in UTC or have associated
# time zone information in the string itself. If that is not the case
# then there is no reliable method for plaso to determine the proper
# time zone, so the assumption is that it is UTC.
try:
download_date = timelib.Timestamp.FromTimeString(
date_http_header.value, gmt_as_timezone=False)
except errors.TimestampError:
download_date = None
parser_mediator.ProduceExtractionWarning(
'Unable to parse date HTTP header value: {0:s}'.format(
date_http_header.value))
if download_date:
event = time_events.TimestampEvent(
download_date, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,459 |
Parses a log header.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
|
def _ParseHeader(self, parser_mediator, structure):
_, month, day, hours, minutes, seconds, year = structure.date_time
month = timelib.MONTH_DICT.get(month.lower(), 0)
time_elements_tuple = (year, month, day, hours, minutes, seconds)
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
date_time.is_local_time = True
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
self._last_month = month
event_data = XChatLogEventData()
if structure.log_action[0] == 'BEGIN':
self._xchat_year = year
event_data.text = 'XChat start logging'
elif structure.log_action[0] == 'END':
self._xchat_year = None
event_data.text = 'XChat end logging'
else:
logger.debug('Unknown log action: {0:s}.'.format(
' '.join(structure.log_action)))
return
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED,
time_zone=parser_mediator.timezone)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,462 |
Parses a log line.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
|
def _ParseLogLine(self, parser_mediator, structure):
if not self._xchat_year:
return
time_elements_tuple = self._GetTimeElementsTuple(structure)
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
date_time.is_local_time = True
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
self._last_month = time_elements_tuple[1]
event_data = XChatLogEventData()
event_data.nickname = structure.nickname
# The text string contains multiple unnecessary whitespaces that need to
# be removed, thus the split and re-join.
event_data.text = ' '.join(structure.text.split())
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED,
time_zone=parser_mediator.timezone)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,463 |
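The split and re-join above collapses runs of whitespace into single spaces, for example (the log text is made up):

text = 'lurker    has joined   #plaso'
print(' '.join(text.split()))  # lurker has joined #plaso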
Parses a log record structure and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): identifier of the structure of tokens.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown.
|
def ParseRecord(self, parser_mediator, key, structure):
if key not in ('header', 'header_signature', 'logline'):
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
if key == 'logline':
self._ParseLogLine(parser_mediator, structure)
elif key == 'header':
self._ParseHeader(parser_mediator, structure)
elif key == 'header_signature':
# If this key is matched (after the other keys failed) we got a different
# localized header and we should stop parsing until a new valid header
# is found. Parsing is stopped by setting xchat_year to 0.
# Note that the code assumes that LINE_STRUCTURES will be used in the
# exact order as defined!
logger.warning('Unknown locale header.')
self._xchat_year = 0
| 288,464 |
Verifies that this file is an XChat log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
|
def VerifyStructure(self, parser_mediator, line):
try:
structure = self._HEADER.parseString(line)
except pyparsing.ParseException:
logger.debug('Not a XChat log file')
return False
_, month, day, hours, minutes, seconds, year = structure.date_time
month = timelib.MONTH_DICT.get(month.lower(), 0)
time_elements_tuple = (year, month, day, hours, minutes, seconds)
try:
dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
logger.debug('Not a XChat log file, invalid date and time: {0!s}'.format(
structure.date_time))
return False
return True
| 288,465 |
Adds command line arguments the helper supports to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
|
def AddArguments(cls, argument_group):
argument_group.add_argument(
'--user', dest='username', type=str, action='store',
default=cls._DEFAULT_USERNAME, metavar='USERNAME', required=False,
help='The username used to connect to the database.')
argument_group.add_argument(
'--password', dest='password', type=str, action='store',
default=cls._DEFAULT_PASSWORD, metavar='PASSWORD', help=(
'The password for the database user.'))
argument_group.add_argument(
'--db_name', '--db-name', dest='db_name', action='store',
type=str, default=cls._DEFAULT_NAME, required=False, help=(
'The name of the database to connect to.'))
server_config.ServerArgumentsHelper.AddArguments(argument_group)
| 288,466 |
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object does not have the
SetCredentials or SetDatabaseName methods.
|
def ParseOptions(cls, options, output_module):
if not hasattr(output_module, 'SetCredentials'):
raise errors.BadConfigObject('Unable to set username information.')
if not hasattr(output_module, 'SetDatabaseName'):
raise errors.BadConfigObject('Unable to set database information.')
username = cls._ParseStringOption(
options, 'username', default_value=cls._DEFAULT_USERNAME)
password = cls._ParseStringOption(
options, 'password', default_value=cls._DEFAULT_PASSWORD)
name = cls._ParseStringOption(
options, 'db_name', default_value=cls._DEFAULT_NAME)
output_module.SetCredentials(username=username, password=password)
output_module.SetDatabaseName(name)
server_config.ServerArgumentsHelper.ParseOptions(options, output_module)
| 288,467 |
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
|
def ParseOptions(cls, options, configuration_object):
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
filter_collection = getattr(
configuration_object, '_filter_collection', None)
if not filter_collection:
raise errors.BadConfigObject(
'Filter collection missing from configuration object')
date_filters = getattr(options, 'date_filters', None)
if not date_filters:
return
file_entry_filter = file_entry_filters.DateTimeFileEntryFilter()
for date_filter in date_filters:
date_filter_pieces = date_filter.split(',')
if len(date_filter_pieces) != 3:
raise errors.BadConfigOption(
'Badly formed date filter: {0:s}'.format(date_filter))
time_value, start_time_string, end_time_string = date_filter_pieces
time_value = time_value.strip()
start_time_string = start_time_string.strip()
end_time_string = end_time_string.strip()
try:
file_entry_filter.AddDateTimeRange(
time_value, start_time_string=start_time_string,
end_time_string=end_time_string)
except ValueError:
raise errors.BadConfigOption(
'Badly formed date filter: {0:s}'.format(date_filter))
filter_collection.AddFilter(file_entry_filter)
| 288,468 |
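The date filter option expects strings of the form 'TIME_VALUE,START,END'. A small self-contained sketch of the same split-and-validate logic, without the plaso filter classes, follows; the example filter value is made up.

def parse_date_filter(date_filter):
  """Splits a 'TIME_VALUE,START,END' filter string into its parts."""
  pieces = date_filter.split(',')
  if len(pieces) != 3:
    raise ValueError('Badly formed date filter: {0:s}'.format(date_filter))
  time_value, start_time_string, end_time_string = [
      piece.strip() for piece in pieces]
  return time_value, start_time_string, end_time_string

# Example: restrict to creation times within a 20 second window.
print(parse_date_filter(
    'ctime, 2020-01-01 00:00:00, 2020-01-01 00:00:20'))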
Parses a .customDestinations-ms file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
def ParseFileObject(self, parser_mediator, file_object):
file_entry = parser_mediator.GetFileEntry()
display_name = parser_mediator.GetDisplayName()
file_header_map = self._GetDataTypeMap('custom_file_header')
try:
file_header, file_offset = self._ReadStructureFromFileObject(
file_object, 0, file_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile((
'Invalid Custom Destination: {0:s} - unable to parse file header '
'with error: {1!s}').format(display_name, exception))
if file_header.unknown1 != 2:
raise errors.UnableToParseFile((
'Unsupported Custom Destination file: {0:s} - invalid unknown1: '
'{1:d}.').format(display_name, file_header.unknown1))
if file_header.header_values_type > 2:
raise errors.UnableToParseFile((
'Unsupported Custom Destination file: {0:s} - invalid header value '
'type: {1:d}.').format(display_name, file_header.header_values_type))
if file_header.header_values_type == 0:
data_map_name = 'custom_file_header_value_type_0'
else:
data_map_name = 'custom_file_header_value_type_1_or_2'
file_header_value_map = self._GetDataTypeMap(data_map_name)
try:
_, value_data_size = self._ReadStructureFromFileObject(
file_object, file_offset, file_header_value_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile((
'Invalid Custom Destination: {0:s} - unable to parse file header '
'value with error: {1!s}').format(display_name, exception))
file_offset += value_data_size
file_size = file_object.get_size()
remaining_file_size = file_size - file_offset
entry_header_map = self._GetDataTypeMap('custom_entry_header')
file_footer_map = self._GetDataTypeMap('custom_file_footer')
# The Custom Destination file does not have a unique signature in
# the file header, which is why we use the first LNK class identifier
# (GUID) as a signature.
first_guid_checked = False
while remaining_file_size > 4:
try:
entry_header, entry_data_size = self._ReadStructureFromFileObject(
file_object, file_offset, entry_header_map)
except (ValueError, errors.ParseError) as exception:
if not first_guid_checked:
raise errors.UnableToParseFile((
'Invalid Custom Destination file: {0:s} - unable to parse '
'entry header with error: {1!s}').format(
display_name, exception))
parser_mediator.ProduceExtractionWarning(
'unable to parse entry header with error: {0!s}'.format(
exception))
break
if entry_header.guid != self._LNK_GUID:
if not first_guid_checked:
raise errors.UnableToParseFile((
'Unsupported Custom Destination file: {0:s} - invalid entry '
'header signature offset: 0x{1:08x}.').format(
display_name, file_offset))
try:
# Check if we found the footer instead of an entry header.
file_footer, _ = self._ReadStructureFromFileObject(
file_object, file_offset, file_footer_map)
if file_footer.signature != self._FILE_FOOTER_SIGNATURE:
parser_mediator.ProduceExtractionWarning(
'invalid entry header signature at offset: 0x{0:08x}'.format(
file_offset))
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse footer at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
break
# TODO: add support for Jump List LNK file recovery.
break
first_guid_checked = True
file_offset += entry_data_size
remaining_file_size -= entry_data_size
lnk_file_size = self._ParseLNKFile(
parser_mediator, file_entry, file_offset, remaining_file_size)
file_offset += lnk_file_size
remaining_file_size -= lnk_file_size
try:
file_footer, _ = self._ReadStructureFromFileObject(
file_object, file_offset, file_footer_map)
if file_footer.signature != self._FILE_FOOTER_SIGNATURE:
parser_mediator.ProduceExtractionWarning(
'invalid footer signature at offset: 0x{0:08x}'.format(file_offset))
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse footer at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
| 288,470 |
Parses a Messages row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
|
def ParseMessagesRow(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
event_data = HangoutsMessageData()
event_data.sender = self._GetRowValue(query_hash, row, 'full_name')
event_data.body = self._GetRowValue(query_hash, row, 'text')
event_data.offset = self._GetRowValue(query_hash, row, '_id')
event_data.query = query
event_data.message_status = self._GetRowValue(query_hash, row, 'status')
event_data.message_type = self._GetRowValue(query_hash, row, 'type')
timestamp = self._GetRowValue(query_hash, row, 'timestamp')
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,472 |
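The Hangouts database stores timestamps as microseconds since the POSIX epoch. The conversion below uses only the standard library and is an illustration of the same idea, not the dfdatetime implementation used above; the sample value is made up.

import datetime

def posix_microseconds_to_datetime(timestamp):
  """Converts microseconds since 1970-01-01 00:00:00 UTC to a datetime."""
  return (datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) +
          datetime.timedelta(microseconds=timestamp))

# A plausible Hangouts message timestamp: 2018-03-09 17:57:03.011000 UTC.
print(posix_microseconds_to_datetime(1520618223011000))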
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
|
def ParseOptions(cls, options, configuration_object):
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
temporary_directory = getattr(options, 'temporary_directory', None)
if temporary_directory and not os.path.isdir(temporary_directory):
raise errors.BadConfigOption(
'No such temporary directory: {0:s}'.format(temporary_directory))
setattr(configuration_object, '_temporary_directory', temporary_directory)
| 288,473 |
Initializes an Elasticsearch output module.
Args:
output_mediator (OutputMediator): mediates interactions between output
modules and other components, such as storage and dfvfs.
|
def __init__(self, output_mediator):
super(SharedElasticsearchOutputModule, self).__init__(output_mediator)
self._client = None
self._document_type = self._DEFAULT_DOCUMENT_TYPE
self._event_documents = []
self._flush_interval = self._DEFAULT_FLUSH_INTERVAL
self._host = None
self._index_name = None
self._number_of_buffered_events = 0
self._password = None
self._port = None
self._username = None
self._use_ssl = None
self._ca_certs = None
self._url_prefix = None
| 288,474 |
Creates an Elasticsearch index if it does not exist.
Args:
index_name (str): name of the index.
mappings (dict[str, object]): mappings of the index.
Raises:
RuntimeError: if the Elasticsearch index cannot be created.
|
def _CreateIndexIfNotExists(self, index_name, mappings):
try:
if not self._client.indices.exists(index_name):
self._client.indices.create(
body={'mappings': mappings}, index=index_name)
except elasticsearch.exceptions.ConnectionError as exception:
raise RuntimeError(
'Unable to create Elasticsearch index with error: {0!s}'.format(
exception))
| 288,476 |
Inserts an event.
Events are buffered in the form of documents and inserted into
Elasticsearch when either a flush is forced or the flush interval
(threshold) has been reached.
Args:
event (EventObject): event.
force_flush (bool): True if buffered event documents should be inserted
into Elasticsearch.
|
def _InsertEvent(self, event, force_flush=False):
if event:
event_document = {'index': {
'_index': self._index_name, '_type': self._document_type}}
event_values = self._GetSanitizedEventValues(event)
self._event_documents.append(event_document)
self._event_documents.append(event_values)
self._number_of_buffered_events += 1
if force_flush or self._number_of_buffered_events > self._flush_interval:
self._FlushEvents()
| 288,479 |
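The buffering pattern above, append documents and flush once a threshold is reached or a flush is forced, can be sketched independently of Elasticsearch. The flush method below is a stand-in assumption for the bulk API call.

class BufferedInserter(object):
  """Buffers items and flushes them in batches (illustrative sketch)."""

  def __init__(self, flush_interval=3):
    self._buffer = []
    self._flush_interval = flush_interval

  def _Flush(self):
    # Stand-in for a bulk insert such as a call to the Elasticsearch bulk API.
    print('flushing {0:d} items'.format(len(self._buffer)))
    self._buffer = []

  def Insert(self, item, force_flush=False):
    if item is not None:
      self._buffer.append(item)
    if force_flush or len(self._buffer) > self._flush_interval:
      self._Flush()


inserter = BufferedInserter(flush_interval=3)
for number in range(5):
  inserter.Insert({'event': number})
inserter.Insert(None, force_flush=True)  # final flush on close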
Sets the document type.
Args:
document_type (str): document type.
|
def SetDocumentType(self, document_type):
self._document_type = document_type
logger.debug('Elasticsearch document type: {0:s}'.format(document_type))
| 288,480 |
Set the flush interval.
Args:
flush_interval (int): number of events to buffer before doing a bulk
insert.
|
def SetFlushInterval(self, flush_interval):
self._flush_interval = flush_interval
logger.debug('Elasticsearch flush interval: {0:d}'.format(flush_interval))
| 288,481 |
Set the index name.
Args:
index_name (str): name of the index.
|
def SetIndexName(self, index_name):
self._index_name = index_name
logger.debug('Elasticsearch index name: {0:s}'.format(index_name))
| 288,482 |
Set the server information.
Args:
server (str): IP address or hostname of the server.
port (int): port number of the server.
|
def SetServerInformation(self, server, port):
self._host = server
self._port = port
logger.debug('Elasticsearch server: {0!s} port: {1:d}'.format(
server, port))
| 288,483 |
Sets the username.
Args:
username (str): username to authenticate with.
|
def SetUsername(self, username):
self._username = username
logger.debug('Elasticsearch username: {0!s}'.format(username))
| 288,484 |
Sets the use of ssl.
Args:
use_ssl (bool): enforces use of ssl.
|
def SetUseSSL(self, use_ssl):
self._use_ssl = use_ssl
logger.debug('Elasticsearch use_ssl: {0!s}'.format(use_ssl))
| 288,485 |
Sets the path to the CA certificates.
Args:
ca_certificates_path (str): path to file containing a list of root
certificates to trust.
Raises:
BadConfigOption: if the CA certificates file does not exist.
|
def SetCACertificatesPath(self, ca_certificates_path):
if not ca_certificates_path:
return
if not os.path.exists(ca_certificates_path):
raise errors.BadConfigOption(
'No such certificate file: {0:s}.'.format(ca_certificates_path))
self._ca_certs = ca_certificates_path
logger.debug('Elasticsearch ca_certs: {0!s}'.format(ca_certificates_path))
| 288,486 |
Parses a launch services quarantine event row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
|
def ParseLSQuarantineRow(
self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
event_data = LsQuarantineEventData()
event_data.agent = self._GetRowValue(query_hash, row, 'Agent')
event_data.data = self._GetRowValue(query_hash, row, 'Data')
event_data.query = query
event_data.url = self._GetRowValue(query_hash, row, 'URL')
timestamp = self._GetRowValue(query_hash, row, 'Time')
date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,488 |
Retrieves a time elements tuple from the structure.
Args:
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Returns:
tuple: containing:
year (int): year.
month (int): month, where 1 represents January.
day_of_month (int): day of month, where 1 is the first day of the month.
hours (int): hours.
minutes (int): minutes.
seconds (int): seconds.
|
def _GetTimeElementsTuple(self, structure):
month, day, hours, minutes, seconds = structure.date_time
# Note that dfdatetime_time_elements.TimeElements will raise ValueError
# for an invalid month.
month = timelib.MONTH_DICT.get(month.lower(), 0)
if month != 0 and month < self._last_month:
# Gap detected between years.
self._year_use += 1
return (self._year_use, month, day, hours, minutes, seconds)
| 288,491 |
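securityd log lines carry no year, so the parser tracks the last month seen and bumps an estimated year when the month number decreases. A self-contained sketch of that inference follows; the month table and the starting year are assumptions for the example.

MONTHS = {
    'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
    'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12}

def infer_years(month_names, estimated_year):
  """Assigns a year to each month, bumping it when the month decreases."""
  last_month = 0
  year = estimated_year
  result = []
  for name in month_names:
    month = MONTHS.get(name.lower(), 0)
    if month != 0 and month < last_month:
      year += 1  # gap detected between years
    last_month = month
    result.append((year, month))
  return result

# A log that runs from November into January of the next year.
print(infer_years(['Nov', 'Dec', 'Jan'], 2017))
# [(2017, 11), (2017, 12), (2018, 1)]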
Parses a single log line and produces an event object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
key (str): name of the parsed structure.
|
def _ParseLogLine(self, parser_mediator, structure, key):
time_elements_tuple = self._GetTimeElementsTuple(structure)
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
self._last_month = time_elements_tuple[1]
if key == 'logline':
self._previous_structure = structure
message = structure.message
else:
message = 'Repeated {0:d} times: {1:s}'.format(
structure.times, self._previous_structure.message)
structure = self._previous_structure
# The log line uses the pyparsing CharsNotIn structure, which leaves
# whitespace at the beginning of the sender and the caller.
event_data = MacOSSecuritydLogEventData()
event_data.caller = structure.caller.strip() or 'unknown'
event_data.facility = structure.facility
event_data.level = structure.level
event_data.message = message
event_data.security_api = structure.security_api or 'unknown'
event_data.sender_pid = structure.sender_pid
event_data.sender = structure.sender.strip()
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,492 |
Verify that this file is a securityd log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
|
def VerifyStructure(self, parser_mediator, line):
self._last_month = 0
self._year_use = parser_mediator.GetEstimatedYear()
try:
structure = self.SECURITYD_LINE.parseString(line)
except pyparsing.ParseException:
logger.debug('Not a MacOS securityd log file')
return False
time_elements_tuple = self._GetTimeElementsTuple(structure)
try:
dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
logger.debug(
'Not a MacOS securityd log file, invalid date and time: {0!s}'.format(
structure.date_time))
return False
self._last_month = time_elements_tuple[1]
return True
| 288,493 |
Initializes a formatter mediator object.
Args:
data_location (str): path of the formatter data files.
|
def __init__(self, data_location=None):
super(FormatterMediator, self).__init__()
self._data_location = data_location
self._language_identifier = self.DEFAULT_LANGUAGE_IDENTIFIER
self._lcid = self.DEFAULT_LCID
self._winevt_database_reader = None
| 288,494 |
Retrieves the message string for a specific Windows Event Log source.
Args:
log_source (str): Event Log source, such as "Application Error".
message_identifier (int): message identifier.
Returns:
str: message string or None if not available.
|
def GetWindowsEventMessage(self, log_source, message_identifier):
database_reader = self._GetWinevtRcDatabaseReader()
if not database_reader:
return None
if self._lcid != self.DEFAULT_LCID:
message_string = database_reader.GetMessage(
log_source, self._lcid, message_identifier)
if message_string:
return message_string
return database_reader.GetMessage(
log_source, self.DEFAULT_LCID, message_identifier)
| 288,496 |
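The lookup falls back from the preferred LCID to the default LCID when no message string is found. A dictionary-based sketch of that fallback is shown below; the message table and identifiers are made up for the example.

DEFAULT_LCID = 0x0409  # en-US

# Hypothetical message table keyed by (log_source, lcid, message_identifier).
_MESSAGES = {
    ('Application Error', 0x0409, 1000): 'Faulting application {0:s}',
}

def get_windows_event_message(log_source, lcid, message_identifier):
  """Returns the message string, falling back to the default LCID."""
  if lcid != DEFAULT_LCID:
    message_string = _MESSAGES.get((log_source, lcid, message_identifier))
    if message_string:
      return message_string
  return _MESSAGES.get((log_source, DEFAULT_LCID, message_identifier))

# Icelandic (0x040f) has no entry, so the en-US string is returned.
print(get_windows_event_message('Application Error', 0x040f, 1000))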
Sets the preferred language identifier.
Args:
language_identifier (str): language identifier string such as "en-US"
for US English or "is-IS" for Icelandic.
Raises:
KeyError: if the language identifier is not defined.
ValueError: if the language identifier is not a string type.
|
def SetPreferredLanguageIdentifier(self, language_identifier):
if not isinstance(language_identifier, py2to3.STRING_TYPES):
raise ValueError('Language identifier is not a string.')
values = language_ids.LANGUAGE_IDENTIFIERS.get(
language_identifier.lower(), None)
if not values:
raise KeyError('Language identifier: {0:s} is not defined.'.format(
language_identifier))
self._language_identifier = language_identifier
self._lcid = values[0]
| 288,497 |
Deregisters a helper class.
The helper classes are identified based on their lower case name.
Args:
helper_class (type): class object of the argument helper.
Raises:
KeyError: if helper class is not set for the corresponding name.
|
def DeregisterHelper(cls, helper_class):
helper_name = helper_class.NAME.lower()
if helper_name not in cls._helper_classes:
raise KeyError('Helper class not set for name: {0:s}.'.format(
helper_class.NAME))
del cls._helper_classes[helper_name]
| 288,499 |
Initializes a Viper hash analyzer.
Args:
hash_queue (Queue.queue): contains hashes to be analyzed.
hash_analysis_queue (Queue.queue): queue to which the analyzer will
append HashAnalysis objects.
|
def __init__(self, hash_queue, hash_analysis_queue, **kwargs):
super(ViperAnalyzer, self).__init__(
hash_queue, hash_analysis_queue, **kwargs)
self._checked_for_old_python_version = False
self._host = None
self._port = None
self._protocol = None
self._url = None
| 288,502 |
Queries the Viper server for a specific hash.
Args:
digest (str): hash to look up.
Returns:
dict[str, object]: JSON response or None on error.
|
def _QueryHash(self, digest):
if not self._url:
self._url = '{0:s}://{1:s}:{2:d}/file/find'.format(
self._protocol, self._host, self._port)
request_data = {self.lookup_hash: digest}
try:
json_response = self.MakeRequestAndDecodeJSON(
self._url, 'POST', data=request_data)
except errors.ConnectionError as exception:
json_response = None
logger.error('Unable to query Viper with error: {0!s}.'.format(
exception))
return json_response
| 288,503 |
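The analyzer posts the hash to the /file/find endpoint and decodes the JSON response. An equivalent standalone request using the requests library (assumed to be installed) is sketched below; the host, port and digest are placeholders, and the call simply reports a connection error when no Viper server is reachable.

import requests

def query_viper_hash(protocol, host, port, lookup_hash, digest):
  """Queries a Viper server for a hash and returns the decoded JSON."""
  url = '{0:s}://{1:s}:{2:d}/file/find'.format(protocol, host, port)
  request_data = {lookup_hash: digest}
  try:
    response = requests.post(url, data=request_data, timeout=5)
    response.raise_for_status()
    return response.json()
  except requests.exceptions.RequestException as exception:
    print('Unable to query Viper with error: {0!s}'.format(exception))
    return None

# Placeholder values; adjust to a real Viper instance.
print(query_viper_hash(
    'http', '127.0.0.1', 8080, 'sha256',
    'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'))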
Looks up hashes in Viper using the Viper HTTP API.
Args:
hashes (list[str]): hashes to look up.
Returns:
list[HashAnalysis]: hash analysis.
Raises:
RuntimeError: If no host has been set for Viper.
|
def Analyze(self, hashes):
hash_analyses = []
for digest in hashes:
json_response = self._QueryHash(digest)
hash_analysis = interface.HashAnalysis(digest, json_response)
hash_analyses.append(hash_analysis)
return hash_analyses
| 288,504 |
Sets the protocol that will be used to query Viper.
Args:
protocol (str): protocol to use to query Viper. Either 'http' or 'https'.
Raises:
ValueError: if the protocol is not supported.
|
def SetProtocol(self, protocol):
if protocol not in self.SUPPORTED_PROTOCOLS:
raise ValueError('Unsupported protocol: {0!s}'.format(protocol))
self._protocol = protocol
| 288,505 |
Generates a list of strings that will be used in the event tag.
Args:
hash_information (dict[str, object]): JSON decoded contents of the result
of a Viper lookup, as produced by the ViperAnalyzer.
Returns:
list[str]: list of labels to apply to events.
|
def GenerateLabels(self, hash_information):
if not hash_information:
return ['viper_not_present']
projects = []
tags = []
for project, entries in iter(hash_information.items()):
if not entries:
continue
projects.append(project)
for entry in entries:
if entry['tags']:
tags.extend(entry['tags'])
if not projects:
return ['viper_not_present']
strings = ['viper_present']
for project_name in projects:
label = events.EventTag.CopyTextToLabel(
project_name, prefix='viper_project_')
strings.append(label)
for tag_name in tags:
label = events.EventTag.CopyTextToLabel(tag_name, prefix='viper_tag_')
strings.append(label)
return strings
| 288,507 |
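Given a decoded Viper response, labels are derived from the project names and entry tags. The sketch below uses a simplified stand-in for EventTag.CopyTextToLabel and a made-up response to show the resulting labels.

def copy_text_to_label(text, prefix=''):
  """Normalizes text into a label (simplified stand-in)."""
  text = '{0:s}{1:s}'.format(prefix, text)
  return ''.join(
      character if character.isalnum() else '_' for character in text)

def generate_labels(hash_information):
  """Generates viper_* labels from a decoded Viper lookup result."""
  if not hash_information:
    return ['viper_not_present']
  projects = []
  tags = []
  for project, entries in hash_information.items():
    if not entries:
      continue
    projects.append(project)
    for entry in entries:
      if entry.get('tags'):
        tags.extend(entry['tags'])
  if not projects:
    return ['viper_not_present']
  strings = ['viper_present']
  strings.extend(
      copy_text_to_label(project, prefix='viper_project_')
      for project in projects)
  strings.extend(copy_text_to_label(tag, prefix='viper_tag_') for tag in tags)
  return strings

# Made-up response: one matching entry in the "default" project.
sample = {'default': [{'tags': ['ransomware', 'packed']}], 'archive': []}
print(generate_labels(sample))
# ['viper_present', 'viper_project_default', 'viper_tag_ransomware',
#  'viper_tag_packed']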
Sets the protocol that will be used to query Viper.
Args:
protocol (str): protocol to use to query Viper. Either 'http' or 'https'.
Raises:
ValueError: If an invalid protocol is selected.
|
def SetProtocol(self, protocol):
protocol = protocol.lower().strip()
if protocol not in ['http', 'https']:
raise ValueError('Invalid protocol specified for Viper lookup')
self._analyzer.SetProtocol(protocol)
| 288,508 |
Processes a path specification.
Args:
extraction_worker (worker.ExtractionWorker): extraction worker.
parser_mediator (ParserMediator): parser mediator.
path_spec (dfvfs.PathSpec): path specification.
|
def _ProcessPathSpec(self, extraction_worker, parser_mediator, path_spec):
self._current_display_name = parser_mediator.GetDisplayNameForPathSpec(
path_spec)
try:
extraction_worker.ProcessPathSpec(parser_mediator, path_spec)
except KeyboardInterrupt:
self._abort = True
self._processing_status.aborted = True
if self._status_update_callback:
self._status_update_callback(self._processing_status)
# We cannot recover from a CacheFullError and abort processing when
# it is raised.
except dfvfs_errors.CacheFullError:
# TODO: signal engine of failure.
self._abort = True
logger.error((
'ABORT: detected cache full error while processing '
'path spec: {0:s}').format(self._current_display_name))
# All exceptions need to be caught here to prevent the worker
# from being killed by an uncaught exception.
except Exception as exception: # pylint: disable=broad-except
parser_mediator.ProduceExtractionWarning((
'unable to process path specification with error: '
'{0!s}').format(exception), path_spec=path_spec)
if getattr(self._processing_configuration, 'debug_output', False):
logger.warning(
'Unhandled exception while processing path spec: {0:s}.'.format(
self._current_display_name))
logger.exception(exception)
pdb.post_mortem()
| 288,510 |
Processes the sources.
Args:
source_path_specs (list[dfvfs.PathSpec]): path specifications of
the sources to process.
extraction_worker (worker.ExtractionWorker): extraction worker.
parser_mediator (ParserMediator): parser mediator.
storage_writer (StorageWriter): storage writer for a session storage.
filter_find_specs (Optional[list[dfvfs.FindSpec]]): find specifications
used in path specification extraction.
|
def _ProcessSources(
self, source_path_specs, extraction_worker, parser_mediator,
storage_writer, filter_find_specs=None):
if self._processing_profiler:
self._processing_profiler.StartTiming('process_sources')
number_of_consumed_sources = 0
self._UpdateStatus(
definitions.STATUS_INDICATOR_COLLECTING, '',
number_of_consumed_sources, storage_writer)
display_name = ''
path_spec_generator = self._path_spec_extractor.ExtractPathSpecs(
source_path_specs, find_specs=filter_find_specs,
recurse_file_system=False,
resolver_context=parser_mediator.resolver_context)
for path_spec in path_spec_generator:
if self._abort:
break
display_name = parser_mediator.GetDisplayNameForPathSpec(path_spec)
# TODO: determine if event sources should be DataStream or FileEntry
# or both.
event_source = event_sources.FileEntryEventSource(path_spec=path_spec)
storage_writer.AddEventSource(event_source)
self._UpdateStatus(
definitions.STATUS_INDICATOR_COLLECTING, display_name,
number_of_consumed_sources, storage_writer)
# Force the status update here to make sure the status is up to date.
self._UpdateStatus(
definitions.STATUS_INDICATOR_RUNNING, display_name,
number_of_consumed_sources, storage_writer, force=True)
if self._processing_profiler:
self._processing_profiler.StartTiming('get_event_source')
event_source = storage_writer.GetFirstWrittenEventSource()
if self._processing_profiler:
self._processing_profiler.StopTiming('get_event_source')
while event_source:
if self._abort:
break
self._ProcessPathSpec(
extraction_worker, parser_mediator, event_source.path_spec)
number_of_consumed_sources += 1
if self._guppy_memory_profiler:
self._guppy_memory_profiler.Sample()
self._UpdateStatus(
extraction_worker.processing_status, self._current_display_name,
number_of_consumed_sources, storage_writer)
if self._processing_profiler:
self._processing_profiler.StartTiming('get_event_source')
event_source = storage_writer.GetNextWrittenEventSource()
if self._processing_profiler:
self._processing_profiler.StopTiming('get_event_source')
if self._abort:
status = definitions.STATUS_INDICATOR_ABORTED
else:
status = definitions.STATUS_INDICATOR_COMPLETED
# Force the status update here to make sure the status is up to date
# on exit.
self._UpdateStatus(
status, '', number_of_consumed_sources, storage_writer, force=True)
if self._processing_profiler:
self._processing_profiler.StopTiming('process_sources')
| 288,511 |
Updates the processing status.
Args:
status (str): human readable status of the processing, for example 'Idle'.
display_name (str): human readable name of the file entry currently
being processed.
number_of_consumed_sources (int): number of consumed sources.
storage_writer (StorageWriter): storage writer for a session storage.
force (Optional[bool]): True if the update should be forced ignoring
the last status update time.
|
def _UpdateStatus(
self, status, display_name, number_of_consumed_sources, storage_writer,
force=False):
current_timestamp = time.time()
if not force and current_timestamp < (
self._last_status_update_timestamp + self._STATUS_UPDATE_INTERVAL):
return
if status == definitions.STATUS_INDICATOR_IDLE:
status = definitions.STATUS_INDICATOR_RUNNING
used_memory = self._process_information.GetUsedMemory() or 0
self._processing_status.UpdateForemanStatus(
self._name, status, self._pid, used_memory, display_name,
number_of_consumed_sources, storage_writer.number_of_event_sources, 0,
storage_writer.number_of_events, 0, 0, 0, 0, 0,
storage_writer.number_of_warnings)
if self._status_update_callback:
self._status_update_callback(self._processing_status)
self._last_status_update_timestamp = current_timestamp
| 288,512 |
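Status updates are rate limited: unless forced, an update is skipped if it arrives within the update interval of the previous one. A minimal sketch of that throttling, with an arbitrary interval chosen for the example:

import time

class StatusReporter(object):
  """Rate-limits status updates (illustrative sketch)."""

  _STATUS_UPDATE_INTERVAL = 0.5  # seconds

  def __init__(self):
    self._last_status_update_timestamp = 0.0

  def UpdateStatus(self, status, force=False):
    current_timestamp = time.time()
    if not force and current_timestamp < (
        self._last_status_update_timestamp + self._STATUS_UPDATE_INTERVAL):
      return False  # update skipped
    print('status: {0:s}'.format(status))
    self._last_status_update_timestamp = current_timestamp
    return True


reporter = StatusReporter()
print(reporter.UpdateStatus('collecting'))              # True, printed
print(reporter.UpdateStatus('collecting'))              # False, throttled
print(reporter.UpdateStatus('completed', force=True))   # True, forced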
Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
|
def GetMessages(self, formatter_mediator, event):
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter(
'Invalid event object - unsupported data type: {0:s}'.format(
event.data_type))
event_values = event.CopyToDict()
number_of_volumes = event_values.get('number_of_volumes', 0)
volume_serial_numbers = event_values.get('volume_serial_numbers', None)
volume_device_paths = event_values.get('volume_device_paths', None)
volumes_strings = []
for volume_index in range(0, number_of_volumes):
if not volume_serial_numbers:
volume_serial_number = 'UNKNOWN'
else:
volume_serial_number = volume_serial_numbers[volume_index]
if not volume_device_paths:
volume_device_path = 'UNKNOWN'
else:
volume_device_path = volume_device_paths[volume_index]
volumes_strings.append((
'volume: {0:d} [serial number: 0x{1:08X}, device path: '
'{2:s}]').format(
volume_index + 1, volume_serial_number, volume_device_path))
if volumes_strings:
event_values['volumes_string'] = ', '.join(volumes_strings)
return self._ConditionalFormatMessages(event_values)
| 288,514 |
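The formatter builds one descriptive string per volume from two optional parallel lists, substituting 'UNKNOWN' where a list is missing. The loop below is a hypothetical re-implementation of that intent on example data; note that it pre-formats the serial number so that both integer and missing values are handled.

def build_volume_strings(number_of_volumes, serial_numbers, device_paths):
  """Builds per-volume description strings from optional parallel lists."""
  volumes_strings = []
  for index in range(number_of_volumes):
    if serial_numbers:
      serial_number = '0x{0:08X}'.format(serial_numbers[index])
    else:
      serial_number = 'UNKNOWN'
    device_path = device_paths[index] if device_paths else 'UNKNOWN'
    volumes_strings.append(
        'volume: {0:d} [serial number: {1:s}, device path: {2:s}]'.format(
            index + 1, serial_number, device_path))
  return ', '.join(volumes_strings)

print(build_volume_strings(
    2, [0x12345678, 0x9ABCDEF0],
    ['\\\\?\\Volume{a}\\', '\\\\?\\Volume{b}\\']))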
Reads the file header.
Args:
file_object (file): file-like object.
Returns:
keychain_file_header: file header.
Raises:
ParseError: if the file header cannot be read.
|
def _ReadFileHeader(self, file_object):
data_type_map = self._GetDataTypeMap('keychain_file_header')
file_header, _ = self._ReadStructureFromFileObject(
file_object, 0, data_type_map)
if file_header.signature != self._FILE_SIGNATURE:
raise errors.ParseError('Unsupported file signature.')
if (file_header.major_format_version != self._MAJOR_VERSION or
file_header.minor_format_version != self._MINOR_VERSION):
raise errors.ParseError('Unsupported format version: {0:d}.{1:d}'.format(
file_header.major_format_version, file_header.minor_format_version))
return file_header
| 288,522 |
Reads the record.
Args:
tables (dict[int, KeychainDatabaseTable]): tables per identifier.
file_object (file): file-like object.
record_offset (int): offset of the record relative to the start of
the file.
record_type (int): record type, which should correspond to a relation
identifier of a table defined in the schema.
Raises:
ParseError: if the record cannot be read.
|
def _ReadRecord(self, tables, file_object, record_offset, record_type):
table = tables.get(record_type, None)
if not table:
raise errors.ParseError(
'Missing table for relation identifier: 0x{0:08x}'.format(record_type))
record_header = self._ReadRecordHeader(file_object, record_offset)
record = collections.OrderedDict()
if table.columns:
attribute_value_offsets = self._ReadRecordAttributeValueOffset(
file_object, record_offset + 24, len(table.columns))
file_offset = file_object.tell()
record_data_offset = file_offset - record_offset
record_data_size = record_header.data_size - (file_offset - record_offset)
record_data = file_object.read(record_data_size)
if record_header.key_data_size > 0:
record['_key_'] = record_data[:record_header.key_data_size]
if table.columns:
for index, column in enumerate(table.columns):
attribute_data_read_function = self._ATTRIBUTE_DATA_READ_FUNCTIONS.get(
column.attribute_data_type, None)
if attribute_data_read_function:
attribute_data_read_function = getattr(
self, attribute_data_read_function, None)
if not attribute_data_read_function:
attribute_value = None
else:
attribute_value = attribute_data_read_function(
record_data, record_offset, record_data_offset,
attribute_value_offsets[index])
record[column.attribute_name] = attribute_value
table.records.append(record)
| 288,523 |
Reads the record attribute value offsets.
Args:
file_object (file): file-like object.
file_offset (int): offset of the record attribute values offsets relative
to the start of the file.
number_of_attribute_values (int): number of attribute values.
Returns:
keychain_record_attribute_value_offsets: record attribute value offsets.
Raises:
ParseError: if the record attribute value offsets cannot be read.
|
def _ReadRecordAttributeValueOffset(
self, file_object, file_offset, number_of_attribute_values):
offsets_data_size = number_of_attribute_values * 4
offsets_data = file_object.read(offsets_data_size)
context = dtfabric_data_maps.DataTypeMapContext(values={
'number_of_attribute_values': number_of_attribute_values})
data_type_map = self._GetDataTypeMap(
'keychain_record_attribute_value_offsets')
try:
attribute_value_offsets = self._ReadStructureFromByteStream(
offsets_data, file_offset, data_type_map, context=context)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to map record attribute value offsets data at offset: '
'0x{0:08x} with error: {1!s}').format(file_offset, exception))
return attribute_value_offsets
| 288,524 |
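Each record stores one 32-bit offset per attribute value, so the offsets block is simply number_of_attribute_values * 4 bytes. A struct-based sketch of reading such a block is shown below; big-endian byte order is an assumption stated here, and the sample offsets are arbitrary.

import io
import struct

def read_attribute_value_offsets(file_object, number_of_attribute_values):
  """Reads an array of 32-bit attribute value offsets."""
  offsets_data = file_object.read(number_of_attribute_values * 4)
  # '>' selects big-endian, which the keychain format is assumed to use here.
  format_string = '>{0:d}I'.format(number_of_attribute_values)
  return struct.unpack(format_string, offsets_data)

# Three offsets: 0x2d, 0x31 and 0x35, encoded as big-endian 32-bit values.
data = struct.pack('>3I', 0x2d, 0x31, 0x35)
print(read_attribute_value_offsets(io.BytesIO(data), 3))  # (45, 49, 53)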
Reads the record header.
Args:
file_object (file): file-like object.
record_header_offset (int): offset of the record header relative to
the start of the file.
Returns:
keychain_record_header: record header.
Raises:
ParseError: if the record header cannot be read.
|
def _ReadRecordHeader(self, file_object, record_header_offset):
data_type_map = self._GetDataTypeMap('keychain_record_header')
record_header, _ = self._ReadStructureFromFileObject(
file_object, record_header_offset, data_type_map)
return record_header
| 288,525 |
Reads a schema attributes (CSSM_DL_DB_SCHEMA_ATTRIBUTES) record.
Args:
tables (dict[int, KeychainDatabaseTable]): tables per identifier.
file_object (file): file-like object.
record_offset (int): offset of the record relative to the start of
the file.
Raises:
ParseError: if the record cannot be read.
|
def _ReadRecordSchemaAttributes(self, tables, file_object, record_offset):
record_header = self._ReadRecordHeader(file_object, record_offset)
attribute_value_offsets = self._ReadRecordAttributeValueOffset(
file_object, record_offset + 24, 6)
file_offset = file_object.tell()
attribute_values_data_offset = file_offset - record_offset
attribute_values_data_size = record_header.data_size - (
file_offset - record_offset)
attribute_values_data = file_object.read(attribute_values_data_size)
relation_identifier = self._ReadAttributeValueInteger(
attribute_values_data, record_offset, attribute_values_data_offset,
attribute_value_offsets[0])
attribute_identifier = self._ReadAttributeValueInteger(
attribute_values_data, record_offset, attribute_values_data_offset,
attribute_value_offsets[1])
attribute_name_data_type = self._ReadAttributeValueInteger(
attribute_values_data, record_offset, attribute_values_data_offset,
attribute_value_offsets[2])
attribute_name = self._ReadAttributeValueString(
attribute_values_data, record_offset, attribute_values_data_offset,
attribute_value_offsets[3])
# TODO: handle attribute_value_offsets[4]
attribute_data_type = self._ReadAttributeValueInteger(
attribute_values_data, record_offset, attribute_values_data_offset,
attribute_value_offsets[5])
table = tables.get(relation_identifier, None)
if not table:
raise errors.ParseError(
'Missing table for relation identifier: 0x{0:08x}'.format(
relation_identifier))
if attribute_name is None and attribute_value_offsets[1] != 0:
attribute_value_offset = attribute_value_offsets[1]
attribute_value_offset -= attribute_values_data_offset + 1
attribute_name = attribute_values_data[
attribute_value_offset:attribute_value_offset + 4]
attribute_name = attribute_name.decode('ascii')
column = KeychainDatabaseColumn()
column.attribute_data_type = attribute_data_type
column.attribute_identifier = attribute_identifier
column.attribute_name = attribute_name
table.columns.append(column)
table = tables.get(self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_ATTRIBUTES, None)
if not table:
raise errors.ParseError('Missing CSSM_DL_DB_SCHEMA_ATTRIBUTES table.')
record = collections.OrderedDict({
'RelationID': relation_identifier,
'AttributeID': attribute_identifier,
'AttributeNameFormat': attribute_name_data_type,
'AttributeName': attribute_name,
'AttributeFormat': attribute_data_type})
table.records.append(record)
| 288,526 |
Reads a schema indexes (CSSM_DL_DB_SCHEMA_INDEXES) record.
Args:
tables (dict[int, KeychainDatabaseTable]): tables per identifier.
file_object (file): file-like object.
record_offset (int): offset of the record relative to the start of
the file.
Raises:
ParseError: if the record cannot be read.
|
def _ReadRecordSchemaIndexes(self, tables, file_object, record_offset):
_ = self._ReadRecordHeader(file_object, record_offset)
attribute_value_offsets = self._ReadRecordAttributeValueOffset(
file_object, record_offset + 24, 5)
if attribute_value_offsets != (0x2d, 0x31, 0x35, 0x39, 0x3d):
raise errors.ParseError('Unsupported record attribute value offsets')
file_offset = file_object.tell()
data_type_map = self._GetDataTypeMap('keychain_record_schema_indexes')
record_values, _ = self._ReadStructureFromFileObject(
file_object, file_offset, data_type_map)
if record_values.relation_identifier not in tables:
raise errors.ParseError(
'CSSM_DL_DB_SCHEMA_INDEXES defines relation identifier not defined '
'in CSSM_DL_DB_SCHEMA_INFO.')
table = tables.get(self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INDEXES, None)
if not table:
raise errors.ParseError('Missing CSSM_DL_DB_SCHEMA_INDEXES table.')
record = collections.OrderedDict({
'RelationID': record_values.relation_identifier,
'IndexID': record_values.index_identifier,
'AttributeID': record_values.attribute_identifier,
'IndexType': record_values.index_type,
'IndexedDataLocation': record_values.index_data_location})
table.records.append(record)
| 288,527 |
Reads a schema information (CSSM_DL_DB_SCHEMA_INFO) record.
Args:
tables (dict[int, KeychainDatabaseTable]): tables per identifier.
file_object (file): file-like object.
record_offset (int): offset of the record relative to the start of
the file.
Raises:
ParseError: if the record cannot be read.
|
def _ReadRecordSchemaInformation(self, tables, file_object, record_offset):
_ = self._ReadRecordHeader(file_object, record_offset)
attribute_value_offsets = self._ReadRecordAttributeValueOffset(
file_object, record_offset + 24, 2)
if attribute_value_offsets != (0x21, 0x25):
raise errors.ParseError('Unsupported record attribute value offsets')
file_offset = file_object.tell()
data_type_map = self._GetDataTypeMap('keychain_record_schema_information')
record_values, _ = self._ReadStructureFromFileObject(
file_object, file_offset, data_type_map)
relation_name = record_values.relation_name.decode('ascii')
table = KeychainDatabaseTable()
table.relation_identifier = record_values.relation_identifier
table.relation_name = relation_name
tables[table.relation_identifier] = table
table = tables.get(self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INFO, None)
if not table:
raise errors.ParseError('Missing CSSM_DL_DB_SCHEMA_INFO table.')
record = collections.OrderedDict({
'RelationID': record_values.relation_identifier,
'RelationName': relation_name})
table.records.append(record)
| 288,528 |
Reads the table.
Args:
tables (dict[int, KeychainDatabaseTable]): tables per identifier.
file_object (file): file-like object.
table_offset (int): offset of the table relative to the start of
the file.
Raises:
ParseError: if the table cannot be read.
|
def _ReadTable(self, tables, file_object, table_offset):
table_header = self._ReadTableHeader(file_object, table_offset)
for record_offset in table_header.record_offsets:
if record_offset == 0:
continue
record_offset += table_offset
if table_header.record_type == self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INFO:
self._ReadRecordSchemaInformation(tables, file_object, record_offset)
elif table_header.record_type == (
self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INDEXES):
self._ReadRecordSchemaIndexes(tables, file_object, record_offset)
elif table_header.record_type == (
self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_ATTRIBUTES):
self._ReadRecordSchemaAttributes(tables, file_object, record_offset)
else:
self._ReadRecord(
tables, file_object, record_offset, table_header.record_type)
| 288,529 |
Reads the table header.
Args:
file_object (file): file-like object.
table_header_offset (int): offset of the tables header relative to
the start of the file.
Returns:
keychain_table_header: table header.
Raises:
ParseError: if the table header cannot be read.
|
def _ReadTableHeader(self, file_object, table_header_offset):
data_type_map = self._GetDataTypeMap('keychain_table_header')
table_header, _ = self._ReadStructureFromFileObject(
file_object, table_header_offset, data_type_map)
return table_header
| 288,530 |
Reads the tables array.
Args:
file_object (file): file-like object.
tables_array_offset (int): offset of the tables array relative to
the start of the file.
Returns:
dict[int, KeychainDatabaseTable]: tables per identifier.
Raises:
ParseError: if the tables array cannot be read.
|
def _ReadTablesArray(self, file_object, tables_array_offset):
# TODO: implement https://github.com/libyal/dtfabric/issues/12 and update
# keychain_tables_array definition.
data_type_map = self._GetDataTypeMap('keychain_tables_array')
tables_array, _ = self._ReadStructureFromFileObject(
file_object, tables_array_offset, data_type_map)
tables = collections.OrderedDict()
for table_offset in tables_array.table_offsets:
self._ReadTable(tables, file_object, tables_array_offset + table_offset)
return tables
| 288,531 |
Parses a date time value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
date_time_value (str): date time value
(CSSM_DB_ATTRIBUTE_FORMAT_TIME_DATE) in the format: "YYYYMMDDhhmmssZ".
Returns:
dfdatetime.TimeElements: date and time extracted from the value or None
if the value does not represent a valid date and time.
|
def _ParseDateTimeValue(self, parser_mediator, date_time_value):
if not date_time_value or date_time_value[14:15] != 'Z':
parser_mediator.ProduceExtractionWarning(
'invalid date and time value: {0!s}'.format(date_time_value))
return None
try:
year = int(date_time_value[0:4], 10)
month = int(date_time_value[4:6], 10)
day_of_month = int(date_time_value[6:8], 10)
hours = int(date_time_value[8:10], 10)
minutes = int(date_time_value[10:12], 10)
seconds = int(date_time_value[12:14], 10)
except (TypeError, ValueError):
parser_mediator.ProduceExtractionWarning(
'invalid date and time value: {0!s}'.format(date_time_value))
return None
time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)
try:
return dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date and time value: {0!s}'.format(date_time_value))
return None
| 288,532 |
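The keychain stores dates as fixed-width 'YYYYMMDDhhmmssZ' strings. The same parsing can be expressed with the standard library, as in the sketch below; the sample value is made up.

import datetime

def parse_keychain_timestamp(date_time_value):
  """Parses a 'YYYYMMDDhhmmssZ' value into a UTC datetime, or returns None."""
  if not date_time_value or date_time_value[14:15] != 'Z':
    return None
  try:
    return datetime.datetime.strptime(
        date_time_value[:14], '%Y%m%d%H%M%S').replace(
            tzinfo=datetime.timezone.utc)
  except ValueError:
    return None

print(parse_keychain_timestamp('20140131154510Z'))  # 2014-01-31 15:45:10+00:00
print(parse_keychain_timestamp('not a timestamp'))  # None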
Parses a binary data value as a string.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
binary_data_value (bytes): binary data value
(CSSM_DB_ATTRIBUTE_FORMAT_BLOB).
Returns:
str: binary data value formatted as a string or None if no string could
be extracted or binary data value is None (NULL).
|
def _ParseBinaryDataAsString(self, parser_mediator, binary_data_value):
if not binary_data_value:
return None
try:
return binary_data_value.decode('utf-8')
except UnicodeDecodeError:
parser_mediator.ProduceExtractionWarning(
'invalid binary data string value: {0:s}'.format(
repr(binary_data_value)))
return None
| 288,533 |
Extracts the information from an application password record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
record (dict[str, object]): database record.
Raises:
ParseError: if an application password record cannot be parsed.
|
def _ParseApplicationPasswordRecord(self, parser_mediator, record):
key = record.get('_key_', None)
if not key or not key.startswith(b'ssgp'):
raise errors.ParseError((
'Unsupported application password record key value does not start '
'with: "ssgp".'))
event_data = KeychainApplicationRecordEventData()
event_data.account_name = self._ParseBinaryDataAsString(
parser_mediator, record['acct'])
event_data.comments = self._ParseBinaryDataAsString(
parser_mediator, record['crtr'])
event_data.entry_name = self._ParseBinaryDataAsString(
parser_mediator, record['PrintName'])
ssgp_hash = codecs.encode(key[4:], 'hex')
event_data.ssgp_hash = codecs.decode(ssgp_hash, 'utf-8')
event_data.text_description = self._ParseBinaryDataAsString(
parser_mediator, record['desc'])
date_time = self._ParseDateTimeValue(parser_mediator, record['cdat'])
if date_time:
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
date_time = self._ParseDateTimeValue(parser_mediator, record['mdat'])
if date_time:
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,534 |
Extracts the information from an Internet password record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
record (dict[str, object]): database record.
Raises:
ParseError: if an Internet password record cannot be parsed.
|
def _ParseInternetPasswordRecord(self, parser_mediator, record):
key = record.get('_key_', None)
if not key or not key.startswith(b'ssgp'):
raise errors.ParseError((
'Unsupported Internet password record key value does not start '
'with: "ssgp".'))
protocol_string = codecs.decode('{0:08x}'.format(record['ptcl']), 'hex')
protocol_string = codecs.decode(protocol_string, 'utf-8')
event_data = KeychainInternetRecordEventData()
event_data.account_name = self._ParseBinaryDataAsString(
parser_mediator, record['acct'])
event_data.comments = self._ParseBinaryDataAsString(
parser_mediator, record['crtr'])
event_data.entry_name = self._ParseBinaryDataAsString(
parser_mediator, record['PrintName'])
event_data.protocol = self._PROTOCOL_TRANSLATION_DICT.get(
protocol_string, protocol_string)
ssgp_hash = codecs.encode(key[4:], 'hex')
event_data.ssgp_hash = codecs.decode(ssgp_hash, 'utf-8')
event_data.text_description = self._ParseBinaryDataAsString(
parser_mediator, record['desc'])
event_data.type_protocol = self._ParseBinaryDataAsString(
parser_mediator, record['atyp'])
event_data.where = self._ParseBinaryDataAsString(
parser_mediator, record['srvr'])
date_time = self._ParseDateTimeValue(parser_mediator, record['cdat'])
if date_time:
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
date_time = self._ParseDateTimeValue(parser_mediator, record['mdat'])
if date_time:
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 288,535 |
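The 'ptcl' attribute packs a four-character protocol code into a 32-bit integer, for example 0x68747470 is 'http'. The decoding used above can be reproduced with the standard library as follows; the sample codes are illustrative.

import codecs

def protocol_code_to_string(protocol_code):
  """Converts a packed 32-bit protocol code such as 0x68747470 to 'http'."""
  hex_string = '{0:08x}'.format(protocol_code)
  return codecs.decode(hex_string, 'hex').decode('utf-8')

print(protocol_code_to_string(0x68747470))  # http
print(protocol_code_to_string(0x73736820))  # 'ssh ' (padded with a space)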
Parses a MacOS keychain file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
def ParseFileObject(self, parser_mediator, file_object):
try:
file_header = self._ReadFileHeader(file_object)
except (ValueError, errors.ParseError):
raise errors.UnableToParseFile('Unable to parse file header.')
tables = self._ReadTablesArray(file_object, file_header.tables_array_offset)
table = tables.get(self._RECORD_TYPE_APPLICATION_PASSWORD, None)
if table:
for record in table.records:
self._ParseApplicationPasswordRecord(parser_mediator, record)
table = tables.get(self._RECORD_TYPE_INTERNET_PASSWORD, None)
if table:
for record in table.records:
self._ParseInternetPasswordRecord(parser_mediator, record)
| 288,536 |
Initializes the output module object.
Args:
output_mediator (OutputMediator): mediates interactions between output
modules and other components, such as storage and dfvfs.
Raises:
ValueError: when there are unused keyword arguments.
|
def __init__(self, output_mediator):
super(Shared4n6TimeOutputModule, self).__init__(output_mediator)
self._append = False
self._evidence = '-'
self._fields = self._DEFAULT_FIELDS
self._set_status = None
| 288,537 |
Formats the date and time.
Args:
event (EventObject): event.
Returns:
str: date and time string or "N/A" if no event timestamp is available.
|
def _FormatDateTime(self, event):
if not event.timestamp:
return 'N/A'
# TODO: preserve dfdatetime as an object.
# TODO: add support for self._output_mediator.timezone
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=event.timestamp)
year, month, day_of_month = date_time.GetDate()
hours, minutes, seconds = date_time.GetTimeOfDay()
try:
return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'.format(
year, month, day_of_month, hours, minutes, seconds)
except (TypeError, ValueError):
self._ReportEventError(event, (
'unable to copy timestamp: {0!s} to a human readable date and '
'time. Defaulting to: "0000-00-00 00:00:00"').format(event.timestamp))
return '0000-00-00 00:00:00'
| 288,538 |
Sanitizes the event for use in 4n6time.
Args:
event (EventObject): event.
Returns:
dict[str, object]: dictionary containing the sanitized event values.
Raises:
NoFormatterFound: If no event formatter can be found to match the data
type in the event object.
|
def _GetSanitizedEventValues(self, event):
data_type = getattr(event, 'data_type', 'UNKNOWN')
event_formatter = self._output_mediator.GetEventFormatter(event)
if not event_formatter:
raise errors.NoFormatterFound(
'Unable to find event formatter for: {0:s}.'.format(data_type))
message, _ = self._output_mediator.GetFormattedMessages(event)
if message is None:
raise errors.NoFormatterFound(
'Unable to find event formatter for: {0:s}.'.format(data_type))
source_short, source = self._output_mediator.GetFormattedSources(event)
if source is None or source_short is None:
raise errors.NoFormatterFound(
'Unable to find event formatter for: {0:s}.'.format(data_type))
datetime_string = self._FormatDateTime(event)
format_variables = self._output_mediator.GetFormatStringAttributeNames(
event)
if format_variables is None:
raise errors.NoFormatterFound(
'Unable to find event formatter for: {0:s}.'.format(data_type))
extra_attributes = []
for attribute_name, attribute_value in sorted(event.GetAttributes()):
if (attribute_name in definitions.RESERVED_VARIABLE_NAMES or
attribute_name in format_variables):
continue
extra_attributes.append(
'{0:s}: {1!s} '.format(attribute_name, attribute_value))
extra_attributes = ' '.join(extra_attributes)
inode = event.inode
if inode is None and hasattr(event, 'pathspec'):
inode = getattr(event.pathspec, 'inode', '-')
if inode is None:
inode = '-'
tags = None
if getattr(event, 'tag', None):
tags = getattr(event.tag, 'tags', None)
taglist = ''
if isinstance(tags, (list, tuple)):
taglist = ','.join(tags)
offset = event.offset
if offset is None:
offset = 0
row = {
'timezone': '{0!s}'.format(self._output_mediator.timezone),
'MACB': self._output_mediator.GetMACBRepresentation(event),
'source': source_short,
'sourcetype': source,
'type': event.timestamp_desc or '-',
'user': getattr(event, 'username', '-'),
'host': getattr(event, 'hostname', '-'),
'description': message,
'filename': getattr(event, 'filename', '-'),
'inode': inode,
'notes': getattr(event, 'notes', '-'),
'format': getattr(event, 'parser', '-'),
'extra': extra_attributes,
'datetime': datetime_string,
'reportnotes': '',
'inreport': '',
'tag': taglist,
'offset': offset,
'vss_store_number': self._GetVSSNumber(event),
'URL': getattr(event, 'url', '-'),
'record_number': getattr(event, 'record_number', 0),
'event_identifier': getattr(event, 'event_identifier', '-'),
'event_type': getattr(event, 'event_type', '-'),
'source_name': getattr(event, 'source_name', '-'),
'user_sid': getattr(event, 'user_sid', '-'),
'computer_name': getattr(event, 'computer_name', '-'),
'evidence': self._evidence}
return row
| 288,539 |