docstring (string, lengths 52–499) | function (string, lengths 67–35.2k) | __index_level_0__ (int64, 52.6k–1.16M) |
---|---|---|
Check the equality of two nodes by comparing the attribute named attr_name on each.
Args:
node_a (astroid.node): first node to compare.
node_b (astroid.node): second node to compare.
attr_name (str): name of the nodes' attribute to use for comparison.
Returns:
bool: True if node_a.attr_name == node_b.attr_name, False otherwise.
|
def _check_arg_equality(node_a, node_b, attr_name):
return getattr(node_a, attr_name) == getattr(node_b, attr_name)
| 274,914 |
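A minimal usage sketch for _check_arg_equality, assuming astroid is installed; the snippet and node names are illustrative only, not from the source:

import astroid

node_a = astroid.extract_node("foo")  # a Name node whose .name is "foo"
node_b = astroid.extract_node("foo")
print(_check_arg_equality(node_a, node_b, "name"))  # True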
Check that all return statements inside a function are consistent.
Return statements are consistent if:
- all return statements are explicit and there is no implicit return; or
- all return statements are empty and there may be an implicit return.
Args:
node (astroid.FunctionDef): the function holding the return statements.
|
def _check_consistent_returns(self, node):
# Explicit return statements are those that return a non-None value.
explicit_returns = [
_node for _node in self._return_nodes[node.name] if _node.value is not None
]
if not explicit_returns:
return
if len(explicit_returns) == len(
self._return_nodes[node.name]
) and self._is_node_return_ended(node):
return
self.add_message("inconsistent-return-statements", node=node)
| 275,046 |
Check if the node ends with an explicit return statement.
Args:
node (astroid.NodeNG): node to be checked.
Returns:
bool: True if the node ends with an explicit return statement, False otherwise.
|
def _is_node_return_ended(self, node):
# Recursion base case
if isinstance(node, astroid.Return):
return True
if isinstance(node, astroid.Call):
try:
funcdef_node = node.func.inferred()[0]
if self._is_function_def_never_returning(funcdef_node):
return True
except astroid.InferenceError:
pass
# Avoid checking inside while loops since we don't know
# whether they will ever complete.
if isinstance(node, astroid.While):
return True
if isinstance(node, astroid.Raise):
# a Raise statement doesn't need to end with a return statement,
# but if the raised exception is handled, then the handler has to
# end with a return statement.
if not node.exc:
# Ignore bare raises
return True
if not utils.is_node_inside_try_except(node):
# If the raise statement is not inside a try/except statement
# then the exception is raised and cannot be caught. No need
# to infer it.
return True
exc = utils.safe_infer(node.exc)
if exc is None or exc is astroid.Uninferable:
return False
exc_name = exc.pytype().split(".")[-1]
handlers = utils.get_exception_handlers(node, exc_name)
handlers = list(handlers) if handlers is not None else []
if handlers:
# among all the handlers handling the exception at least one
# must end with a return statement
return any(
self._is_node_return_ended(_handler) for _handler in handlers
)
# if no handlers handle the exception then it's ok
return True
if isinstance(node, astroid.If):
# An if statement is return-ended if both of its branches are return-ended:
# the body part and the orelse part must each end with a return.
# Do not check whether inner function definitions are return-ended.
is_orelse_returning = any(
self._is_node_return_ended(_ore)
for _ore in node.orelse
if not isinstance(_ore, astroid.FunctionDef)
)
is_if_returning = any(
self._is_node_return_ended(_ifn)
for _ifn in node.body
if not isinstance(_ifn, astroid.FunctionDef)
)
return is_if_returning and is_orelse_returning
# Recurse on the children of the node, except for except handlers,
# because one cannot be sure that a handler will actually be executed.
return any(
self._is_node_return_ended(_child)
for _child in node.get_children()
if not isinstance(_child, astroid.ExceptHandler)
)
| 275,047 |
Check that there are no unnecessary parens after a keyword.
Parens are unnecessary if there is exactly one balanced outer pair on a
line, and it is followed by a colon, and contains no commas (i.e. is not a
tuple).
Args:
tokens: list of Tokens; the entire list of Tokens.
start: int; the position of the keyword in the token list.
|
def _check_keyword_parentheses(self, tokens, start):
# If the next token is not a paren, we're fine.
if self._inside_brackets(":") and tokens[start][1] == "for":
self._pop_token()
if tokens[start + 1][1] != "(":
return
found_and_or = False
depth = 0
keyword_token = str(tokens[start][1])
line_num = tokens[start][2][0]
for i in range(start, len(tokens) - 1):
token = tokens[i]
# If we hit a newline, then assume any parens were for continuation.
if token[0] == tokenize.NL:
return
if token[1] == "(":
depth += 1
elif token[1] == ")":
depth -= 1
if depth:
continue
# ')' can't happen after if (foo), since it would be a syntax error.
if tokens[i + 1][1] in (":", ")", "]", "}", "in") or tokens[i + 1][
0
] in (tokenize.NEWLINE, tokenize.ENDMARKER, tokenize.COMMENT):
# The empty tuple () is always accepted.
if i == start + 2:
return
if keyword_token == "not":
if not found_and_or:
self.add_message(
"superfluous-parens", line=line_num, args=keyword_token
)
elif keyword_token in ("return", "yield"):
self.add_message(
"superfluous-parens", line=line_num, args=keyword_token
)
elif keyword_token not in self._keywords_with_parens:
if not found_and_or:
self.add_message(
"superfluous-parens", line=line_num, args=keyword_token
)
return
elif depth == 1:
# This is a tuple, which is always acceptable.
if token[1] == ",":
return
# 'and' and 'or' are the only boolean operators with lower precedence
# than 'not', so parens are only required when they are found.
if token[1] in ("and", "or"):
found_and_or = True
# A yield inside an expression must always be in parentheses,
# quit early without error.
elif token[1] == "yield":
return
# A generator expression always has a 'for' token in it, and
# the 'for' token is only legal inside parens when it is in a
# generator expression. The parens are necessary here, so bail
# without an error.
elif token[1] == "for":
return
| 275,123 |
Determines if a BoundMethod node represents a method call.
Args:
func (astroid.BoundMethod): The BoundMethod AST node to check.
types (Optional[Sequence[str]]): Optional sequence of caller type names to restrict the check.
methods (Optional[Sequence[str]]): Optional sequence of method names to restrict the check.
Returns:
bool: True if the node represents a method call for the given type and
method names, False otherwise.
|
def is_method_call(func, types=(), methods=()):
return (
isinstance(func, astroid.BoundMethod)
and isinstance(func.bound, astroid.Instance)
and (func.bound.name in types if types else True)
and (func.name in methods if methods else True)
)
| 275,226 |
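A hedged usage sketch for is_method_call; it assumes astroid can infer the bound method of a str literal, and the snippet is illustrative rather than taken from pylint's test suite:

import astroid

call = astroid.extract_node('"{}".format(42)')
bound = next(call.func.infer())  # expected to be an astroid.BoundMethod for str.format
print(is_method_call(bound, types=("str",), methods=("format",)))  # True
print(is_method_call(bound, types=("dict",)))                      # False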
Checks if node represents a string with complex formatting specs.
Args:
node (astroid.node_classes.NodeNG): AST node to check
Returns:
bool: True if inferred string uses complex formatting, False otherwise
|
def is_complex_format_str(node):
inferred = utils.safe_infer(node)
if inferred is None or not isinstance(inferred.value, str):
return True
try:
parsed = list(string.Formatter().parse(inferred.value))
except ValueError:
# This format string is invalid
return False
for _, _, format_spec, _ in parsed:
if format_spec:
return True
return False
| 275,227 |
Checks that the function call is not format_string.format().
Args:
node (astroid.node_classes.Call):
Call AST node to be checked.
|
def _check_call_func(self, node):
func = utils.safe_infer(node.func)
types = ("str", "unicode")
methods = ("format",)
if is_method_call(func, types, methods) and not is_complex_format_str(
func.bound
):
self.add_message("logging-format-interpolation", node=node)
| 275,233 |
Checks that format string tokens match the supplied arguments.
Args:
node (astroid.node_classes.NodeNG): AST node to be checked.
format_arg (int): Index of the format string in the node arguments.
|
def _check_format_string(self, node, format_arg):
num_args = _count_supplied_tokens(node.args[format_arg + 1 :])
if not num_args:
# If no args were supplied the string is not interpolated and can contain
# formatting characters - it's used verbatim. Don't check any further.
return
format_string = node.args[format_arg].value
if not isinstance(format_string, str):
# If the log format is constant non-string (e.g. logging.debug(5)),
# ensure there are no arguments.
required_num_args = 0
else:
try:
if self._format_style == "old":
keyword_args, required_num_args, _, _ = utils.parse_format_string(
format_string
)
if keyword_args:
# Keyword checking on logging strings is complicated by
# special keywords - out of scope.
return
elif self._format_style == "new":
keyword_arguments, implicit_pos_args, explicit_pos_args = utils.parse_format_method_string(
format_string
)
keyword_args_cnt = len(
set(k for k, l in keyword_arguments if not isinstance(k, int))
)
required_num_args = (
keyword_args_cnt + implicit_pos_args + explicit_pos_args
)
except utils.UnsupportedFormatCharacter as ex:
char = format_string[ex.index]
self.add_message(
"logging-unsupported-format",
node=node,
args=(char, ord(char), ex.index),
)
return
except utils.IncompleteFormatString:
self.add_message("logging-format-truncated", node=node)
return
if num_args > required_num_args:
self.add_message("logging-too-many-args", node=node)
elif num_args < required_num_args:
self.add_message("logging-too-few-args", node=node)
| 275,234 |
Returns the loop node that holds the break node in arguments.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node.
|
def _get_break_loop_node(break_node):
loop_nodes = (astroid.For, astroid.While)
parent = break_node.parent
while not isinstance(parent, loop_nodes) or break_node in getattr(
parent, "orelse", []
):
break_node = parent
parent = parent.parent
if parent is None:
break
return parent
| 275,238 |
Returns True if a loop may end with a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
bool: True if the loop may end with a break statement, False otherwise.
|
def _loop_exits_early(loop):
loop_nodes = (astroid.For, astroid.While)
definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
| 275,239 |
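An illustrative sketch for _loop_exits_early, assuming astroid is available and that the '#@' marker extracts the outer For node; a break belonging to an inner loop should not count as an early exit of the outer one:

import astroid

outer_loop = astroid.extract_node('''
for i in range(3):  #@
    for j in range(3):
        break
''')
print(_loop_exits_early(outer_loop))  # False: the break exits the inner loop only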
Check whether the instance conforms to the given format.
Arguments:
instance (*any primitive type*, i.e. str, number, bool):
The instance to check
format (str):
The format that instance should conform to
Raises:
FormatError: if the instance does not conform to ``format``
|
def check(self, instance, format):
if format not in self.checkers:
return
func, raises = self.checkers[format]
result, cause = None, None
try:
result = func(instance)
except raises as e:
cause = e
if not result:
raise FormatError(
"%r is not a %r" % (instance, format), cause=cause,
)
| 278,677 |
Check whether the instance conforms to the given format.
Arguments:
instance (*any primitive type*, i.e. str, number, bool):
The instance to check
format (str):
The format that instance should conform to
Returns:
bool: whether it conformed
|
def conforms(self, instance, format):
try:
self.check(instance, format)
except FormatError:
return False
else:
return True
| 278,678 |
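A brief usage sketch for conforms against jsonschema's FormatChecker; the 'email' format is part of the built-in checkers, so no optional dependency should be needed:

from jsonschema import FormatChecker

checker = FormatChecker()
print(checker.conforms("user@example.com", "email"))  # True
print(checker.conforms("not-an-email", "email"))      # False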
Generate newer-style type checks out of JSON-type-name-to-type mappings.
Arguments:
types (dict):
A mapping of type names to their Python types
Returns:
A dictionary of definitions to pass to `TypeChecker`
|
def _generate_legacy_type_checks(types=()):
types = dict(types)
def gen_type_check(pytypes):
pytypes = _utils.flatten(pytypes)
def type_check(checker, instance):
if isinstance(instance, bool):
if bool not in pytypes:
return False
return isinstance(instance, pytypes)
return type_check
definitions = {}
for typename, pytypes in iteritems(types):
definitions[typename] = gen_type_check(pytypes)
return definitions
| 278,679 |
Register the decorated validator for a ``version`` of the specification.
Registered validators and their meta schemas will be considered when
parsing ``$schema`` properties' URIs.
Arguments:
version (str):
An identifier to use as the version's name
Returns:
callable: a class decorator to decorate the validator with the version
|
def validates(version):
def _validates(cls):
validators[version] = cls
meta_schema_id = cls.ID_OF(cls.META_SCHEMA)
if meta_schema_id:
meta_schemas[meta_schema_id] = cls
return cls
return _validates
| 278,680 |
Construct a resolver from a JSON schema object.
Arguments:
schema:
the referring schema
Returns:
`RefResolver`
|
def from_schema(cls, schema, id_of=_id_of, *args, **kwargs):
return cls(base_uri=id_of(schema), referrer=schema, *args, **kwargs)
| 278,688 |
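A short sketch for from_schema, assuming a draft-7 style schema whose '$id' carries the base URI:

from jsonschema import RefResolver

schema = {"$id": "https://example.com/root.json", "type": "object"}
resolver = RefResolver.from_schema(schema)
print(resolver.resolution_scope)  # https://example.com/root.json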
Resolve the given ``ref`` and enter its resolution scope.
Exits the scope on exit of this context manager.
Arguments:
ref (str):
The reference to resolve
|
def resolving(self, ref):
url, resolved = self.resolve(ref)
self.push_scope(url)
try:
yield resolved
finally:
self.pop_scope()
| 278,690 |
Install the plugin.
Arguments:
app (sphinx.application.Sphinx):
the Sphinx application context
|
def setup(app):
app.add_config_value("cache_path", "_cache", "")
try:
os.makedirs(app.config.cache_path)
except OSError as error:
if error.errno != errno.EEXIST:
raise
path = os.path.join(app.config.cache_path, "spec.html")
spec = fetch_or_load(path)
app.add_role("validator", docutils_sucks(spec))
| 278,738 |
Fetch a new specification or use the cache if it's current.
Arguments:
spec_path:
the path to a cached specification
|
def fetch_or_load(spec_path):
headers = {}
try:
modified = datetime.utcfromtimestamp(os.path.getmtime(spec_path))
date = modified.strftime("%a, %d %b %Y %I:%M:%S UTC")
headers["If-Modified-Since"] = date
except OSError as error:
if error.errno != errno.ENOENT:
raise
request = urllib.Request(VALIDATION_SPEC, headers=headers)
response = urllib.urlopen(request, cafile=certifi.where())
if response.code == 200:
with open(spec_path, "w+b") as spec:
spec.writelines(response)
spec.seek(0)
return html.parse(spec)
with open(spec_path) as spec:
return html.parse(spec)
| 278,739 |
Sort an iterable of OrderedBase instances.
Args:
items (iterable): the objects to sort
getter (callable or None): a function to extract the OrderedBase instance from an object.
Examples:
>>> sort_ordered_objects([x, y, z])
>>> sort_ordered_objects(v.items(), getter=lambda e: e[1])
|
def sort_ordered_objects(items, getter=lambda x: x):
return sorted(items, key=lambda x: getattr(getter(x), OrderedBase.CREATION_COUNTER_FIELD, -1))
| 280,497 |
Reset the sequence counter.
Args:
value (int or None): the new 'next' sequence value; if None,
recompute the next value from _setup_next_sequence().
force (bool): whether to force-reset parent sequence counters
in a factory inheritance chain.
|
def reset_sequence(cls, value=None, force=False):
cls._meta.reset_sequence(value, force=force)
| 280,525 |
Retrieve a copy of the declared attributes.
Args:
extra_defs (dict): additional definitions to insert into the
retrieved DeclarationDict.
|
def declarations(cls, extra_defs=None):
warnings.warn(
"Factory.declarations is deprecated; use Factory._meta.pre_declarations instead.",
DeprecationWarning,
stacklevel=2,
)
decls = cls._meta.pre_declarations.as_dict()
decls.update(extra_defs or {})
return decls
| 280,527 |
Generate the object.
Args:
params (dict): attributes to use for generating the object
strategy: the strategy to use
|
def _generate(cls, strategy, params):
if cls._meta.abstract:
raise errors.FactoryError(
"Cannot generate instances of abstract factory %(f)s; "
"Ensure %(f)s.Meta.model is set and %(f)s.Meta.abstract "
"is either not set or False." % dict(f=cls.__name__))
step = builder.StepBuilder(cls._meta, params, strategy)
return step.build()
| 280,528 |
Build a batch of instances of the given class, with overridden attrs.
Args:
size (int): the number of instances to build
Returns:
object list: the built instances
|
def build_batch(cls, size, **kwargs):
return [cls.build(**kwargs) for _ in range(size)]
| 280,529 |
Create a batch of instances of the given class, with overridden attrs.
Args:
size (int): the number of instances to create
Returns:
object list: the created instances
|
def create_batch(cls, size, **kwargs):
return [cls.create(**kwargs) for _ in range(size)]
| 280,530 |
Stub a batch of instances of the given class, with overridden attrs.
Args:
size (int): the number of instances to stub
Returns:
object list: the stubbed instances
|
def stub_batch(cls, size, **kwargs):
return [cls.stub(**kwargs) for _ in range(size)]
| 280,531 |
Generate a new instance.
The instance will be created with the given strategy (one of
BUILD_STRATEGY, CREATE_STRATEGY, STUB_STRATEGY).
Args:
strategy (str): the strategy to use for generating the instance.
Returns:
object: the generated instance
|
def generate(cls, strategy, **kwargs):
assert strategy in (enums.STUB_STRATEGY, enums.BUILD_STRATEGY, enums.CREATE_STRATEGY)
action = getattr(cls, strategy)
return action(**kwargs)
| 280,532 |
Generate a batch of instances.
The instances will be created with the given strategy (one of
BUILD_STRATEGY, CREATE_STRATEGY, STUB_STRATEGY).
Args:
strategy (str): the strategy to use for generating the instance.
size (int): the number of instances to generate
Returns:
object list: the generated instances
|
def generate_batch(cls, strategy, size, **kwargs):
assert strategy in (enums.STUB_STRATEGY, enums.BUILD_STRATEGY, enums.CREATE_STRATEGY)
batch_action = getattr(cls, '%s_batch' % strategy)
return batch_action(size, **kwargs)
| 280,533 |
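A hypothetical factory sketch showing the strategy-based batch API; the dict model and the attribute values are made up for illustration:

import factory

class UserFactory(factory.Factory):
    class Meta:
        model = dict  # assumption: plain dicts stand in for a real model

    name = "alice"

users = UserFactory.generate_batch(factory.BUILD_STRATEGY, 3, name="bob")
# [{'name': 'bob'}, {'name': 'bob'}, {'name': 'bob'}]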
Generate a new instance.
The instance will be either 'built' or 'created'.
Args:
create (bool): whether to 'build' or 'create' the instance.
Returns:
object: the generated instance
|
def simple_generate(cls, create, **kwargs):
strategy = enums.CREATE_STRATEGY if create else enums.BUILD_STRATEGY
return cls.generate(strategy, **kwargs)
| 280,534 |
Generate a batch of instances.
These instances will be either 'built' or 'created'.
Args:
size (int): the number of instances to generate
create (bool): whether to 'build' or 'create' the instances.
Returns:
object list: the generated instances
|
def simple_generate_batch(cls, create, size, **kwargs):
strategy = enums.CREATE_STRATEGY if create else enums.BUILD_STRATEGY
return cls.generate_batch(strategy, size, **kwargs)
| 280,535 |
Try to retrieve the given attribute of an object, digging on '.'.
This is an extended getattr, digging deeper if '.' is found.
Args:
obj (object): the object of which an attribute should be read
name (str): the name of an attribute to look up.
default (object): the default value to use if the attribute wasn't found
Returns:
the attribute pointed to by 'name', splitting on '.'.
Raises:
AttributeError: if obj has no 'name' attribute.
|
def deepgetattr(obj, name, default=_UNSPECIFIED):
try:
if '.' in name:
attr, subname = name.split('.', 1)
return deepgetattr(getattr(obj, attr), subname, default)
else:
return getattr(obj, name)
except AttributeError:
if default is _UNSPECIFIED:
raise
else:
return default
| 280,567 |
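A minimal sketch of deepgetattr's dotted lookup, using throwaway classes defined only for this example:

class Address:
    city = "Paris"

class User:
    address = Address()

print(deepgetattr(User(), "address.city"))        # 'Paris'
print(deepgetattr(User(), "address.zip", "n/a"))  # 'n/a' (default used)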
Evaluate the current ContainerAttribute.
Args:
instance (LazyStub): a lazy stub of the object being constructed, if
needed.
step (factory.builder.BuildStep): the current build step; its chain holds
the lazy stubs of the factories being evaluated, each item being a
future field of the next one.
extra (dict): extra call-time parameters (unused here).
|
def evaluate(self, instance, step, extra):
# Strip the current instance from the chain
chain = step.chain[1:]
if self.strict and not chain:
raise TypeError(
"A ContainerAttribute in 'strict' mode can only be used "
"within a SubFactory.")
return self.function(instance, chain)
| 280,579 |
Evaluate the current definition and fill its attributes.
Args:
step: a factory.builder.BuildStep
params (dict): additional, call-time added kwargs
for the step.
|
def generate(self, step, params):
subfactory = self.get_factory()
logger.debug(
"SubFactory: Instantiating %s.%s(%s), create=%r",
subfactory.__module__, subfactory.__name__,
utils.log_pprint(kwargs=params),
step,
)
force_sequence = step.sequence if self.FORCE_SEQUENCE else None
return step.recurse(subfactory, params, force_sequence=force_sequence)
| 280,587 |
Add new declarations to this set.
Args:
values (dict(name, declaration)): the declarations to ingest.
|
def update(self, values):
for k, v in values.items():
root, sub = self.split(k)
if sub is None:
self.declarations[root] = v
else:
self.contexts[root][sub] = v
extra_context_keys = set(self.contexts) - set(self.declarations)
if extra_context_keys:
raise errors.InvalidDeclarationError(
"Received deep context for unknown fields: %r (known=%r)" % (
{
self.join(root, sub): v
for root in extra_context_keys
for sub, v in self.contexts[root].items()
},
sorted(self.declarations),
)
)
| 280,608 |
Convenience method to unwrap all Alias(es) from around a DataType.
Args:
data_type (DataType): The target to unwrap.
Return:
Tuple[DataType, bool]: The underlying data type and a bool indicating
whether the input type had at least one alias layer.
|
def unwrap_aliases(data_type):
unwrapped_alias = False
while is_alias(data_type):
unwrapped_alias = True
data_type = data_type.data_type
return data_type, unwrapped_alias
| 284,443 |
Resolve all chained / nested aliases. This will recursively point
nested aliases to their resolved data type (first non-alias in the chain).
Note: This differs from unwrap_alias which simply identifies/returns
the resolved data type.
Args:
data_type (DataType): The target DataType/Alias to resolve.
Return:
DataType: The resolved type.
|
def resolve_aliases(data_type):
if not is_alias(data_type):
return data_type
resolved = resolve_aliases(data_type.data_type)
data_type.data_type = resolved
return resolved
| 284,444 |
Strip aliases from a data_type chain. This function should be
used *after* aliases are resolved (see the resolve_aliases fn).
Loops through the given data type chain (unwrapping types), replaces the
first alias encountered with its underlying type, and then terminates.
Note: Stops on encountering the first alias as it assumes
intermediate aliases are already removed.
Args:
data_type (DataType): The target DataType chain to strip.
Return:
None
|
def strip_alias(data_type):
while hasattr(data_type, 'data_type'):
if is_alias(data_type.data_type):
data_type.data_type = data_type.data_type.data_type
break
data_type = data_type.data_type
| 284,445 |
Convenience method to unwrap all Aliases and Nullables from around a
DataType. This checks for nullable wrapping aliases, as well as aliases
wrapping nullables.
Args:
data_type (DataType): The target to unwrap.
Return:
Tuple[DataType, bool, bool]: The underlying data type; a bool that is
set if a nullable was present; a bool that is set if an alias was
present.
|
def unwrap(data_type):
unwrapped_nullable = False
unwrapped_alias = False
while is_alias(data_type) or is_nullable_type(data_type):
if is_nullable_type(data_type):
unwrapped_nullable = True
if is_alias(data_type):
unwrapped_alias = True
data_type = data_type.data_type
return data_type, unwrapped_nullable, unwrapped_alias
| 284,446 |
Returns an OrderedDict mapping labels to Example objects.
Args:
compact (bool): If True, union members of void type are converted
to their compact representation: no ".tag" key or containing
dict, just the tag as a string.
|
def get_examples(self, compact=False):
# Copy it just in case the caller wants to mutate the object.
examples = copy.deepcopy(self._examples)
if not compact:
return examples
def make_compact(d):
# Traverse through dicts looking for ones that have a lone .tag
# key, which can be converted into the compact form.
if not isinstance(d, dict):
return
for key in d:
if isinstance(d[key], dict):
inner_d = d[key]
if len(inner_d) == 1 and '.tag' in inner_d:
d[key] = inner_d['.tag']
else:
make_compact(inner_d)
if isinstance(d[key], list):
for item in d[key]:
make_compact(item)
for example in examples.values():
if (isinstance(example.value, dict) and
len(example.value) == 1 and '.tag' in example.value):
# Handle the case where the top-level of the example can be
# made compact.
example.value = example.value['.tag']
else:
make_compact(example.value)
return examples
| 284,481 |
Adds a "raw example" for this type.
This does basic sanity checking to ensure that the example is valid
(required fields specified, no unknown fields, correct types, ...).
The example is not available via :meth:`get_examples` until
:meth:`_compute_examples` is called.
Args:
example (stone.frontend.ast.AstExample): An example of this type.
|
def _add_example(self, example):
if self.has_enumerated_subtypes():
self._add_example_enumerated_subtypes_helper(example)
else:
self._add_example_helper(example)
| 284,492 |
Adds a "raw example" for this type.
This does basic sanity checking to ensure that the example is valid
(required fields specified, no unknown fields, correct types, ...).
The example is not available via :meth:`get_examples` until
:meth:`_compute_examples` is called.
Args:
example (stone.frontend.ast.AstExample): An example of this
type.
|
def _add_example(self, example):
if len(example.fields) != 1:
raise InvalidSpec(
'Example for union must specify exactly one tag.',
example.lineno, example.path)
# Extract the only tag in the example.
example_field = list(example.fields.values())[0]
tag = example_field.name
# Find the union member that corresponds to the tag.
for field in self.all_fields:
if tag == field.name:
break
else:
# Error: Tag doesn't match any union member.
raise InvalidSpec(
"Unknown tag '%s' in example." % tag,
example.lineno, example.path
)
# TODO: are we always guaranteed at least one field?
# pylint: disable=undefined-loop-variable
try:
field.data_type.check_example(example_field)
except InvalidSpec as e:
e.msg = "Bad example for field '{}': {}".format(
field.name, e.msg)
raise
self._raw_examples[example.label] = example
| 284,504 |
Generates code to call a function.
Args:
name (str): The function name.
args (list[str]): Each positional argument.
kwargs (list[tuple]): Each tuple is (arg: str, value: str). If
value is None, then the keyword argument is omitted. Otherwise,
if the value is not a string, then str() is called on it.
Returns:
str: Code to call a function.
|
def generate_func_call(name, args=None, kwargs=None):
all_args = []
if args:
all_args.extend(args)
if kwargs:
all_args.extend('{}={}'.format(k, v)
for k, v in kwargs if v is not None)
return '{}({})'.format(name, ', '.join(all_args))
| 284,649 |
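A quick illustration of generate_func_call's keyword filtering, grounded in the code above; the argument strings are arbitrary:

code = generate_func_call(
    'open',
    args=['path'],
    kwargs=[('mode', "'rb'"), ('buffering', None)],  # None values are dropped
)
print(code)  # open(path, mode='rb')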
Encodes an object into a JSON-compatible dict based on its type.
Args:
data_type (Validator): Validator for obj.
obj (object): Object to be serialized.
caller_permissions (list): The list of raw-string caller permissions
with which to serialize.
Returns:
An object that when passed to json.dumps() will produce a string
giving the JSON-encoded object.
See json_encode() for additional information about validation.
|
def json_compat_obj_encode(data_type, obj, caller_permissions=None, alias_validators=None,
old_style=False, for_msgpack=False, should_redact=False):
serializer = StoneToPythonPrimitiveSerializer(
caller_permissions, alias_validators, for_msgpack, old_style, should_redact)
return serializer.encode(data_type, obj)
| 284,685 |
Given a documentation string, parse it and return all references to other
data types and routes.
Args:
- api: The API containing this doc ref.
- doc: The documentation string to parse.
- namespace_context: The namespace name relative to this documentation.
- ignore_missing_entries: If set, this will skip references to nonexistent data types instead
of raising an exception.
Returns:
- a tuple of referenced data types and routes
|
def parse_data_types_and_routes_from_doc_ref(
api,
doc,
namespace_context,
ignore_missing_entries=False
):
assert doc is not None
data_types = set()
routes = defaultdict(set)
for match in doc_ref_re.finditer(doc):
try:
tag = match.group('tag')
val = match.group('val')
supplied_namespace = api.namespaces[namespace_context]
if tag == 'field':
if '.' in val:
type_name, __ = val.split('.', 1)
doc_type = supplied_namespace.data_type_by_name[type_name]
data_types.add(doc_type)
else:
pass # no action required, because we must be referencing the same object
elif tag == 'route':
if '.' in val:
namespace_name, val = val.split('.', 1)
namespace = api.namespaces[namespace_name]
else:
namespace = supplied_namespace
try:
route_name, version = parse_route_name_and_version(val)
except ValueError as ex:
raise KeyError(str(ex))
route = namespace.routes_by_name[route_name].at_version[version]
routes[namespace.name].add(route)
elif tag == 'type':
if '.' in val:
namespace_name, val = val.split('.', 1)
doc_type = api.namespaces[namespace_name].data_type_by_name[val]
data_types.add(doc_type)
else:
doc_type = supplied_namespace.data_type_by_name[val]
data_types.add(doc_type)
except KeyError:
if not ignore_missing_entries:
raise
return data_types, routes
| 284,775 |
Checks that the namespace is declared first in the spec, and that only
one namespace is declared.
Args:
desc (List[stone.stone.parser.ASTNode]): All AST nodes in a spec
file in the order they were defined.
Return:
stone.frontend.ast.AstNamespace: The namespace AST node.
|
def _extract_namespace_ast_node(self, desc):
if len(desc) == 0 or not isinstance(desc[0], AstNamespace):
if self._debug:
self._logger.info('Description: %r', desc)
raise InvalidSpec('First declaration in a stone must be '
'a namespace. Possibly caused by preceding '
'errors.', desc[0].lineno, desc[0].path)
for item in desc[1:]:
if isinstance(item, AstNamespace):
raise InvalidSpec('Only one namespace declaration per file.',
item.lineno, item.path)
return desc.pop(0)
| 284,778 |
From the raw output of the parser, create forward references for each
user-defined type (struct, union, route, and alias).
Args:
namespace (stone.api.Namespace): Namespace for definitions.
desc (List[stone.stone.parser._Element]): All AST nodes in a spec
file in the order they were defined. Should not include a
namespace declaration.
|
def _add_data_types_and_routes_to_api(self, namespace, desc):
env = self._get_or_create_env(namespace.name)
for item in desc:
if isinstance(item, AstTypeDef):
api_type = self._create_type(env, item)
namespace.add_data_type(api_type)
self._check_canonical_name_available(item, namespace.name)
elif isinstance(item, AstStructPatch) or isinstance(item, AstUnionPatch):
# Handle patches later.
base_name = self._get_base_name(item.name, namespace.name)
self._patch_data_by_canonical_name[base_name] = (item, namespace)
elif isinstance(item, AstRouteDef):
route = self._create_route(env, item)
namespace.add_route(route)
self._check_canonical_name_available(item, namespace.name, allow_duplicate=True)
elif isinstance(item, AstImport):
# Handle imports later.
pass
elif isinstance(item, AstAlias):
alias = self._create_alias(env, item)
namespace.add_alias(alias)
self._check_canonical_name_available(item, namespace.name)
elif isinstance(item, AstAnnotationDef):
annotation = self._create_annotation(env, item)
namespace.add_annotation(annotation)
self._check_canonical_name_available(item, namespace.name)
elif isinstance(item, AstAnnotationTypeDef):
annotation_type = self._create_annotation_type(env, item)
namespace.add_annotation_type(annotation_type)
self._check_canonical_name_available(item, namespace.name)
else:
raise AssertionError('Unknown AST node type %r' %
item.__class__.__name__)
| 284,779 |
Scans raw parser output for import declarations. Checks if the imports
are valid, and then creates a reference to the namespace in the
environment.
Args:
raw_api (Tuple[Namespace, List[stone.stone.parser._Element]]):
Namespace paired with raw parser output.
|
def _add_imports_to_env(self, raw_api):
for namespace, desc in raw_api:
for item in desc:
if isinstance(item, AstImport):
if namespace.name == item.target:
raise InvalidSpec('Cannot import current namespace.',
item.lineno, item.path)
if item.target not in self.api.namespaces:
raise InvalidSpec(
'Namespace %s is not defined in any spec.' %
quote(item.target),
item.lineno, item.path)
env = self._get_or_create_env(namespace.name)
imported_env = self._get_or_create_env(item.target)
if namespace.name in imported_env:
# Block circular imports. The Python backend can't
# easily generate code for circular references.
raise InvalidSpec(
'Circular import of namespaces %s and %s '
'detected.' %
(quote(namespace.name), quote(item.target)),
item.lineno, item.path)
env[item.target] = imported_env
| 284,783 |
Responsible for instantiating a data type with additional attributes.
This method ensures that the specified attributes are valid.
Args:
data_type_class (DataType): The class to instantiate.
data_type_args (tuple): A pair of (positional args, keyword args)
attribute values. These will be passed into the constructor of
data_type_class.
Returns:
stone.data_type.DataType: A parameterized instance.
|
def _instantiate_data_type(self, data_type_class, data_type_args, loc):
assert issubclass(data_type_class, DataType), \
'Expected stone.data_type.DataType, got %r' % data_type_class
argspec = inspect.getargspec(data_type_class.__init__) # noqa: E501 # pylint: disable=deprecated-method,useless-suppression
argspec.args.remove('self')
num_args = len(argspec.args)
# Unfortunately, argspec.defaults is None if there are no defaults
num_defaults = len(argspec.defaults or ())
pos_args, kw_args = data_type_args
if (num_args - num_defaults) > len(pos_args):
# Report if a positional argument is missing
raise InvalidSpec(
'Missing positional argument %s for %s type' %
(quote(argspec.args[len(pos_args)]),
quote(data_type_class.__name__)),
*loc)
elif (num_args - num_defaults) < len(pos_args):
# Report if there are too many positional arguments
raise InvalidSpec(
'Too many positional arguments for %s type' %
quote(data_type_class.__name__),
*loc)
# Map from arg name to bool indicating whether the arg has a default
args = {}
for i, key in enumerate(argspec.args):
args[key] = (i >= num_args - num_defaults)
for key in kw_args:
# Report any unknown keyword arguments
if key not in args:
raise InvalidSpec('Unknown argument %s to %s type.' %
(quote(key), quote(data_type_class.__name__)),
*loc)
# Report any positional args that are defined as keywords args.
if not args[key]:
raise InvalidSpec(
'Positional argument %s cannot be specified as a '
'keyword argument.' % quote(key),
*loc)
del args[key]
try:
return data_type_class(*pos_args, **kw_args)
except ParameterError as e:
# Each data type validates its own attributes, and will raise a
# ParameterError if the type or value is bad.
raise InvalidSpec('Bad argument to %s type: %s' %
(quote(data_type_class.__name__), e.args[0]),
*loc)
| 284,800 |
Constructs a route and adds it to the environment.
Args:
env (dict): The environment of defined symbols. A new key is added
corresponding to the name of this new route.
item (AstRouteDef): Raw route definition from the parser.
Returns:
stone.api.ApiRoutesByVersion: A group of fully-defined routes indexed by versions.
|
def _create_route(self, env, item):
if item.name in env:
if isinstance(env[item.name], ApiRoutesByVersion):
if item.version in env[item.name].at_version:
existing_dt = env[item.name].at_version[item.version]
raise InvalidSpec(
'Route %s at version %d already defined (%s:%d).' % (
quote(item.name), item.version, existing_dt._ast_node.path,
existing_dt._ast_node.lineno),
item.lineno, item.path)
else:
existing_dt = env[item.name]
raise InvalidSpec(
'Symbol %s already defined (%s:%d).' % (
quote(item.name), existing_dt._ast_node.path,
existing_dt._ast_node.lineno),
item.lineno, item.path)
else:
env[item.name] = ApiRoutesByVersion()
route = ApiRoute(
name=item.name,
version=item.version,
ast_node=item,
)
env[route.name].at_version[route.version] = route
return route
| 284,804 |
Processes an event.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event.
|
def _ProcessEvent(self, mediator, event):
try:
self._analysis_plugin.ExamineEvent(mediator, event)
except Exception as exception: # pylint: disable=broad-except
self.SignalAbort()
# TODO: write analysis error.
if self._debug_output:
logger.warning('Unhandled exception while processing event object.')
logger.exception(exception)
| 287,595 |
Retrieves an ISO 8601 date time string from the structure.
The date and time values in Google Drive Sync log files are formatted as:
"2018-01-24 18:25:08,454 -0800".
Args:
structure (pyparsing.ParseResults): structure of tokens derived from a
line of a text file.
Returns:
str: ISO 8601 date time string.
Raises:
ValueError: if the structure cannot be converted into a date time string.
|
def _GetISO8601String(self, structure):
time_zone_offset = structure.time_zone_offset
try:
time_zone_offset_hours = int(time_zone_offset[1:3], 10)
time_zone_offset_minutes = int(time_zone_offset[3:5], 10)
except (IndexError, TypeError, ValueError) as exception:
raise ValueError(
'unable to parse time zone offset with error: {0!s}.'.format(
exception))
try:
iso8601 = (
'{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}.{6:03d}'
'{7:s}{8:02d}:{9:02d}').format(
structure.year, structure.month, structure.day,
structure.hours, structure.minutes, structure.seconds,
structure.microseconds, time_zone_offset[0],
time_zone_offset_hours, time_zone_offset_minutes)
except ValueError as exception:
raise ValueError(
'unable to format date time string with error: {0!s}.'.format(
exception))
return iso8601
| 287,598 |
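A hedged sketch of the expected conversion; SimpleNamespace stands in for the pyparsing structure, and the parser instance holding this method is left unnamed:

from types import SimpleNamespace

# Hypothetical structure mirroring the parsed fields of a log line.
structure = SimpleNamespace(
    year=2018, month=1, day=24, hours=18, minutes=25, seconds=8,
    microseconds=454, time_zone_offset='-0800')
# parser._GetISO8601String(structure) would be expected to return:
# '2018-01-24T18:25:08.454-08:00'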
Parses a logline record structure and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
|
def _ParseRecordLogline(self, parser_mediator, structure):
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds()
try:
datetime_iso8601 = self._GetISO8601String(structure.date_time)
date_time.CopyFromStringISO8601(datetime_iso8601)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
event_data = GoogleDriveSyncLogEventData()
event_data.log_level = structure.log_level
event_data.pid = structure.pid
event_data.thread = structure.thread
event_data.source_code = structure.source_code
# Replace newlines with spaces in structure.message to preserve output.
event_data.message = structure.message.replace('\n', ' ')
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 287,599 |
Verify that this file is a Google Drive Sync log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
lines (str): one or more lines from the text file.
Returns:
bool: True if this is the correct parser, False otherwise.
|
def VerifyStructure(self, parser_mediator, lines):
try:
structure = self._GDS_LINE.parseString(lines)
except pyparsing.ParseException as exception:
logger.debug('Not a Google Drive Sync log file: {0!s}'.format(exception))
return False
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds()
try:
datetime_iso8601 = self._GetISO8601String(structure.date_time)
date_time.CopyFromStringISO8601(datetime_iso8601)
except ValueError as exception:
logger.debug((
'Not a Google Drive Sync log file, invalid date/time: {0!s} '
'with error: {1!s}').format(structure.date_time, exception))
return False
return True
| 287,600 |
Retrieves a URL from a reference to an entry in the from_visit table.
Args:
url (str): URL.
cache (SQLiteCache): cache.
database (SQLiteDatabase): database.
Returns:
str: URL or an empty string if no URL was found.
|
def _GetUrl(self, url, cache, database):
if not url:
return ''
url_cache_results = cache.GetResults('url')
if not url_cache_results:
result_set = database.Query(self._URL_CACHE_QUERY)
cache.CacheQueryResults(result_set, 'url', 'id', ('url', 'title'))
url_cache_results = cache.GetResults('url')
reference_url, reference_title = url_cache_results.get(url, ['', ''])
if not reference_url:
return ''
return '{0:s} ({1:s})'.format(reference_url, reference_title)
| 287,603 |
Retrieves a visit source type based on the identifier.
Args:
visit_identifier (str): identifier from the visits table for the
particular record.
cache (SQLiteCache): cache which contains cached results from querying
the visit_source table.
database (SQLiteDatabase): database.
Returns:
int: visit source type or None if no visit source type was found for
the identifier.
|
def _GetVisitSource(self, visit_identifier, cache, database):
sync_cache_results = cache.GetResults('sync')
if not sync_cache_results:
result_set = database.Query(self._SYNC_CACHE_QUERY)
cache.CacheQueryResults(result_set, 'sync', 'id', ('source',))
sync_cache_results = cache.GetResults('sync')
if sync_cache_results and visit_identifier:
results = sync_cache_results.get(visit_identifier, None)
if results:
return results[0]
return None
| 287,604 |
Parses a last visited row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
cache (SQLiteCache): cache which contains cached results from querying
the visits and urls tables.
database (Optional[SQLiteDatabase]): database.
|
def ParseLastVisitedRow(
self, parser_mediator, query, row, cache=None, database=None,
**unused_kwargs):
query_hash = hash(query)
hidden = self._GetRowValue(query_hash, row, 'hidden')
transition = self._GetRowValue(query_hash, row, 'transition')
visit_identifier = self._GetRowValue(query_hash, row, 'visit_id')
from_visit = self._GetRowValue(query_hash, row, 'from_visit')
event_data = ChromeHistoryPageVisitedEventData()
event_data.from_visit = self._GetUrl(from_visit, cache, database)
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.page_transition_type = (
transition & self._PAGE_TRANSITION_CORE_MASK)
event_data.title = self._GetRowValue(query_hash, row, 'title')
event_data.typed_count = self._GetRowValue(query_hash, row, 'typed_count')
event_data.url = self._GetRowValue(query_hash, row, 'url')
event_data.url_hidden = hidden == '1'
event_data.visit_source = self._GetVisitSource(
visit_identifier, cache, database)
timestamp = self._GetRowValue(query_hash, row, 'visit_time')
date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 287,605 |
Parses a file downloaded row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
|
def ParseFileDownloadedRow(
self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
event_data = ChromeHistoryFileDownloadedEventData()
event_data.full_path = self._GetRowValue(query_hash, row, 'target_path')
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.received_bytes = self._GetRowValue(
query_hash, row, 'received_bytes')
event_data.total_bytes = self._GetRowValue(query_hash, row, 'total_bytes')
event_data.url = self._GetRowValue(query_hash, row, 'url')
timestamp = self._GetRowValue(query_hash, row, 'start_time')
date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
parser_mediator.ProduceEventWithEventData(event, event_data)
| 287,606 |
Initializes a command line interface tool.
Args:
input_reader (Optional[CLIInputReader]): input reader, where None
indicates that the stdin input reader should be used.
output_writer (Optional[CLIOutputWriter]): output writer, where None
indicates that the stdout output writer should be used.
|
def __init__(self, input_reader=None, output_writer=None):
super(CLITool, self).__init__()
preferred_encoding = locale.getpreferredencoding()
if not preferred_encoding:
preferred_encoding = self._PREFERRED_ENCODING
elif isinstance(preferred_encoding, py2to3.BYTES_TYPE):
preferred_encoding = preferred_encoding.decode('utf-8')
if not input_reader:
input_reader = StdinInputReader(encoding=preferred_encoding)
if not output_writer:
output_writer = StdoutOutputWriter(encoding=preferred_encoding)
self._data_location = None
self._debug_mode = False
self._encode_errors = 'strict'
self._input_reader = input_reader
self._log_file = None
self._output_writer = output_writer
self._preferred_time_zone = None
self._quiet_mode = False
self._views_format_type = views.ViewsFactory.FORMAT_TYPE_CLI
self.list_timezones = False
self.preferred_encoding = preferred_encoding
| 287,607 |
Enforces a process memory limit.
Args:
memory_limit (int): maximum number of bytes the process is allowed
to allocate, where 0 represents no limit and None a default of
4 GiB.
|
def _EnforceProcessMemoryLimit(self, memory_limit):
# Resource is not supported on Windows.
if resource:
if memory_limit is None:
memory_limit = 4 * 1024 * 1024 * 1024
elif memory_limit == 0:
memory_limit = resource.RLIM_INFINITY
resource.setrlimit(resource.RLIMIT_DATA, (memory_limit, memory_limit))
| 287,609 |
Parses the informational options.
Args:
options (argparse.Namespace): command line arguments.
|
def _ParseInformationalOptions(self, options):
self._debug_mode = getattr(options, 'debug', False)
self._quiet_mode = getattr(options, 'quiet', False)
if self._debug_mode and self._quiet_mode:
logger.warning(
'Cannot use debug and quiet mode at the same time, defaulting to '
'debug output.')
| 287,610 |
Parses the log file options.
Args:
options (argparse.Namespace): command line arguments.
|
def _ParseLogFileOptions(self, options):
self._log_file = self.ParseStringOption(options, 'log_file')
if not self._log_file:
local_date_time = datetime.datetime.now()
self._log_file = (
'{0:s}-{1:04d}{2:02d}{3:02d}T{4:02d}{5:02d}{6:02d}.log.gz').format(
self.NAME, local_date_time.year, local_date_time.month,
local_date_time.day, local_date_time.hour, local_date_time.minute,
local_date_time.second)
| 287,611 |
Parses the timezone options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
|
def _ParseTimezoneOption(self, options):
time_zone_string = self.ParseStringOption(options, 'timezone')
if isinstance(time_zone_string, py2to3.STRING_TYPES):
if time_zone_string.lower() == 'list':
self.list_timezones = True
elif time_zone_string:
try:
pytz.timezone(time_zone_string)
except pytz.UnknownTimeZoneError:
raise errors.BadConfigOption(
'Unknown time zone: {0:s}'.format(time_zone_string))
self._preferred_time_zone = time_zone_string
| 287,612 |
Prompts user for an input.
Args:
input_text (str): text used for prompting the user for input.
Returns:
str: input read from the user.
|
def _PromptUserForInput(self, input_text):
self._output_writer.Write('{0:s}: '.format(input_text))
return self._input_reader.Read()
| 287,613 |
Adds the basic options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
|
def AddBasicOptions(self, argument_group):
version_string = self.GetVersionInformation()
# We want a custom help message and not the default argparse one.
argument_group.add_argument(
'-h', '--help', action='help',
help='Show this help message and exit.')
argument_group.add_argument(
'--troubles', dest='show_troubleshooting', action='store_true',
default=False, help='Show troubleshooting information.')
argument_group.add_argument(
'-V', '--version', dest='version', action='version',
version=version_string, help='Show the version information.')
| 287,614 |
Adds the informational options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
|
def AddInformationalOptions(self, argument_group):
argument_group.add_argument(
'-d', '--debug', dest='debug', action='store_true', default=False,
help='Enable debug output.')
argument_group.add_argument(
'-q', '--quiet', dest='quiet', action='store_true', default=False,
help='Disable informational output.')
| 287,615 |
Adds the log file option to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
|
def AddLogFileOptions(self, argument_group):
argument_group.add_argument(
'--logfile', '--log_file', '--log-file', action='store',
metavar='FILENAME', dest='log_file', type=str, default='', help=(
'Path of the file in which to store log messages, by default '
'this file will be named: "{0:s}-YYYYMMDDThhmmss.log.gz". Note '
'that the file will be gzip compressed if the extension is '
'".gz".').format(self.NAME))
| 287,616 |
Adds the time zone option to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
|
def AddTimeZoneOption(self, argument_group):
# Note the default here is None so we can determine if the time zone
# option was set.
argument_group.add_argument(
'-z', '--zone', '--timezone', dest='timezone', action='store',
type=str, default=None, help=(
'explicitly define the timezone. Typically the timezone is '
'determined automatically where possible otherwise it will '
'default to UTC. Use "-z list" to see a list of available '
'timezones.'))
| 287,617 |
Parses a numeric option.
If the option is not set the default value is returned.
Args:
options (argparse.Namespace): command line arguments.
name (str): name of the numeric option.
base (Optional[int]): base of the numeric value.
default_value (Optional[object]): default value.
Returns:
int: numeric value.
Raises:
BadConfigOption: if the options are invalid.
|
def ParseNumericOption(self, options, name, base=10, default_value=None):
numeric_value = getattr(options, name, None)
if not numeric_value:
return default_value
try:
return int(numeric_value, base)
except (TypeError, ValueError):
name = name.replace('_', ' ')
raise errors.BadConfigOption(
'Unsupported numeric value {0:s}: {1!s}.'.format(
name, numeric_value))
| 287,620 |
Initializes an input reader.
Args:
encoding (Optional[str]): input encoding.
|
def __init__(self, encoding='utf-8'):
super(CLIInputReader, self).__init__()
self._encoding = encoding
| 287,623 |
Initializes an output writer.
Args:
encoding (Optional[str]): output encoding.
|
def __init__(self, encoding='utf-8'):
super(CLIOutputWriter, self).__init__()
self._encoding = encoding
| 287,624 |
Initializes a file object command line interface input reader.
Args:
file_object (file): file-like object to read from.
encoding (Optional[str]): input encoding.
|
def __init__(self, file_object, encoding='utf-8'):
super(FileObjectInputReader, self).__init__(encoding=encoding)
self._errors = 'strict'
self._file_object = file_object
| 287,625 |
Initializes a stdin input reader.
Args:
encoding (Optional[str]): input encoding.
|
def __init__(self, encoding='utf-8'):
super(StdinInputReader, self).__init__(sys.stdin, encoding=encoding)
| 287,627 |
Initializes a file object command line interface output writer.
Args:
file_object (file): file-like object to read from.
encoding (Optional[str]): output encoding.
|
def __init__(self, file_object, encoding='utf-8'):
super(FileObjectOutputWriter, self).__init__(encoding=encoding)
self._errors = 'strict'
self._file_object = file_object
| 287,628 |
Writes a string to the output.
Args:
string (str): output.
|
def Write(self, string):
try:
# Note that encode() will first convert string into a Unicode string
# if necessary.
encoded_string = codecs.encode(string, self._encoding, self._errors)
except UnicodeEncodeError:
if self._errors == 'strict':
logger.error(
'Unable to properly write output due to encoding error. '
'Switching to error tolerant encoding which can result in '
'non Basic Latin (C0) characters to be replaced with "?" or '
'"\\ufffd".')
self._errors = 'replace'
encoded_string = codecs.encode(string, self._encoding, self._errors)
self._file_object.write(encoded_string)
| 287,629 |
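A small sketch of the error-tolerant fallback, assuming the FileObjectOutputWriter shown above is instantiated over an in-memory buffer:

import io

buffer = io.BytesIO()
writer = FileObjectOutputWriter(buffer, encoding='ascii')
writer.Write('résumé\n')   # triggers the switch to errors='replace'
print(buffer.getvalue())   # b'r?sum?\n'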
Initializes a stdout output writer.
Args:
encoding (Optional[str]): output encoding.
|
def __init__(self, encoding='utf-8'):
super(StdoutOutputWriter, self).__init__(sys.stdout, encoding=encoding)
| 287,630 |
Writes a string to the output.
Args:
string (str): output.
|
def Write(self, string):
if sys.version_info[0] < 3:
super(StdoutOutputWriter, self).Write(string)
else:
# sys.stdout.write() on Python 3 by default will error if string is
# of type bytes.
sys.stdout.write(string)
| 287,631 |
Checks the availability of sqlite3.
Args:
verbose_output (Optional[bool]): True if output should be verbose.
Returns:
bool: True if the sqlite3 Python module is available, False otherwise.
|
def _CheckSQLite3(verbose_output=True):
# On Windows sqlite3 can be provided by both pysqlite2.dbapi2 and
# sqlite3. sqlite3 is provided with the Python installation and
# pysqlite2.dbapi2 by the pysqlite2 Python module. Typically
# pysqlite2.dbapi2 would contain a newer version of sqlite3, hence
# we check for its presence first.
module_name = 'pysqlite2.dbapi2'
minimum_version = '3.7.8'
module_object = _ImportPythonModule(module_name)
if not module_object:
module_name = 'sqlite3'
module_object = _ImportPythonModule(module_name)
if not module_object:
print('[FAILURE]\tmissing: {0:s}.'.format(module_name))
return False
module_version = getattr(module_object, 'sqlite_version', None)
if not module_version:
return False
# Split the version string and convert every digit into an integer.
# A string compare of both version strings will yield an incorrect result.
module_version_map = list(
map(int, _VERSION_SPLIT_REGEX.split(module_version)))
minimum_version_map = list(
map(int, _VERSION_SPLIT_REGEX.split(minimum_version)))
if module_version_map < minimum_version_map:
print((
'[FAILURE]\t{0:s} version: {1!s} is too old, {2!s} or later '
'required.').format(module_name, module_version, minimum_version))
return False
if verbose_output:
print('[OK]\t\t{0:s} version: {1!s}'.format(module_name, module_version))
return True
| 287,633 |
Imports a Python module.
Args:
module_name (str): name of the module.
Returns:
module: Python module or None if the module cannot be imported.
|
def _ImportPythonModule(module_name):
try:
module_object = list(map(__import__, [module_name]))[0]
except ImportError:
return None
# If the module name contains dots get the upper most module object.
if '.' in module_name:
for submodule_name in module_name.split('.')[1:]:
module_object = getattr(module_object, submodule_name, None)
return module_object
| 287,634 |
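A short usage sketch for _ImportPythonModule with a dotted standard-library module name:

module = _ImportPythonModule('xml.etree.ElementTree')
print(module)  # <module 'xml.etree.ElementTree' ...>, or None if it cannot be imported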
Checks the availability of the dependencies.
Args:
verbose_output (Optional[bool]): True if output should be verbose.
Returns:
bool: True if the dependencies are available, False otherwise.
|
def CheckDependencies(verbose_output=True):
print('Checking availability and versions of dependencies.')
check_result = True
for module_name, version_tuple in sorted(PYTHON_DEPENDENCIES.items()):
if not _CheckPythonModule(
module_name, version_tuple[0], version_tuple[1],
is_required=version_tuple[3], maximum_version=version_tuple[2],
verbose_output=verbose_output):
check_result = False
if not _CheckSQLite3(verbose_output=verbose_output):
check_result = False
if check_result and not verbose_output:
print('[OK]')
print('')
return check_result
| 287,635 |
Initializes a cache address.
Args:
cache_address (int): cache address.
|
def __init__(self, cache_address):
super(CacheAddress, self).__init__()
self.block_number = None
self.block_offset = None
self.block_size = None
self.filename = None
self.value = cache_address
if cache_address & 0x80000000:
self.is_initialized = 'True'
else:
self.is_initialized = 'False'
self.file_type = (cache_address & 0x70000000) >> 28
if not cache_address == 0x00000000:
if self.file_type == self.FILE_TYPE_SEPARATE:
file_selector = cache_address & 0x0fffffff
self.filename = 'f_{0:06x}'.format(file_selector)
elif self.file_type in self._BLOCK_DATA_FILE_TYPES:
file_selector = (cache_address & 0x00ff0000) >> 16
self.filename = 'data_{0:d}'.format(file_selector)
file_block_size = self._FILE_TYPE_BLOCK_SIZES[self.file_type]
self.block_number = cache_address & 0x0000ffff
self.block_size = (cache_address & 0x03000000) >> 24
self.block_size *= file_block_size
self.block_offset = 8192 + (self.block_number * file_block_size)
| 287,636 |
Parses the index table.
Args:
file_object (dfvfs.FileIO): a file-like object to parse.
Raises:
ParseError: if the index table cannot be read.
|
def _ParseIndexTable(self, file_object):
cache_address_map = self._GetDataTypeMap('uint32le')
file_offset = file_object.get_offset()
cache_address_data = file_object.read(4)
while len(cache_address_data) == 4:
try:
value = self._ReadStructureFromByteStream(
cache_address_data, file_offset, cache_address_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to map cache address at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
if value:
cache_address = CacheAddress(value)
self.index_table.append(cache_address)
file_offset += 4
cache_address_data = file_object.read(4)
| 287,639 |
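The loop above reads 32-bit little-endian cache addresses one at a time until the table is exhausted, skipping empty (zero) slots; below is a library-free sketch of the same read pattern using struct, with a made-up byte string.

import io
import struct

# Three little-endian uint32 values, the zero entry is skipped like above.
index_data = io.BytesIO(struct.pack('<3I', 0xa1010003, 0x00000000, 0x80000020))

cache_address_values = []
cache_address_data = index_data.read(4)
while len(cache_address_data) == 4:
  value = struct.unpack('<I', cache_address_data)[0]
  if value:
    cache_address_values.append(value)
  cache_address_data = index_data.read(4)

print([hex(value) for value in cache_address_values])
# ['0xa1010003', '0x80000020']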
Parses a file-like object.
Args:
parser_mediator (ParserMediator): a parser mediator.
file_object (dfvfs.FileIO): a file-like object to parse.
Raises:
ParseError: when the file cannot be parsed.
|
def ParseFileObject(self, parser_mediator, file_object):
try:
self._ParseFileHeader(file_object)
except errors.ParseError as exception:
raise errors.ParseError(
'Unable to parse index file header with error: {0!s}'.format(
exception))
# Skip over the LRU data, which is 112 bytes in size.
file_object.seek(112, os.SEEK_CUR)
self._ParseIndexTable(file_object)
| 287,640 |
Parses the file header.
Args:
file_object (dfvfs.FileIO): a file-like object to parse.
Raises:
ParseError: if the file header cannot be read.
|
def _ParseFileHeader(self, file_object):
file_header_map = self._GetDataTypeMap(
'chrome_cache_data_block_file_header')
try:
file_header, _ = self._ReadStructureFromFileObject(
file_object, 0, file_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse data block file header with error: {0!s}'.format(
exception))
if file_header.signature != self._FILE_SIGNATURE:
raise errors.ParseError('Unsupported data block file signature')
format_version = '{0:d}.{1:d}'.format(
file_header.major_version, file_header.minor_version)
if format_version not in ('2.0', '2.1'):
raise errors.ParseError(
'Unsupported data block file format version: {0:s}'.format(
format_version))
if file_header.block_size not in (256, 1024, 4096):
raise errors.ParseError(
'Unsupported data block file block size: {0:d}'.format(
file_header.block_size))
| 287,641 |
Parses a cache entry.
Args:
file_object (dfvfs.FileIO): a file-like object to read from.
block_offset (int): block offset of the cache entry.
Returns:
CacheEntry: cache entry.
Raises:
ParseError: if the cache entry cannot be read.
|
def ParseCacheEntry(self, file_object, block_offset):
cache_entry_map = self._GetDataTypeMap('chrome_cache_entry')
try:
cache_entry, _ = self._ReadStructureFromFileObject(
file_object, block_offset, cache_entry_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse cache entry at offset: 0x{0:08x} with error: '
'{1!s}').format(block_offset, exception))
cache_entry_object = CacheEntry()
cache_entry_object.hash = cache_entry.hash
cache_entry_object.next = CacheAddress(cache_entry.next_address)
cache_entry_object.rankings_node = CacheAddress(
cache_entry.rankings_node_address)
cache_entry_object.creation_time = cache_entry.creation_time
byte_array = cache_entry.key
byte_string = bytes(bytearray(byte_array))
cache_entry_object.key, _, _ = byte_string.partition(b'\x00')
try:
cache_entry_object.original_url = cache_entry_object.key.decode('ascii')
except UnicodeDecodeError as exception:
raise errors.ParseError(
'Unable to decode original URL in key with error: {0!s}'.format(
exception))
return cache_entry_object
| 287,642 |
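A small sketch of the key handling above: the key is stored as a fixed-size byte array, so it is cut at the first NUL byte before the URL is decoded (the sample bytes are made up).

# A NUL-terminated key followed by leftover padding bytes.
byte_array = [
    0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x65, 0x78, 0x61, 0x6d, 0x70,
    0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x00, 0xde, 0xad, 0xbe, 0xef]

byte_string = bytes(bytearray(byte_array))
key, _, _ = byte_string.partition(b'\x00')
print(key.decode('ascii'))  # http://example.com/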
Parses Chrome Cache file entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
index_table (list[CacheAddress]): the cache addresses which are stored in
the index file.
data_block_files (dict[str, file]): lookup table of data block
file-like object handles, keyed by filename.
|
def _ParseCacheEntries(self, parser_mediator, index_table, data_block_files):
# Parse the cache entries in the data block files.
for cache_address in index_table:
cache_address_chain_length = 0
while cache_address.value != 0:
if cache_address_chain_length >= 64:
parser_mediator.ProduceExtractionWarning(
'Maximum allowed cache address chain length reached.')
break
data_block_file_object = data_block_files.get(
cache_address.filename, None)
if not data_block_file_object:
message = 'Cache address: 0x{0:08x} missing data file.'.format(
cache_address.value)
parser_mediator.ProduceExtractionWarning(message)
break
try:
cache_entry = self._data_block_file_parser.ParseCacheEntry(
data_block_file_object, cache_address.block_offset)
except (IOError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'Unable to parse cache entry with error: {0!s}'.format(
exception))
break
event_data = ChromeCacheEntryEventData()
event_data.original_url = cache_entry.original_url
date_time = dfdatetime_webkit_time.WebKitTime(
timestamp=cache_entry.creation_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
cache_address = cache_entry.next
cache_address_chain_length += 1
| 287,644 |
Parses a Chrome Cache index table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_system (dfvfs.FileSystem): file system.
file_entry (dfvfs.FileEntry): file entry.
index_table (list[CacheAddress]): the cache addresses which are stored in
the index file.
|
def _ParseIndexTable(
self, parser_mediator, file_system, file_entry, index_table):
# Build a lookup table for the data block files.
path_segments = file_system.SplitPath(file_entry.path_spec.location)
data_block_files = {}
for cache_address in index_table:
if cache_address.filename not in data_block_files:
# Remove the previous filename from the path segments list and
# add one of the data block files.
path_segments.pop()
path_segments.append(cache_address.filename)
# Only pass the arguments that are actually set to the path specification
# factory, otherwise it will raise an error.
kwargs = {}
if file_entry.path_spec.parent:
kwargs['parent'] = file_entry.path_spec.parent
kwargs['location'] = file_system.JoinPath(path_segments)
data_block_file_path_spec = path_spec_factory.Factory.NewPathSpec(
file_entry.path_spec.TYPE_INDICATOR, **kwargs)
try:
data_block_file_entry = path_spec_resolver.Resolver.OpenFileEntry(
data_block_file_path_spec)
except RuntimeError as exception:
message = (
'Unable to open data block file: {0:s} with error: '
'{1!s}'.format(kwargs['location'], exception))
parser_mediator.ProduceExtractionWarning(message)
data_block_file_entry = None
if not data_block_file_entry:
message = 'Missing data block file: {0:s}'.format(
cache_address.filename)
parser_mediator.ProduceExtractionWarning(message)
data_block_file_object = None
else:
data_block_file_object = data_block_file_entry.GetFileObject()
try:
self._data_block_file_parser.ParseFileObject(
parser_mediator, data_block_file_object)
except (IOError, errors.ParseError) as exception:
message = (
'Unable to parse data block file: {0:s} with error: '
'{1!s}').format(cache_address.filename, exception)
parser_mediator.ProduceExtractionWarning(message)
data_block_file_object.close()
data_block_file_object = None
data_block_files[cache_address.filename] = data_block_file_object
try:
self._ParseCacheEntries(
parser_mediator, index_table, data_block_files)
finally:
for data_block_file_object in iter(data_block_files.values()):
if data_block_file_object:
data_block_file_object.close()
| 287,645 |
Parses Chrome Cache files.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_entry (dfvfs.FileEntry): file entry.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
def ParseFileEntry(self, parser_mediator, file_entry):
index_file_parser = ChromeCacheIndexFileParser()
file_object = file_entry.GetFileObject()
try:
index_file_parser.ParseFileObject(parser_mediator, file_object)
except (IOError, errors.ParseError) as exception:
file_object.close()
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile(
'[{0:s}] unable to parse index file {1:s} with error: {2!s}'.format(
self.NAME, display_name, exception))
# TODO: create event based on index file creation time.
try:
file_system = file_entry.GetFileSystem()
self._ParseIndexTable(
parser_mediator, file_system, file_entry,
index_file_parser.index_table)
finally:
file_object.close()
| 287,646 |
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
|
def ParseOptions(cls, options, configuration_object):
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
hashers = cls._ParseStringOption(
options, 'hashers', default_value=cls._DEFAULT_HASHER_STRING)
hasher_file_size_limit = cls._ParseNumericOption(
options, 'hasher_file_size_limit', default_value=0)
# TODO: validate hasher names.
if hasher_file_size_limit < 0:
raise errors.BadConfigOption(
'Invalid hasher file size limit: value cannot be negative.')
setattr(configuration_object, '_hasher_names_string', hashers)
setattr(
configuration_object, '_hasher_file_size_limit', hasher_file_size_limit)
| 287,647 |
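A hedged sketch of the command line side feeding the options above; the flag names are assumptions mirroring the attribute names read by ParseOptions(), not necessarily the exact flags registered elsewhere.

import argparse

argument_parser = argparse.ArgumentParser()
argument_parser.add_argument('--hashers', dest='hashers', default='auto')
argument_parser.add_argument(
    '--hasher_file_size_limit', dest='hasher_file_size_limit', type=int,
    default=0)

options = argument_parser.parse_args(
    ['--hashers', 'sha256,md5', '--hasher_file_size_limit', '1048576'])

# ParseOptions() would copy these onto the configuration object as
# _hasher_names_string and _hasher_file_size_limit.
print(options.hashers, options.hasher_file_size_limit)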
Initializes an Excel Spreadsheet (XLSX) output module.
Args:
output_mediator (OutputMediator): output mediator.
|
def __init__(self, output_mediator):
super(XLSXOutputModule, self).__init__(output_mediator)
self._column_widths = {}
self._current_row = 0
self._dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)
self._fields = self._DEFAULT_FIELDS
self._filename = None
self._sheet = None
self._timestamp_format = self._DEFAULT_TIMESTAMP_FORMAT
self._workbook = None
| 287,648 |
Formats the date to a datetime object without timezone information.
Note: timezone information must be removed due to lack of support
by xlsxwriter and Excel.
Args:
event (EventObject): event.
Returns:
datetime.datetime|str: date and time value or the string "ERROR" if the
timestamp cannot be converted due to an OverflowError or ValueError.
|
def _FormatDateTime(self, event):
try:
datetime_object = datetime.datetime(
1970, 1, 1, 0, 0, 0, 0, tzinfo=pytz.UTC)
datetime_object += datetime.timedelta(microseconds=event.timestamp)
datetime_object = datetime_object.astimezone(self._output_mediator.timezone)
return datetime_object.replace(tzinfo=None)
except (OverflowError, ValueError) as exception:
self._ReportEventError(event, (
'unable to copy timestamp: {0!s} to a human readable date and time '
'with error: {1!s}. Defaulting to: "ERROR"').format(
event.timestamp, exception))
return 'ERROR'
| 287,649 |
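A standalone sketch of the conversion above, assuming plaso-style timestamps in microseconds since the POSIX epoch; the timezone is applied with astimezone() and then stripped because xlsxwriter cannot store tzinfo (the sample timestamp and timezone are illustrative).

import datetime

import pytz

timestamp = 1316387908000000  # 2011-09-18 23:18:28 UTC in microseconds

datetime_object = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, tzinfo=pytz.UTC)
datetime_object += datetime.timedelta(microseconds=timestamp)
datetime_object = datetime_object.astimezone(pytz.timezone('US/Eastern'))

print(datetime_object.replace(tzinfo=None))  # 2011-09-18 19:18:28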
Removes illegal characters for XML.
If the input is not a string it will be returned unchanged.
Args:
xml_string (str): XML with possible illegal characters.
Returns:
str: XML where all illegal characters have been replaced with the Unicode
replacement character.
|
def _RemoveIllegalXMLCharacters(self, xml_string):
if not isinstance(xml_string, py2to3.STRING_TYPES):
return xml_string
return self._ILLEGAL_XML_RE.sub('\ufffd', xml_string)
| 287,650 |
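A self-contained sketch of the same sanitization; the regular expression is an assumed stand-in for _ILLEGAL_XML_RE covering the control characters XML 1.0 forbids, not necessarily the exact pattern the module compiles.

import re

# Assumed illegal XML 1.0 characters (tab, line feed and carriage return are
# allowed and therefore excluded from the range).
_ILLEGAL_XML_RE = re.compile(
    '[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x84\x86-\x9f]')

value = 'SYSTEM\x00\x08 hive'
sanitized = _ILLEGAL_XML_RE.sub('\ufffd', value)
print(sanitized)  # the NUL and backspace characters become U+FFFD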
Writes the body of an event object to the spreadsheet.
Args:
event (EventObject): event.
|
def WriteEventBody(self, event):
for field_name in self._fields:
if field_name == 'datetime':
output_value = self._FormatDateTime(event)
else:
output_value = self._dynamic_fields_helper.GetFormattedField(
event, field_name)
output_value = self._RemoveIllegalXMLCharacters(output_value)
# Auto adjust the column width based on the length of the output value.
column_index = self._fields.index(field_name)
self._column_widths.setdefault(column_index, 0)
if field_name == 'datetime':
column_width = min(
self._MAX_COLUMN_WIDTH, len(self._timestamp_format) + 2)
else:
column_width = min(self._MAX_COLUMN_WIDTH, len(output_value) + 2)
self._column_widths[column_index] = max(
self._MIN_COLUMN_WIDTH, self._column_widths[column_index],
column_width)
self._sheet.set_column(
column_index, column_index, self._column_widths[column_index])
if (field_name == 'datetime'
and isinstance(output_value, datetime.datetime)):
self._sheet.write_datetime(
self._current_row, column_index, output_value)
else:
self._sheet.write(self._current_row, column_index, output_value)
self._current_row += 1
| 287,652 |
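A minimal standalone xlsxwriter sketch of the two write paths used above: write_datetime() for naive datetime values and write() for everything else; in the output module the date display format is presumably applied per column elsewhere, so the filename and num_format below are illustrative.

import datetime

import xlsxwriter

workbook = xlsxwriter.Workbook('sample.xlsx')
sheet = workbook.add_worksheet()

# Without a num_format Excel would show the datetime as a raw serial number.
datetime_format = workbook.add_format(
    {'num_format': 'yyyy-mm-dd hh:mm:ss.000'})

sheet.write_datetime(
    0, 0, datetime.datetime(2011, 9, 18, 19, 18, 28), datetime_format)
sheet.write(0, 1, 'http://example.com/')
sheet.set_column(0, 0, 25)  # widen the datetime column

workbook.close()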
Normalizes the parsed date and time values to an ISO 8601 date and time string.
The date and time values in Apache access log files are formatted as:
"[18/Sep/2011:19:18:28 -0400]".
Args:
structure (pyparsing.ParseResults): structure of tokens derived from a
line of a text file.
Returns:
str: ISO 8601 date time string.
Raises:
ValueError: if the structure cannot be converted into a date time string.
|
def _GetISO8601String(self, structure):
time_offset = structure.time_offset
month = timelib.MONTH_DICT.get(structure.month.lower(), 0)
try:
time_offset_hours = int(time_offset[1:3], 10)
time_offset_minutes = int(time_offset[3:5], 10)
except (IndexError, TypeError, ValueError) as exception:
raise ValueError(
'unable to parse time zone offset with error: {0!s}.'.format(
exception))
try:
date_time_string = (
'{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}.000000'
'{6:s}{7:02d}:{8:02d}').format(
structure.year, month, structure.day, structure.hours,
structure.minutes, structure.seconds, time_offset[0],
time_offset_hours, time_offset_minutes)
except ValueError as exception:
raise ValueError(
'unable to format date time string with error: {0!s}.'.format(
exception))
return date_time_string
| 287,655 |
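A worked conversion matching the docstring's example "[18/Sep/2011:19:18:28 -0400]"; the dictionary is a hypothetical stand-in for the pyparsing tokens, with the month already mapped to its number as timelib.MONTH_DICT does above.

structure = {
    'year': 2011, 'month': 9, 'day': 18, 'hours': 19, 'minutes': 18,
    'seconds': 28, 'time_offset': '-0400'}

time_offset = structure['time_offset']
date_time_string = (
    '{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}.000000'
    '{6:s}{7:02d}:{8:02d}').format(
        structure['year'], structure['month'], structure['day'],
        structure['hours'], structure['minutes'], structure['seconds'],
        time_offset[0], int(time_offset[1:3], 10), int(time_offset[3:5], 10))

print(date_time_string)  # 2011-09-18T19:18:28.000000-04:00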
Parses a matching entry.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): elements parsed from the file.
Raises:
ParseError: when the structure type is unknown.
|
def ParseRecord(self, parser_mediator, key, structure):
if key not in self._SUPPORTED_KEYS:
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
date_time = dfdatetime_time_elements.TimeElements()
try:
iso_date_time = self._GetISO8601String(structure.date_time)
date_time.CopyFromStringISO8601(iso_date_time)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_RECORDED)
event_data = ApacheAccessEventData()
event_data.ip_address = structure.ip_address
event_data.remote_name = structure.remote_name
event_data.user_name = structure.user_name
event_data.http_request = structure.http_request
event_data.http_response_code = structure.response_code
event_data.http_response_bytes = structure.response_bytes
if key == 'combined_log_format':
event_data.http_request_referer = structure.referer
event_data.http_request_user_agent = structure.user_agent
parser_mediator.ProduceEventWithEventData(event, event_data)
| 287,656 |
Verifies that this is an Apache access log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from the text file.
Returns:
bool: True if this is the correct parser, False otherwise.
|
def VerifyStructure(self, parser_mediator, line):
return any(parser.matches(line) for _, parser in self.LINE_STRUCTURES)
| 287,657 |
Parses account information.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row with account information.
|
def ParseAccountInformation(
self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
display_name = self._GetRowValue(query_hash, row, 'given_displayname')
fullname = self._GetRowValue(query_hash, row, 'fullname')
# TODO: Move this to the formatter, and ensure username is rendered
# properly when fullname and/or display_name is None.
username = '{0!s} <{1!s}>'.format(fullname, display_name)
event_data = SkypeAccountEventData()
event_data.country = self._GetRowValue(query_hash, row, 'country')
event_data.display_name = display_name
event_data.email = self._GetRowValue(query_hash, row, 'emails')
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.username = username
timestamp = self._GetRowValue(query_hash, row, 'profile_timestamp')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Profile Changed')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'authreq_timestamp')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, 'Authenticate Request')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'lastonline_timestamp')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Last Online')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'mood_timestamp')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Mood Event')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'sent_authrequest_time')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Auth Request Sent')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'lastused_timestamp')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Last Used')
parser_mediator.ProduceEventWithEventData(event, event_data)
| 287,663 |
Parses a chat message.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query.
|
def ParseChat(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
participants = self._GetRowValue(query_hash, row, 'participants')
author = self._GetRowValue(query_hash, row, 'author')
dialog_partner = self._GetRowValue(query_hash, row, 'dialog_partner')
from_displayname = self._GetRowValue(query_hash, row, 'from_displayname')
accounts = []
participants = participants.split(' ')
for participant in participants:
if participant != author:
accounts.append(participant)
to_account = ', '.join(accounts)
if not to_account:
to_account = dialog_partner or 'Unknown User'
from_account = '{0:s} <{1:s}>'.format(from_displayname, author)
event_data = SkypeChatEventData()
event_data.from_account = from_account
event_data.query = query
event_data.text = self._GetRowValue(query_hash, row, 'body_xml')
event_data.title = self._GetRowValue(query_hash, row, 'title')
event_data.to_account = to_account
timestamp = self._GetRowValue(query_hash, row, 'timestamp')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Chat from Skype')
parser_mediator.ProduceEventWithEventData(event, event_data)
| 287,664 |
Parses an SMS.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query.
|
def ParseSMS(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
phone_number = self._GetRowValue(query_hash, row, 'dstnum_sms')
if phone_number:
phone_number = phone_number.replace(' ', '')
event_data = SkypeSMSEventData()
event_data.number = phone_number
event_data.query = query
event_data.text = self._GetRowValue(query_hash, row, 'msg_sms')
timestamp = self._GetRowValue(query_hash, row, 'time_sms')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'SMS from Skype')
parser_mediator.ProduceEventWithEventData(event, event_data)
| 287,665 |
Parses a call.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query.
|
def ParseCall(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
guid = self._GetRowValue(query_hash, row, 'guid')
is_incoming = self._GetRowValue(query_hash, row, 'is_incoming')
videostatus = self._GetRowValue(query_hash, row, 'videostatus')
try:
aux = guid
if aux:
aux_list = aux.split('-')
src_aux = aux_list[0]
dst_aux = aux_list[1]
else:
src_aux = 'Unknown [no GUID]'
dst_aux = 'Unknown [no GUID]'
except IndexError:
src_aux = 'Unknown [{0:s}]'.format(guid)
dst_aux = 'Unknown [{0:s}]'.format(guid)
if is_incoming == '0':
user_start_call = True
source = src_aux
ip_address = self._GetRowValue(query_hash, row, 'ip_address')
if ip_address:
destination = '{0:s} <{1:s}>'.format(dst_aux, ip_address)
else:
destination = dst_aux
else:
user_start_call = False
source = src_aux
destination = dst_aux
call_identifier = self._GetRowValue(query_hash, row, 'id')
event_data = SkypeCallEventData()
event_data.dst_call = destination
event_data.offset = call_identifier
event_data.query = query
event_data.src_call = source
event_data.user_start_call = user_start_call
event_data.video_conference = videostatus == '3'
timestamp = self._GetRowValue(query_hash, row, 'try_call')
event_data.call_type = 'WAITING'
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype')
parser_mediator.ProduceEventWithEventData(event, event_data)
try:
timestamp = self._GetRowValue(query_hash, row, 'accept_call')
timestamp = int(timestamp)
except (ValueError, TypeError):
timestamp = None
if timestamp:
event_data.call_type = 'ACCEPTED'
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype')
parser_mediator.ProduceEventWithEventData(event, event_data)
try:
call_duration = self._GetRowValue(query_hash, row, 'call_duration')
call_duration = int(call_duration)
except (ValueError, TypeError):
parser_mediator.ProduceExtractionWarning(
'unable to determine when call: {0!s} was finished.'.format(
call_identifier))
call_duration = None
if call_duration:
timestamp += call_duration
event_data.call_type = 'FINISHED'
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype')
parser_mediator.ProduceEventWithEventData(event, event_data)
| 287,666 |
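A small sketch of the GUID handling above: Skype call GUIDs are expected to start with 'caller-callee-', and the split drives the source and destination labels; the sample GUID is made up.

guid = 'alice.skype-bob.skype-1316387908'  # hypothetical call GUID

try:
  aux_list = guid.split('-')
  src_aux = aux_list[0]
  dst_aux = aux_list[1]
except IndexError:
  src_aux = 'Unknown [{0:s}]'.format(guid)
  dst_aux = 'Unknown [{0:s}]'.format(guid)

print(src_aux, dst_aux)  # alice.skype bob.skype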
Parses a file transfer.
There is no direct relationship between who sends the file and
who accepts the file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query.
cache (Optional[SQLiteCache]): cache.
database (Optional[SQLiteDatabase]): database.
|
def ParseFileTransfer(
self, parser_mediator, query, row, cache=None, database=None,
**unused_kwargs):
query_hash = hash(query)
source_dict = cache.GetResults('source')
if not source_dict:
results = database.Query(self.QUERY_SOURCE_FROM_TRANSFER)
cache.CacheQueryResults(
results, 'source', 'pk_id', ('skypeid', 'skypename'))
source_dict = cache.GetResults('source')
dest_dict = cache.GetResults('destination')
if not dest_dict:
results = database.Query(self.QUERY_DEST_FROM_TRANSFER)
cache.CacheQueryResults(
results, 'destination', 'parent_id', ('skypeid', 'skypename'))
dest_dict = cache.GetResults('destination')
source = 'Unknown'
destination = 'Unknown'
parent_id = self._GetRowValue(query_hash, row, 'parent_id')
partner_dispname = self._GetRowValue(query_hash, row, 'partner_dispname')
partner_handle = self._GetRowValue(query_hash, row, 'partner_handle')
if parent_id:
destination = '{0:s} <{1:s}>'.format(partner_handle, partner_dispname)
skype_id, skype_name = source_dict.get(parent_id, [None, None])
if skype_name:
source = '{0:s} <{1:s}>'.format(skype_id, skype_name)
else:
source = '{0:s} <{1:s}>'.format(partner_handle, partner_dispname)
pk_id = self._GetRowValue(query_hash, row, 'pk_id')
if pk_id:
skype_id, skype_name = dest_dict.get(pk_id, [None, None])
if skype_name:
destination = '{0:s} <{1:s}>'.format(skype_id, skype_name)
filename = self._GetRowValue(query_hash, row, 'filename')
filesize = self._GetRowValue(query_hash, row, 'filesize')
try:
file_size = int(filesize, 10)
except (ValueError, TypeError):
parser_mediator.ProduceExtractionWarning(
'unable to convert file size: {0!s} of file: {1:s}'.format(
filesize, filename))
file_size = 0
event_data = SkypeTransferFileEventData()
event_data.destination = destination
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.source = source
event_data.transferred_filename = filename
event_data.transferred_filepath = self._GetRowValue(
query_hash, row, 'filepath')
event_data.transferred_filesize = file_size
status = self._GetRowValue(query_hash, row, 'status')
starttime = self._GetRowValue(query_hash, row, 'starttime')
if status == 2:
if starttime:
event_data.action_type = 'SENDSOLICITUDE'
date_time = dfdatetime_posix_time.PosixTime(timestamp=starttime)
event = time_events.DateTimeValuesEvent(
date_time, 'File transfer from Skype')
parser_mediator.ProduceEventWithEventData(event, event_data)
elif status == 8:
if starttime:
event_data.action_type = 'GETSOLICITUDE'
date_time = dfdatetime_posix_time.PosixTime(timestamp=starttime)
event = time_events.DateTimeValuesEvent(
date_time, 'File transfer from Skype')
parser_mediator.ProduceEventWithEventData(event, event_data)
accepttime = self._GetRowValue(query_hash, row, 'accepttime')
if accepttime:
event_data.action_type = 'ACCEPTED'
date_time = dfdatetime_posix_time.PosixTime(timestamp=accepttime)
event = time_events.DateTimeValuesEvent(
date_time, 'File transfer from Skype')
parser_mediator.ProduceEventWithEventData(event, event_data)
finishtime = self._GetRowValue(query_hash, row, 'finishtime')
if finishtime:
event_data.action_type = 'FINISHED'
date_time = dfdatetime_posix_time.PosixTime(timestamp=finishtime)
event = time_events.DateTimeValuesEvent(
date_time, 'File transfer from Skype')
parser_mediator.ProduceEventWithEventData(event, event_data)
| 287,667 |
Initializes process information.
Args:
pid (int): process identifier (PID).
Raises:
IOError: If the process identified by the PID does not exist.
OSError: If the process identified by the PID does not exist.
|
def __init__(self, pid):
if not psutil.pid_exists(pid):
raise IOError('Process with PID: {0:d} does not exist'.format(pid))
self._process = psutil.Process(pid)
| 287,668 |