Dataset columns: docstring (string, length 52 to 499), function (string, length 67 to 35.2k), __index_level_0__ (int64, 52.6k to 1.16M).
Writes a collection of `Hash` entries observed for a particular path. Args: client_path: A `ClientPath` instance. hash_entries: A dictionary with timestamps as keys and `Hash` instances as values.
def WritePathHashHistory(self, client_path, hash_entries): client_path_history = ClientPathHistory() for timestamp, hash_entry in iteritems(hash_entries): client_path_history.AddHashEntry(timestamp, hash_entry) self.MultiWritePathHistory({client_path: client_path_history})
131,457
Reads a collection of hash and stat entries for a given path. Args: client_id: An identifier string for a client. path_type: A type of a path to retrieve path history for. components: A tuple of path components corresponding to the path to retrieve information for. Returns: A list of `rdf_objects.PathInfo` ordered by timestamp in ascending order.
def ReadPathInfoHistory(self, client_id, path_type, components): histories = self.ReadPathInfosHistories(client_id, path_type, [components]) return histories[components]
131,458
BatchProcessor constructor. Args: batch_size: All the values will be processed in batches of this size. threadpool_prefix: Prefix that will be used in the thread pool's thread names. threadpool_size: Size of the thread pool that will be used. If threadpool_size is 0, no threads will be used and all conversions will be done in the current thread.
def __init__(self, batch_size=1000, threadpool_prefix="batch_processor", threadpool_size=10): super(BatchConverter, self).__init__() self.batch_size = batch_size self.threadpool_prefix = threadpool_prefix self.threadpool_size = threadpool_size
131,583
Add the repack config filename onto the base output directory. This allows us to repack lots of different configs to the same installer name and still be able to distinguish them. Args: base_dir: Output directory string. config_filename: The secondary config filename string. Returns: String to be used as the output directory for this repack.
def GetOutputDir(self, base_dir, config_filename): return os.path.join(base_dir, os.path.basename(config_filename.replace(".yaml", "")))
131,591
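Example sketch (added illustration, not a dataset row): the directory logic of GetOutputDir above, reduced to plain os.path calls; the base directory and config filename below are hypothetical.

import os

def get_output_dir(base_dir, config_filename):
    # Strip a ".yaml" suffix, drop leading directories, and join onto base_dir.
    return os.path.join(base_dir,
                        os.path.basename(config_filename.replace(".yaml", "")))

print(get_output_dir("/tmp/repacked", "configs/windows-client.yaml"))
# -> /tmp/repacked/windows-client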
Detects paths in a given string. Args: str_in: String where the paths should be detected. Returns: A list of paths (as strings) detected inside the given string.
def Detect(self, str_in): components = SplitIntoComponents(str_in) extracted_paths = set() for extractor in self.extractors: extracted_paths.update(extractor.Extract(components)) results = set(extracted_paths) for post_processor in self.post_processors: processed_results = set() for result in results: processed_results.update(post_processor.Process(result)) results = processed_results return results
131,628
Opens multiple files specified by given path-specs. See documentation for `VFSOpen` for more information. Args: pathspecs: A list of pathspec instances of files to open. progress_callback: A callback function to call to notify about progress. Returns: A context manager yielding file-like objects.
def VFSMultiOpen(pathspecs, progress_callback=None): precondition.AssertIterableType(pathspecs, rdf_paths.PathSpec) vfs_open = functools.partial(VFSOpen, progress_callback=progress_callback) return context.MultiContext(map(vfs_open, pathspecs))
131,631
Read from the VFS and return the contents. Args: pathspec: Path to read from. offset: Number of bytes to skip. length: Number of bytes to read. progress_callback: A callback to indicate that the open call is still working but needs more time. Returns: VFS file contents.
def ReadVFS(pathspec, offset, length, progress_callback=None): fd = VFSOpen(pathspec, progress_callback=progress_callback) fd.Seek(offset) return fd.Read(length)
131,632
Writes a single row to the underlying buffer. Args: values: A list of string values to be inserted into the CSV output.
def WriteRow(self, values): precondition.AssertIterableType(values, text) if compatibility.PY2: self._csv.writerow([value.encode("utf-8") for value in values]) else: self._csv.writerow(values)
131,639
Writes a single row to the underlying buffer. Args: values: A dictionary mapping column names to values to be inserted into the CSV output.
def WriteRow(self, values): precondition.AssertDictType(values, text, text) row = [] for column in self._columns: try: value = values[column] except KeyError: raise ValueError("Row does not contain required column `%s`" % column) row.append(value) self._writer.WriteRow(row)
131,642
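Example sketch (added illustration, not a dataset row): a Python 3-only version of the dict-to-row pattern above with hypothetical column names; the GRR wrapper additionally handles Python 2 byte encoding and precondition checks, which are omitted here.

import csv
import io

def write_dict_row(writer, columns, values):
    # Map the dictionary onto the declared column order, failing loudly on a
    # missing column, as the writer above does.
    row = []
    for column in columns:
        try:
            row.append(values[column])
        except KeyError:
            raise ValueError("Row does not contain required column `%s`" % column)
    writer.writerow(row)

buf = io.StringIO()
writer = csv.writer(buf)
columns = ["name", "size"]  # hypothetical column set
writer.writerow(columns)
write_dict_row(writer, columns, {"name": "foo.txt", "size": "42"})
print(buf.getvalue())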
Creates an object copy by serializing/deserializing it. RDFStruct.Copy() doesn't deep-copy repeated fields, which may lead to hard-to-catch bugs. Args: obj: RDFValue to be copied. Returns: A deep copy of the passed RDFValue.
def _DeepCopy(self, obj): precondition.AssertType(obj, rdfvalue.RDFValue) return obj.__class__.FromSerializedString(obj.SerializeToString())
131,665
Applies an instant output plugin to a multi-type collection. Args: plugin: InstantOutputPlugin instance. output_collection: MultiTypeCollection instance. source_urn: If not None, override source_urn for collection items. This has to be used when exporting flow results - their GrrMessages don't have the "source" attribute set. Yields: Byte chunks, as generated by the plugin.
def ApplyPluginToMultiTypeCollection(plugin, output_collection, source_urn=None): for chunk in plugin.Start(): yield chunk for stored_type_name in sorted(output_collection.ListStoredTypes()): stored_cls = rdfvalue.RDFValue.classes[stored_type_name] # pylint: disable=cell-var-from-loop def GetValues(): for timestamp, value in output_collection.ScanByType(stored_type_name): _ = timestamp if source_urn: value.source = source_urn yield value # pylint: enable=cell-var-from-loop for chunk in plugin.ProcessValues(stored_cls, GetValues): yield chunk for chunk in plugin.Finish(): yield chunk
131,669
Applies an instant output plugin to a collection of results. Args: plugin: InstantOutputPlugin instance. type_names: List of type names (strings) to be processed. fetch_fn: Function that takes a type name as an argument and returns available items (FlowResult) corresponding to this type. Items are returned as a generator. Yields: Byte chunks, as generated by the plugin.
def ApplyPluginToTypedCollection(plugin, type_names, fetch_fn): for chunk in plugin.Start(): yield chunk def GetValues(tn): for v in fetch_fn(tn): yield v for type_name in sorted(type_names): stored_cls = rdfvalue.RDFValue.classes[type_name] for chunk in plugin.ProcessValues(stored_cls, functools.partial(GetValues, type_name)): yield chunk for chunk in plugin.Finish(): yield chunk
131,670
OutputPlugin constructor. Args: source_urn: URN identifying source of the data (hunt or flow). token: Security token. Raises: ValueError: If one of the keyword arguments is empty.
def __init__(self, source_urn=None, token=None): super(InstantOutputPlugin, self).__init__() if not source_urn: raise ValueError("source_urn can't be empty.") if not token: raise ValueError("token can't be empty.") self.source_urn = source_urn self.token = token
131,672
Generates converted values using the given converter from the given messages. Groups values into batches of size BATCH_SIZE and applies the converter to each batch. Args: converter: ExportConverter instance. grr_messages: An iterable (a generator is assumed) with GRRMessage values. Yields: Values generated by the converter. Raises: ValueError: If any of the GrrMessage objects doesn't have "source" set.
def _GenerateConvertedValues(self, converter, grr_messages): for batch in collection.Batch(grr_messages, self.BATCH_SIZE): metadata_items = self._GetMetadataForClients([gm.source for gm in batch]) batch_with_metadata = zip(metadata_items, [gm.payload for gm in batch]) for result in converter.BatchConvert( batch_with_metadata, token=self.token): yield result
131,677
Merge path info records. Merges src into self. Args: src: An rdfvalues.objects.PathInfo record, will be merged into self. Raises: ValueError: If src does not represent the same path.
def UpdateFrom(self, src): if not isinstance(src, PathInfo): raise TypeError("expected `%s` but got `%s`" % (PathInfo, type(src))) if self.path_type != src.path_type: raise ValueError( "src [%s] does not represent the same path type as self [%s]" % (src.path_type, self.path_type)) if self.components != src.components: raise ValueError("src [%s] does not represent the same path as self [%s]" % (src.components, self.components)) if src.HasField("stat_entry"): self.stat_entry = src.stat_entry self.last_stat_entry_timestamp = max(self.last_stat_entry_timestamp, src.last_stat_entry_timestamp) self.directory = self.directory or src.directory
131,700
Initializes a TimeRange. Args: start: An RDFDatetime that indicates the beginning of the time range. end: An RDFDatetime that indicates the end of the time range. Raises: ValueError: If the start of the time range is later than its end.
def __init__(self, start, end): if start > end: raise ValueError( "Invalid time-range: %s > %s." % (start.AsMicrosecondsSinceEpoch(), end.AsMicrosecondsSinceEpoch())) self._start = start self._end = end
131,707
Create a timeseries with an optional initializer. Args: initializer: An optional Timeseries to clone. Raises: RuntimeError: If initializer is not understood.
def __init__(self, initializer=None): if initializer is None: self.data = [] return if isinstance(initializer, Timeseries): self.data = copy.deepcopy(initializer.data) return raise RuntimeError("Unrecognized initializer.")
131,739
Adds value at timestamp. Values must be added in order of increasing timestamp. Args: value: An observed value. timestamp: The timestamp at which value was observed. Raises: RuntimeError: If timestamp is smaller than the previous timestamp.
def Append(self, value, timestamp): timestamp = self._NormalizeTime(timestamp) if self.data and timestamp < self.data[-1][1]: raise RuntimeError("Next timestamp must be larger.") self.data.append([value, timestamp])
131,741
Adds multiple value<->timestamp pairs. Args: value_timestamp_pairs: Tuples of (value, timestamp).
def MultiAppend(self, value_timestamp_pairs): for value, timestamp in value_timestamp_pairs: self.Append(value, timestamp)
131,742
Filter the series to lie between start_time and stop_time. Removes all values of the series which are outside of some time range. Args: start_time: If set, timestamps before start_time will be dropped. stop_time: If set, timestamps at or past stop_time will be dropped.
def FilterRange(self, start_time=None, stop_time=None): start_time = self._NormalizeTime(start_time) stop_time = self._NormalizeTime(stop_time) self.data = [ p for p in self.data if (start_time is None or p[1] >= start_time) and (stop_time is None or p[1] < stop_time) ]
131,743
Add other to self pointwise. Requires that both self and other are of the same length, and contain identical timestamps. Typically this means that Normalize has been called on both with identical time parameters. Args: other: The sequence to add to self. Raises: RuntimeError: other does not contain the same timestamps as self.
def Add(self, other): if len(self.data) != len(other.data): raise RuntimeError("Can only add series of identical lengths.") for i in range(len(self.data)): if self.data[i][1] != other.data[i][1]: raise RuntimeError("Timestamp mismatch.") if self.data[i][0] is None and other.data[i][0] is None: continue self.data[i][0] = (self.data[i][0] or 0) + (other.data[i][0] or 0)
131,747
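Example sketch (added illustration, not a dataset row): the append/add semantics of the Timeseries rows above, rewritten over plain lists and integer timestamps instead of the GRR Timeseries class and RDFDatetime values.

def append(series, value, timestamp):
    # Values must arrive in non-decreasing timestamp order.
    if series and timestamp < series[-1][1]:
        raise RuntimeError("Next timestamp must be larger.")
    series.append([value, timestamp])

def add(series, other):
    # Pointwise addition; None counts as 0 unless both sides are None.
    if len(series) != len(other):
        raise RuntimeError("Can only add series of identical lengths.")
    for i in range(len(series)):
        if series[i][1] != other[i][1]:
            raise RuntimeError("Timestamp mismatch.")
        if series[i][0] is None and other[i][0] is None:
            continue
        series[i][0] = (series[i][0] or 0) + (other[i][0] or 0)

a, b = [], []
for ts, (va, vb) in enumerate([(1, 10), (None, 20), (3, None)]):
    append(a, va, ts)
    append(b, vb, ts)
add(a, b)
print(a)  # -> [[11, 0], [20, 1], [3, 2]]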
Take the data and yield results that passed through the filters. The output of each filter is added to a result set. So long as the filter selects, but does not modify, raw data, the result count will remain accurate. Args: raw_data: An iterable series of rdf values. Returns: A list of rdf values that matched at least one filter.
def Parse(self, raw_data): self.results = set() if not self.filters: self.results.update(raw_data) else: for f in self.filters: self.results.update(f.Parse(raw_data)) return list(self.results)
131,765
Take the results and yield results that passed through the filters. The output of each filter is used as the input for successive filters. Args: raw_data: An iterable series of rdf values. Returns: A list of rdf values that matched all filters.
def Parse(self, raw_data): self.results = raw_data for f in self.filters: self.results = f.Parse(self.results) return self.results
131,766
Return an initialized filter. Only initialize filters once. Args: filter_name: The name of the filter, as a string. Returns: An initialized instance of the filter. Raises: DefinitionError: If the type of filter has not been defined.
def GetFilter(cls, filter_name): # Check if the filter is defined in the registry. try: filt_cls = cls.GetPlugin(filter_name) except KeyError: raise DefinitionError("Filter %s does not exist." % filter_name) return filt_cls()
131,767
Parse one or more objects by testing whether they have matching stat results. Args: objs: An iterable of objects that should be checked. expression: A StatFilter expression, e.g.: "uid:>0 gid:=0 file_type:link" Yields: Matching objects.
def ParseObjs(self, objs, expression): self.Validate(expression) for obj in objs: if not isinstance(obj, rdf_client_fs.StatEntry): continue # If all match conditions pass, yield the object. for match in self.matchers: if not match(obj): break else: yield obj
131,784
Validates that a parsed rule entry is valid for fschecker. Args: expression: A rule expression. Raises: DefinitionError: If the filter definition could not be validated. Returns: True if the expression validated OK.
def Validate(self, expression): parsed = self._Load(expression) if not parsed: raise DefinitionError("Empty StatFilter expression.") bad_keys = set(parsed) - self._KEYS if bad_keys: raise DefinitionError("Invalid parameters: %s" % ",".join(bad_keys)) if self.cfg.mask and not self.cfg.mode: raise DefinitionError("mode can only be set when mask is also defined.") if self.cfg.mask: if len(self.cfg.mask) > 1: raise DefinitionError("Too many mask values defined.") if not self._PERM_RE.match(self.cfg.mask[0]): raise DefinitionError("mask=%s is not octal, e.g. 0600" % self.cfg.mask) if self.cfg.mode: if len(self.cfg.mode) > 1: raise DefinitionError("Too many mode values defined.") if not self._PERM_RE.match(self.cfg.mode[0]): raise DefinitionError("mode=%s is not octal, e.g. 0600" % self.cfg.mode) if self.cfg.gid: for gid in self.cfg.gid: matched = self._UID_GID_RE.match(gid) if not matched: raise DefinitionError("gid: %s is not an integer preceded by " "!, >, < or =." % gid) if self.cfg.uid: for uid in self.cfg.uid: matched = self._UID_GID_RE.match(uid) if not matched: raise DefinitionError("uid: %s is not an integer preceded by " "!, >, < or =." % uid) if self.cfg.file_re: if len(self.cfg.file_re) > 1: raise DefinitionError("Too many regexes defined: %s" % self.cfg.file_re) try: self.file_re = re.compile(self.cfg.file_re[0]) except (re.error, TypeError) as e: raise DefinitionError("Invalid file regex: %s" % e) if self.cfg.path_re: if len(self.cfg.path_re) > 1: raise DefinitionError("Too many regexes defined: %s" % self.cfg.path_re) try: self.path_re = re.compile(self.cfg.path_re[0]) except (re.error, TypeError) as e: raise DefinitionError("Invalid path regex: %s" % e) if self.cfg.file_type: if len(self.cfg.file_type) > 1: raise DefinitionError( "Too many file types defined: %s" % self.cfg.file_type) file_type = self.cfg.file_type[0].upper() if file_type not in self._TYPES: raise DefinitionError("Unsupported file type %s" % file_type) self._Initialize() if not self.matchers: raise DefinitionError("StatFilter has no actions: %s" % expression) return True
131,785
A compatibility wrapper for setting object's name. See documentation for `GetName` for more information. Args: obj: A type or function object to set the name for. name: A name to set.
def SetName(obj, name): # Not doing type assertion on obj, since it may be a mock object used # in tests. precondition.AssertType(name, str) if PY2: obj.__name__ = name.encode("ascii") else: obj.__name__ = name
131,791
A compatibility wrapper for listing class attributes. This method solves similar Python 2 compatibility issues for `dir` function as `GetName` does for `__name__` invocations. See documentation for `GetName` for more details. Once support for Python 2 is dropped all invocations of this function should be replaced with ordinary `dir` calls. Args: cls: A class object to list the attributes for. Returns: A list of attribute names as unicode strings.
def ListAttrs(cls): precondition.AssertType(cls, type) if PY2: # TODO(user): once https://github.com/google/pytype/issues/127 is fixed, # pytype should be able to tell that this line is unreachable in py3. return [item.decode("ascii") for item in dir(cls)] # pytype: disable=attribute-error else: return dir(cls)
131,792
A compatibility wrapper for the `strftime` function. It is guaranteed to always take a unicode string as an argument and return a unicode string as a result. Args: fmt: A format string specifying formatting of the output. stime: A time representation as returned by `gmtime` or `localtime`. Returns: A human-readable representation of `stime`.
def FormatTime(fmt, stime = None): precondition.AssertType(fmt, str) precondition.AssertOptionalType(stime, time.struct_time) # TODO(hanuszczak): https://github.com/google/pytype/issues/127 # pytype: disable=wrong-arg-types # We need this because second parameter is not a keyword argument, so method # must be explicitly called with or without it. if stime is None: strftime = time.strftime else: strftime = lambda fmt: time.strftime(fmt, stime) if PY2: return strftime(fmt.encode("ascii")).decode("ascii") else: return strftime(fmt)
131,794
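Example sketch (added illustration, not a dataset row): the same strftime call pattern for Python 3 only, without the Python 2 encode/decode branches used by the wrapper above.

import time

def format_time(fmt, stime=None):
    # `time.strftime` takes an optional positional struct_time, so the call
    # is built differently depending on whether one was supplied.
    if stime is None:
        return time.strftime(fmt)
    return time.strftime(fmt, stime)

print(format_time("%Y-%m-%d"))                  # current time
print(format_time("%Y-%m-%d", time.gmtime(0)))  # -> 1970-01-01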
A wrapper for `shlex.split` that works with unicode objects. Args: string: A unicode string to split. Returns: A list of unicode strings representing parts of the input string.
def ShlexSplit(string): precondition.AssertType(string, Text) if PY2: string = string.encode("utf-8") parts = shlex.split(string) if PY2: # TODO(hanuszczak): https://github.com/google/pytype/issues/127 # pytype: disable=attribute-error parts = [part.decode("utf-8") for part in parts] # pytype: enable=attribute-error return parts
131,795
A wrapper for `os.environ.get` that works the same way in both Pythons. Args: variable: A name of the variable to get the value of. default: A default value to return in case no value for the given variable is set. Returns: An environment value of the given variable.
def Environ(variable, default): precondition.AssertType(variable, Text) value = os.environ.get(variable, default) if value is None: return default if PY2: # TODO(hanuszczak): https://github.com/google/pytype/issues/127 value = value.decode("utf-8") # pytype: disable=attribute-error return value
131,796
Stringifies a Python object into its JSON representation. Args: obj: A Python object to convert to JSON. sort_keys: If True, output dictionaries keys in sorted (ascending) order. encoder: An (optional) encoder class to use. Returns: A JSON representation of the given object.
def Dump(obj, sort_keys = False, encoder = None): # Python 2 json.dumps expects separators as a tuple of bytes, while # Python 3 expects them to be a tuple of unicode strings. Pytype # is too dumb to infer the result of the if statement that sets # _SEPARATORS and complains when running in Python 3 mode. text = json.dumps( obj, indent=2, sort_keys=sort_keys, ensure_ascii=False, cls=encoder, separators=_SEPARATORS) # pytype: disable=wrong-arg-types # `text` is an instance of `bytes` if the object to serialize does not contain # any unicode characters, otherwise it is `unicode`. See [1] for details. # # [1]: https://bugs.python.org/issue13769 if compatibility.PY2 and isinstance(text, bytes): text = text.decode("utf-8") # pytype: disable=attribute-error return text
131,797
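Example sketch (added illustration, not a dataset row): the JSON stringification above for Python 3 only; the _SEPARATORS value is an assumption, since the module-level constant is not shown in this row.

import json

# Assumed separator constant: no trailing space after commas, one after colons.
_SEPARATORS = (",", ": ")

def dump(obj, sort_keys=False):
    # Pretty-printed, non-ASCII-preserving JSON, as in the wrapper above.
    return json.dumps(obj, indent=2, sort_keys=sort_keys,
                      ensure_ascii=False, separators=_SEPARATORS)

print(dump({"b": 1, "a": [2, 3]}, sort_keys=True))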
Return RDFDatetime from a string like 20140825162259.000000-420. Args: timestr: WMI time string. Returns: rdfvalue.RDFDatetime. We have some timezone manipulation work to do here because the UTC offset is in minutes rather than +-HHMM.
def WMITimeStrToRDFDatetime(self, timestr): # We use manual parsing here because the time functions provided (datetime, # dateutil) do not properly deal with timezone information. offset_minutes = timestr[21:] year = timestr[:4] month = timestr[4:6] day = timestr[6:8] hours = timestr[8:10] minutes = timestr[10:12] seconds = timestr[12:14] microseconds = timestr[15:21] unix_seconds = calendar.timegm( tuple(map(int, [year, month, day, hours, minutes, seconds]))) unix_seconds -= int(offset_minutes) * 60 return rdfvalue.RDFDatetime(unix_seconds * 1e6 + int(microseconds))
131,814
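Example sketch (added illustration, not a dataset row): a stand-alone version of the manual WMI timestamp parsing above, returning microseconds since the epoch instead of an RDFDatetime.

import calendar

def wmi_time_to_unix_micros(timestr):
    # Manual slicing of a WMI datetime such as "20140825162259.000000-420";
    # the trailing offset is a count of minutes relative to UTC, not +-HHMM.
    offset_minutes = int(timestr[21:])
    year, month, day = int(timestr[:4]), int(timestr[4:6]), int(timestr[6:8])
    hours, minutes, seconds = int(timestr[8:10]), int(timestr[10:12]), int(timestr[12:14])
    microseconds = int(timestr[15:21])
    unix_seconds = calendar.timegm((year, month, day, hours, minutes, seconds))
    unix_seconds -= offset_minutes * 60
    return unix_seconds * 10**6 + microseconds

print(wmi_time_to_unix_micros("20140825162259.000000-420"))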
Creates a ConnectionPool. Args: connect_func: A closure which returns a new connection to the underlying database, i.e. a MySQLdb.Connection. Should raise or block if the database is unavailable. max_size: The maximum number of simultaneous connections.
def __init__(self, connect_func, max_size=10): self.connect_func = connect_func self.limiter = threading.BoundedSemaphore(max_size) self.idle_conns = [] # Atomic access only!! self.closed = False
131,830
Gets a connection. Args: blocking: Whether to block when max_size connections are already in use. If false, may return None. Returns: A connection to the database. Raises: PoolAlreadyClosedError: if close() method was already called on this pool.
def get(self, blocking=True): if self.closed: raise PoolAlreadyClosedError("Connection pool is already closed.") # NOTE: Once we acquire capacity from the semaphore, it is essential that we # return it eventually. On success, this responsibility is delegated to # _ConnectionProxy. if not self.limiter.acquire(blocking=blocking): return None c = None # pop is atomic, but if we did a check first, it would not be atomic with # the pop. try: c = self.idle_conns.pop() except IndexError: # Create a connection, release the pool allocation if it fails. try: c = self.connect_func() except Exception: self.limiter.release() raise return _ConnectionProxy(self, c)
131,831
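Example sketch (added illustration, not a dataset row): the semaphore-bounded pool pattern above, stripped of the _ConnectionProxy wrapper and the closed flag; put() stands in for what the proxy does when a connection is returned.

import threading

class TinyPool(object):
    """A bounded pool: a semaphore caps live connections, idle ones are reused LIFO."""

    def __init__(self, connect_func, max_size=10):
        self.connect_func = connect_func
        self.limiter = threading.BoundedSemaphore(max_size)
        self.idle_conns = []

    def get(self, blocking=True):
        if not self.limiter.acquire(blocking=blocking):
            return None
        try:
            return self.idle_conns.pop()
        except IndexError:
            try:
                return self.connect_func()
            except Exception:
                self.limiter.release()  # give the capacity back on failure
                raise

    def put(self, conn):
        self.idle_conns.append(conn)
        self.limiter.release()

pool = TinyPool(connect_func=lambda: object(), max_size=2)
conn = pool.get()
pool.put(conn)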
Terminates a flow and all of its children. Args: client_id: Client ID of a flow to terminate. flow_id: Flow ID of a flow to terminate. reason: String with a termination reason. flow_state: Flow state to be assigned to a flow after termination. Defaults to FlowState.ERROR.
def TerminateFlow(client_id, flow_id, reason=None, flow_state=rdf_flow_objects.Flow.FlowState.ERROR): to_terminate = [data_store.REL_DB.ReadFlowObject(client_id, flow_id)] while to_terminate: next_to_terminate = [] for rdf_flow in to_terminate: _TerminateFlow(rdf_flow, reason=reason, flow_state=flow_state) next_to_terminate.extend( data_store.REL_DB.ReadChildFlowObjects(rdf_flow.client_id, rdf_flow.flow_id)) to_terminate = next_to_terminate
131,848
Allows this flow to send a message to its parent flow. If this flow does not have a parent, the message is ignored. Args: response: An RDFValue() instance to be sent to the parent. tag: If specified, tag the result with this tag. Raises: ValueError: If response is not of the correct type.
def SendReply(self, response, tag=None): if not isinstance(response, rdfvalue.RDFValue): raise ValueError("SendReply can only send RDFValues") if self.rdf_flow.parent_flow_id: response = rdf_flow_objects.FlowResponse( client_id=self.rdf_flow.client_id, request_id=self.rdf_flow.parent_request_id, response_id=self.GetNextResponseId(), payload=response, flow_id=self.rdf_flow.parent_flow_id, tag=tag) self.flow_responses.append(response) else: reply = rdf_flow_objects.FlowResult( client_id=self.rdf_flow.client_id, flow_id=self.rdf_flow.flow_id, hunt_id=self.rdf_flow.parent_hunt_id, payload=response, tag=tag) self.replies_to_write.append(reply) self.replies_to_process.append(reply) self.rdf_flow.num_replies_sent += 1
131,854
Logs the message using the flow's standard logging. Args: format_str: Format string. *args: Arguments to the format string.
def Log(self, format_str, *args): log_entry = rdf_flow_objects.FlowLogEntry( client_id=self.rdf_flow.client_id, flow_id=self.rdf_flow.flow_id, hunt_id=self.rdf_flow.parent_hunt_id, message=format_str % args) data_store.REL_DB.WriteFlowLogEntries([log_entry]) if self.rdf_flow.parent_hunt_id: db_compat.ProcessHuntFlowLog(self.rdf_flow, format_str % args)
131,861
Completes the request by calling the state method. Args: method_name: The name of the state method to call. request: A RequestState protobuf. responses: A list of FlowMessages responding to the request.
def RunStateMethod(self, method_name, request=None, responses=None): if self.rdf_flow.pending_termination: self.Error(error_message=self.rdf_flow.pending_termination.reason) return client_id = self.rdf_flow.client_id deadline = self.rdf_flow.processing_deadline if deadline and rdfvalue.RDFDatetime.Now() > deadline: raise flow.FlowError("Processing time for flow %s on %s expired." % (self.rdf_flow.flow_id, self.rdf_flow.client_id)) self.rdf_flow.current_state = method_name if request and responses: logging.debug("Running %s for flow %s on %s, %d responses.", method_name, self.rdf_flow.flow_id, client_id, len(responses)) else: logging.debug("Running %s for flow %s on %s", method_name, self.rdf_flow.flow_id, client_id) try: try: method = getattr(self, method_name) except AttributeError: raise ValueError("Flow %s has no state method %s" % (self.__class__.__name__, method_name)) # Prepare a responses object for the state method to use: responses = flow_responses.Responses.FromResponses( request=request, responses=responses) if responses.status is not None: self.SaveResourceUsage(responses.status) stats_collector_instance.Get().IncrementCounter("grr_worker_states_run") if method_name == "Start": stats_collector_instance.Get().IncrementCounter( "flow_starts", fields=[self.rdf_flow.flow_class_name]) method() else: method(responses) if self.replies_to_process: if self.rdf_flow.parent_hunt_id and not self.rdf_flow.parent_flow_id: self._ProcessRepliesWithHuntOutputPlugins(self.replies_to_process) else: self._ProcessRepliesWithFlowOutputPlugins(self.replies_to_process) self.replies_to_process = [] # We don't know here what exceptions can be thrown in the flow but we have # to continue. Thus, we catch everything. except Exception as e: # pylint: disable=broad-except # This flow will terminate now stats_collector_instance.Get().IncrementCounter( "flow_errors", fields=[self.rdf_flow.flow_class_name]) logging.exception("Flow %s on %s raised %s.", self.rdf_flow.flow_id, client_id, utils.SmartUnicode(e)) self.Error( error_message=utils.SmartUnicode(e), backtrace=traceback.format_exc())
131,862
Saves signed blobs to the datastore. If a signed binary with the given URN already exists, its contents will get overwritten. Args: binary_urn: RDFURN that should serve as a unique identifier for the binary. blobs: An Iterable of signed blobs to write to the datastore. token: ACL token to use with the legacy (non-relational) datastore.
def WriteSignedBinaryBlobs(binary_urn, blobs, token = None): if _ShouldUseLegacyDatastore(): aff4.FACTORY.Delete(binary_urn, token=token) with data_store.DB.GetMutationPool() as mutation_pool: with aff4.FACTORY.Create( binary_urn, collects.GRRSignedBlob, mode="w", mutation_pool=mutation_pool, token=token) as fd: for blob in blobs: fd.Add(blob, mutation_pool=mutation_pool) if data_store.RelationalDBEnabled(): blob_references = rdf_objects.BlobReferences() current_offset = 0 for blob in blobs: blob_id = data_store.BLOBS.WriteBlobWithUnknownHash( blob.SerializeToString()) blob_references.items.Append( rdf_objects.BlobReference( offset=current_offset, size=len(blob.data), blob_id=blob_id)) current_offset += len(blob.data) data_store.REL_DB.WriteSignedBinaryReferences( _SignedBinaryIDFromURN(binary_urn), blob_references)
131,876
Deletes the binary with the given urn from the datastore. Args: binary_urn: RDFURN that serves as a unique identifier for the binary. token: ACL token to use with the legacy (non-relational) datastore. Raises: SignedBinaryNotFoundError: If the signed binary does not exist.
def DeleteSignedBinary(binary_urn, token = None): if _ShouldUseLegacyDatastore(): try: aff4.FACTORY.Open( binary_urn, aff4_type=aff4.AFF4Stream, mode="r", token=token) except aff4.InstantiationError: raise SignedBinaryNotFoundError(binary_urn) aff4.FACTORY.Delete(binary_urn, token=token) if data_store.RelationalDBEnabled(): try: data_store.REL_DB.ReadSignedBinaryReferences( _SignedBinaryIDFromURN(binary_urn)) except db.UnknownSignedBinaryError: if _ShouldUseLegacyDatastore(): # Migration of data isn't complete yet (we haven't started reading # exclusively from the relational DB), so this is probably ok. return else: raise SignedBinaryNotFoundError(binary_urn) data_store.REL_DB.DeleteSignedBinaryReferences( _SignedBinaryIDFromURN(binary_urn))
131,877
Returns URNs for all signed binaries in the datastore. Args: token: ACL token to use with the legacy (non-relational) datastore.
def FetchURNsForAllSignedBinaries(token ): if _ShouldUseLegacyDatastore(): urns = [] aff4_roots = [GetAFF4PythonHackRoot(), GetAFF4ExecutablesRoot()] for _, descendant_urns in aff4.FACTORY.RecursiveMultiListChildren( aff4_roots): urns.extend(descendant_urns) aff4_streams = aff4.FACTORY.MultiOpen( urns, aff4_type=collects.GRRSignedBlob, mode="r", token=token) return [stream.urn for stream in aff4_streams] else: return [ _SignedBinaryURNFromID(i) for i in data_store.REL_DB.ReadIDsForAllSignedBinaries() ]
131,878
Retrieves blobs for the given binary from the datastore. Args: binary_urn: RDFURN that uniquely identifies the binary. token: ACL token to use with the legacy (non-relational) datastore. Returns: A tuple containing an iterator for all the binary's blobs and an RDFDatetime representing when the binary's contents were saved to the datastore. Raises: SignedBinaryNotFoundError: If no signed binary with the given URN exists.
def FetchBlobsForSignedBinary( binary_urn, token = None ): if _ShouldUseLegacyDatastore(): try: aff4_stream = aff4.FACTORY.Open( binary_urn, aff4_type=collects.GRRSignedBlob, mode="r", token=token) except aff4.InstantiationError: raise SignedBinaryNotFoundError(binary_urn) timestamp = aff4_stream.Get(aff4_stream.Schema.TYPE).age return (blob for blob in aff4_stream), timestamp else: try: references, timestamp = data_store.REL_DB.ReadSignedBinaryReferences( _SignedBinaryIDFromURN(binary_urn)) except db.UnknownSignedBinaryError: raise SignedBinaryNotFoundError(binary_urn) blob_ids = [r.blob_id for r in references.items] raw_blobs = (data_store.BLOBS.ReadBlob(blob_id) for blob_id in blob_ids) blobs = ( rdf_crypto.SignedBlob.FromSerializedString(raw_blob) for raw_blob in raw_blobs) return blobs, timestamp
131,879
Returns the size of the given binary (in bytes). Args: binary_urn: RDFURN that uniquely identifies the binary. token: ACL token to use with the legacy (non-relational) datastore. Raises: SignedBinaryNotFoundError: If no signed binary with the given URN exists.
def FetchSizeOfSignedBinary(binary_urn, token = None ): if _ShouldUseLegacyDatastore(): try: aff4_stream = aff4.FACTORY.Open( binary_urn, aff4_type=collects.GRRSignedBlob, mode="r", token=token) return aff4_stream.size except aff4.InstantiationError: raise SignedBinaryNotFoundError(binary_urn) else: try: references, _ = data_store.REL_DB.ReadSignedBinaryReferences( _SignedBinaryIDFromURN(binary_urn)) except db.UnknownSignedBinaryError: raise SignedBinaryNotFoundError(binary_urn) last_reference = references.items[-1] return last_reference.offset + last_reference.size
131,880
Yields the contents of the given binary in chunks of the given size. Args: blob_iterator: An Iterator over all the binary's blobs. chunk_size: Size, in bytes, of the chunks to yield.
def StreamSignedBinaryContents(blob_iterator, chunk_size = 1024 ): all_blobs_read = False byte_buffer = io.BytesIO() while not all_blobs_read or byte_buffer.getvalue(): while not all_blobs_read and byte_buffer.tell() < chunk_size: try: blob = next(blob_iterator) except StopIteration: all_blobs_read = True break byte_buffer.write(blob.data) if byte_buffer.tell() > 0: # Yield a chunk of the signed binary and reset the buffer to contain # only data that hasn't been sent yet. byte_buffer.seek(0) yield byte_buffer.read(chunk_size) byte_buffer = io.BytesIO(byte_buffer.read()) byte_buffer.seek(0, io.SEEK_END)
131,881
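Example sketch (added illustration, not a dataset row): the same re-chunking trick over an iterator of raw byte strings; the real function reads blob.data from signed blobs rather than bytes directly.

import io

def stream_chunks(blob_iterator, chunk_size=1024):
    # Re-chunk an iterator of byte strings into fixed-size pieces using an
    # intermediate BytesIO buffer, as StreamSignedBinaryContents does above.
    all_read = False
    buf = io.BytesIO()
    while not all_read or buf.getvalue():
        while not all_read and buf.tell() < chunk_size:
            try:
                buf.write(next(blob_iterator))
            except StopIteration:
                all_read = True
                break
        if buf.tell() > 0:
            # Emit one chunk, then keep only the unsent tail in a fresh buffer.
            buf.seek(0)
            yield buf.read(chunk_size)
            buf = io.BytesIO(buf.read())
            buf.seek(0, io.SEEK_END)

print(list(stream_chunks(iter([b"aaaa", b"bb", b"c"]), chunk_size=3)))
# -> [b'aaa', b'abb', b'c']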
Returns a string of column names for MySQL INSERTs. To account for Iterables with undefined order (dicts before Python 3.6), this function sorts column names. Examples: >>> Columns({"password": "foo", "name": "bar"}) u'(`name`, `password`)' Args: iterable: The iterable of strings to be used as column names. Returns: A string containing a tuple of sorted comma-separated column names.
def Columns(iterable): columns = sorted(iterable) return "({})".format(", ".join("`{}`".format(col) for col in columns))
131,886
Converts a list of path components to a canonical path representation. Args: components: A sequence of path components. Returns: A canonical MySQL path representation.
def ComponentsToPath(components): precondition.AssertIterableType(components, Text) for component in components: if not component: raise ValueError("Empty path component in: {}".format(components)) if "/" in component: raise ValueError("Path component with '/' in: {}".format(components)) if components: return "/" + "/".join(components) else: return ""
131,889
Converts a canonical path representation to a list of components. Args: path: A canonical MySQL path representation. Returns: A sequence of path components.
def PathToComponents(path): precondition.AssertType(path, Text) if path and not path.startswith("/"): raise ValueError("Path '{}' is not absolute".format(path)) if path: return tuple(path.split("/")[1:]) else: return ()
131,890
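Example sketch (added illustration, not a dataset row): a precondition-free round trip through the two path helpers above, showing that ComponentsToPath and PathToComponents invert each other for well-formed inputs.

def components_to_path(components):
    # Canonical form: leading "/", no empty components, no "/" inside a component.
    for component in components:
        if not component:
            raise ValueError("Empty path component in: {}".format(components))
        if "/" in component:
            raise ValueError("Path component with '/' in: {}".format(components))
    return "/" + "/".join(components) if components else ""

def path_to_components(path):
    if path and not path.startswith("/"):
        raise ValueError("Path '{}' is not absolute".format(path))
    return tuple(path.split("/")[1:]) if path else ()

path = components_to_path(("usr", "share", "misc"))
print(path)                      # -> /usr/share/misc
print(path_to_components(path))  # -> ('usr', 'share', 'misc')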
/etc/insserv.conf* entries define system facilities. Full format details are in man 8 insserv, but the basic structure is: $variable facility1 facility2 $second_variable facility3 $variable Any init script that specifies Required-Start: $second_variable needs to be expanded to facility1 facility2 facility3. Args: data: A string of insserv definitions.
def _ParseInsserv(self, data): p = config_file.FieldParser() entries = p.ParseEntries(data) raw = {e[0]: e[1:] for e in entries} # Now expand out the facilities to services. facilities = {} for k, v in iteritems(raw): # Remove interactive tags. k = k.replace("<", "").replace(">", "") facilities[k] = v for k, vals in iteritems(facilities): self.insserv[k] = [] for v in vals: self.insserv[k].extend(self._InsservExpander(facilities, v))
131,900
Connect to the given MySQL host and create a utf8mb4_unicode_ci database. Args: host: The hostname to connect to. port: The port to connect to. user: The username to connect as. password: The password to connect with. database: The database name to create. client_key_path: The path of the client private key file. client_cert_path: The path of the client public key certificate file. ca_cert_path: The path of the Certificate Authority (CA) certificate file.
def _SetupDatabase(host=None, port=None, user=None, password=None, database=None, client_key_path=None, client_cert_path=None, ca_cert_path=None): with contextlib.closing( _Connect( host=host, port=port, user=user, password=password, # No database should be specified in a connection that intends # to create a database. database=None, client_key_path=client_key_path, client_cert_path=client_cert_path, ca_cert_path=ca_cert_path)) as conn: with contextlib.closing(conn.cursor()) as cursor: try: cursor.execute(CREATE_DATABASE_QUERY.format(database)) except MySQLdb.MySQLError as e: # Statement might fail if database exists, this is fine. if e.args[0] != mysql_error_constants.DB_CREATE_EXISTS: raise cursor.execute("USE {}".format(database)) _CheckCollation(cursor) def _MigrationConnect(): return _Connect( host=host, port=port, user=user, password=password, database=database, client_key_path=client_key_path, client_cert_path=client_cert_path, ca_cert_path=ca_cert_path) mysql_migration.ProcessMigrations(_MigrationConnect, config.CONFIG["Mysql.migrations_dir"])
131,964
Creates a datastore implementation. Args: host: Passed to MySQLdb.Connect when creating a new connection. port: Passed to MySQLdb.Connect when creating a new connection. user: Passed to MySQLdb.Connect when creating a new connection. password: Passed to MySQLdb.Connect when creating a new connection. database: Passed to MySQLdb.Connect when creating a new connection.
def __init__(self, host=None, port=None, user=None, password=None, database=None): # Turn all SQL warnings not mentioned below into exceptions. warnings.filterwarnings("error", category=MySQLdb.Warning) for message in [ # We use INSERT IGNOREs which generate useless duplicate entry warnings. ".*Duplicate entry.*", # Same for CREATE TABLE IF NOT EXISTS. ".*Table '.*' already exists", # And CREATE INDEX IF NOT EXISTS. ".*Duplicate key name.*", # TODO: this is caused by an old version of the MySQLdb # library that doesn't wrap bytes SQL arguments with the _binary() # type hint. This issue should go away when a new version of the # MySQLdb is used with Python 3. ".*Invalid.*character string.*", ]: warnings.filterwarnings( "ignore", category=MySQLdb.Warning, message=message) self._connect_args = dict( host=host or config.CONFIG["Mysql.host"], port=port or config.CONFIG["Mysql.port"], user=user or config.CONFIG["Mysql.username"], password=password or config.CONFIG["Mysql.password"], database=database or config.CONFIG["Mysql.database"]) client_key_path = config.CONFIG["Mysql.client_key_path"] if client_key_path: logging.debug("Client key file configured, trying to use SSL.") self._connect_args["client_key_path"] = client_key_path self._connect_args["client_cert_path"] = config.CONFIG[ "Mysql.client_cert_path"] self._connect_args["ca_cert_path"] = config.CONFIG["Mysql.ca_cert_path"] _SetupDatabase(**self._connect_args) max_pool_size = config.CONFIG.Get("Mysql.conn_pool_max", 10) self.pool = mysql_pool.Pool(self._Connect, max_size=max_pool_size) self.handler_thread = None self.handler_stop = True self.flow_processing_request_handler_thread = None self.flow_processing_request_handler_stop = None self.flow_processing_request_handler_pool = ( threadpool.ThreadPool.Factory( "flow_processing_pool", min_threads=2, max_threads=50)) self.flow_processing_request_handler_pool.Start()
131,967
Handles messages from GRR clients received via Fleetspeak. This method updates the last-ping timestamp of the client before beginning processing. Args: fs_client_id: The Fleetspeak client-id for the client. grr_messages: An Iterable of GrrMessages.
def _ProcessGRRMessages(self, fs_client_id, grr_messages): grr_client_id = fleetspeak_utils.FleetspeakIDToGRRID(fs_client_id) for grr_message in grr_messages: grr_message.source = grr_client_id grr_message.auth_state = ( rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED) client_is_new = self.frontend.EnrolFleetspeakClient(client_id=grr_client_id) if not client_is_new and data_store.RelationalDBEnabled(): data_store.REL_DB.WriteClientMetadata( grr_client_id, last_ping=rdfvalue.RDFDatetime.Now()) self.frontend.ReceiveMessages( client_id=grr_client_id, messages=grr_messages)
131,994
Process rdf data through the filter. Filters sift data according to filter rules. Data that passes the filter rule is kept, other data is dropped. If no filter method is provided, the data is returned as a list. Otherwise, items that meet the filter conditions are returned in a list. Args: rdf_data: Host data that has already been processed by a Parser into RDF. Returns: A list containing data items that matched the filter rules.
def Parse(self, rdf_data): if self._filter: return list(self._filter.Parse(rdf_data, self.expression)) return rdf_data
132,005
Process rdf data through filters. Test if results match expectations. Processing of rdf data is staged by a filter handler, which manages the processing of host data. The output of the filters is compared against expected results. Args: rdf_data: A list containing 0 or more rdf values. Returns: An anomaly if data didn't match expectations. Raises: ProcessingError: If rdf_data is not a handled type.
def Parse(self, rdf_data): if not isinstance(rdf_data, (list, set)): raise ProcessingError("Bad host data format: %s" % type(rdf_data)) if self.baseline: comparison = self.baseliner.Parse(rdf_data) else: comparison = rdf_data found = self.handler.Parse(comparison) results = self.hint.Render(found) return self.matcher.Detect(comparison, results)
132,008
Runs probes that evaluate whether collected data has an issue. Args: conditions: The trigger conditions. host_data: A map of artifacts and rdf data. Returns: Anomalies if an issue exists.
def Parse(self, conditions, host_data): processed = [] probes = self.triggers.Calls(conditions) for p in probes: # Get the data required for the probe. A probe can use a result_context # (e.g. Parsers, Anomalies, Raw), to identify the data that is needed # from the artifact collection results. artifact_data = host_data.get(p.artifact) if not p.result_context: rdf_data = artifact_data["PARSER"] else: rdf_data = artifact_data.get(str(p.result_context)) try: result = p.Parse(rdf_data) except ProcessingError as e: raise ProcessingError("Bad artifact %s: %s" % (p.artifact, e)) if result: processed.append(result) # Matcher compares the number of probes that triggered with results. return self.matcher.Detect(probes, processed)
132,011
Determines if the check uses the specified artifact(s). Args: artifacts: Either a single artifact name, or a list of artifact names. Returns: True if the check uses any of the given artifacts.
def UsesArtifact(self, artifacts): # If artifact is a single string, see if it is in the list of artifacts # as-is. Otherwise, test whether any of the artifacts passed in to this # function exist in the list of artifacts. if isinstance(artifacts, string_types): return artifacts in self.artifacts else: return any(True for artifact in artifacts if artifact in self.artifacts)
132,015
Runs methods that evaluate whether collected host_data has an issue. Args: conditions: A list of conditions to determine which Methods to trigger. host_data: A map of artifacts and rdf data. Returns: A CheckResult populated with Anomalies if an issue exists.
def Parse(self, conditions, host_data): result = CheckResult(check_id=self.check_id) methods = self.SelectChecks(conditions) result.ExtendAnomalies([m.Parse(conditions, host_data) for m in methods]) return result
132,016
Run host_data through detectors and return them if a detector triggers. Args: baseline: The base set of rdf values used to evaluate whether an issue exists. host_data: The rdf values passed back by the filters. Returns: A CheckResult message containing anomalies if any detectors identified an issue, None otherwise.
def Detect(self, baseline, host_data): result = CheckResult() for detector in self.detectors: finding = detector(baseline, host_data) if finding: result.ExtendAnomalies([finding]) if result: return result
132,019
Takes targeting info, identifies artifacts to fetch. Args: os_name: 0+ OS names. cpe: 0+ CPE identifiers. labels: 0+ GRR labels. restrict_checks: A list of check ids whose artifacts should be fetched. Returns: the artifacts that should be collected.
def SelectArtifacts(cls, os_name=None, cpe=None, labels=None, restrict_checks=None): results = set() for condition in cls.Conditions(None, os_name, cpe, labels): trigger = condition[1:] for chk in itervalues(cls.checks): if restrict_checks and chk.check_id not in restrict_checks: continue results.update(chk.triggers.Artifacts(*trigger)) return results
132,026
Ensures that given value has certain type. Args: value: A value to assert the type for. expected_type: An expected type for the given value. Raises: TypeError: If given value does not have the expected type.
def AssertType(value, expected_type): if not isinstance(value, expected_type): message = "Expected type `%r`, but got value `%r` of type `%s`" message %= (expected_type, value, type(value)) raise TypeError(message)
132,041
Ensures that the given iterable container has a certain type. Args: iterable: An iterable container to assert the type for. expected_item_type: An expected type of the container items. Raises: TypeError: If the given container is not an iterable or its items do not have the expected type.
def AssertIterableType(iterable, expected_item_type): # We do not consider iterators to be iterables even though Python does. An # "iterable" should be a type that can be iterated (that is: an iterator can # be constructed for them). Iterators should not be considered to be iterable # because it makes no sense to construct an iterator for iterator. The most # important practical implication is that act of iterating an iterator drains # it whereas act of iterating the iterable does not. if isinstance(iterable, collections.Iterator): message = "Expected iterable container but got iterator `%s` instead" message %= iterable raise TypeError(message) AssertType(iterable, collections.Iterable) for item in iterable: AssertType(item, expected_item_type)
132,042
Ensures that given dictionary is actually a dictionary of specified type. Args: dct: A dictionary to assert the type for. expected_key_type: An expected type for dictionary keys. expected_value_type: An expected type for dictionary values. Raises: TypeError: If given dictionary is not really a dictionary or not all its keys and values have the expected type.
def AssertDictType(dct, expected_key_type, expected_value_type): AssertType(dct, dict) for key, value in iteritems(dct): AssertType(key, expected_key_type) AssertType(value, expected_value_type)
132,043
Initializes this object from lists of start and completion data points. Args: start_stats: A list of lists, each containing two values (a timestamp and the number of clients started at this time). complete_stats: A list of lists, each containing two values (a timestamp and the number of clients completed at this time). Returns: A reference to the current instance to allow method chaining.
def InitFromDataPoints(self, start_stats, complete_stats): self.start_points = self._ConvertToResultList(start_stats) self.complete_points = self._ConvertToResultList(complete_stats) return self
132,090
Registers a new constructor in the factory. Args: name: A name associated with given constructor. constructor: A constructor function that creates instances. Raises: ValueError: If there already is a constructor associated with given name.
def Register(self, name, constructor): precondition.AssertType(name, Text) if name in self._constructors: message = "Duplicated constructors %r and %r for name '%s'" message %= (constructor, self._constructors[name], name) raise ValueError(message) self._constructors[name] = constructor
132,123
Unregisters a constructor. Args: name: A name of the constructor to unregister. Raises: ValueError: If constructor with specified name has never been registered.
def Unregister(self, name): precondition.AssertType(name, Text) try: del self._constructors[name] except KeyError: raise ValueError("Constructor with name '%s' is not registered" % name)
132,124
Creates a new instance. Args: name: A name identifying the constructor to use for instantiation. Returns: An instance of the type that the factory supports.
def Create(self, name): precondition.AssertType(name, Text) try: constructor = self._constructors[name] except KeyError: message = "No constructor for name '%s' has been registered" message %= name raise ValueError(message) instance = constructor() if not isinstance(instance, self._cls): message = ("Constructor %r for name '%s' returned instance of type %r " "(expected %r)") message %= (constructor, name, type(instance), self._cls) raise TypeError(message) return instance
132,125
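Example sketch (added illustration, not a dataset row): a pared-down factory with the same Register/Create contract as the rows above; the registrations below are hypothetical.

class Factory(object):
    """Maps names to constructors and type-checks what they produce."""

    def __init__(self, cls):
        self._cls = cls
        self._constructors = {}

    def Register(self, name, constructor):
        if name in self._constructors:
            raise ValueError("Duplicated constructor for name '%s'" % name)
        self._constructors[name] = constructor

    def Create(self, name):
        try:
            constructor = self._constructors[name]
        except KeyError:
            raise ValueError("No constructor for name '%s' has been registered" % name)
        instance = constructor()
        if not isinstance(instance, self._cls):
            raise TypeError("Constructor for '%s' returned %r, expected %r" %
                            (name, type(instance), self._cls))
        return instance

factory = Factory(dict)
factory.Register("empty", dict)                # hypothetical registrations
factory.Register("defaults", lambda: {"a": 1})
print(factory.Create("defaults"))              # -> {'a': 1}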
Generates a summary about the path record. Args: timestamp: A point in time from which the data should be retrieved. Returns: A `rdf_objects.PathInfo` instance.
def GetPathInfo(self, timestamp=None): path_info_timestamp = self._LastEntryTimestamp(self._path_infos, timestamp) try: result = self._path_infos[path_info_timestamp].Copy() except KeyError: result = rdf_objects.PathInfo( path_type=self._path_type, components=self._components) stat_entry_timestamp = self._LastEntryTimestamp(self._stat_entries, timestamp) result.last_stat_entry_timestamp = stat_entry_timestamp result.stat_entry = self._stat_entries.get(stat_entry_timestamp) hash_entry_timestamp = self._LastEntryTimestamp(self._hash_entries, timestamp) result.last_hash_entry_timestamp = hash_entry_timestamp result.hash_entry = self._hash_entries.get(hash_entry_timestamp) return result
132,144
Searches for the greatest timestamp not exceeding the specified one. Args: dct: A dictionary from timestamps to some items. upper_bound_timestamp: An upper bound for the timestamp to be returned. Returns: The greatest timestamp that is not greater than the specified one. If no such value exists, `None` is returned.
def _LastEntryTimestamp(dct, upper_bound_timestamp): if upper_bound_timestamp is None: upper_bound = lambda _: True else: upper_bound = lambda key: key <= upper_bound_timestamp try: return max(filter(upper_bound, iterkeys(dct))) except ValueError: # Thrown if `max` input (result of filtering) is empty. return None
132,145
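Example sketch (added illustration, not a dataset row): the upper-bound lookup above applied to a plain dictionary with hypothetical integer timestamps.

def last_entry_timestamp(dct, upper_bound_timestamp=None):
    # Greatest key not exceeding the bound (or the greatest key overall when
    # the bound is None); None when nothing qualifies -- same contract as above.
    if upper_bound_timestamp is None:
        upper_bound = lambda _: True
    else:
        upper_bound = lambda key: key <= upper_bound_timestamp
    try:
        return max(filter(upper_bound, dct))
    except ValueError:  # max() of an empty sequence
        return None

entries = {10: "a", 20: "b", 30: "c"}        # hypothetical integer timestamps
print(last_entry_timestamp(entries, 25))     # -> 20
print(last_entry_timestamp(entries, 5))      # -> None
print(last_entry_timestamp(entries))         # -> 30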
Prepare bundle of artifacts and their dependencies for the client. Args: flow_args: An `ArtifactCollectorFlowArgs` instance. knowledge_base: contains information about the client Returns: rdf value object containing a list of extended artifacts and the knowledge base
def GetArtifactCollectorArgs(flow_args, knowledge_base): args = rdf_artifacts.ClientArtifactCollectorArgs() args.knowledge_base = knowledge_base args.apply_parsers = flow_args.apply_parsers args.ignore_interpolation_errors = flow_args.ignore_interpolation_errors args.max_file_size = flow_args.max_file_size args.use_tsk = flow_args.use_tsk if not flow_args.recollect_knowledge_base: artifact_names = flow_args.artifact_list else: artifact_names = GetArtifactsForCollection(knowledge_base.os, flow_args.artifact_list) expander = ArtifactExpander(knowledge_base, flow_args.path_type, flow_args.max_file_size) for artifact_name in artifact_names: rdf_artifact = artifact_registry.REGISTRY.GetArtifact(artifact_name) if not MeetsConditions(knowledge_base, rdf_artifact): continue if artifact_name in expander.processed_artifacts: continue requested_by_user = artifact_name in flow_args.artifact_list for expanded_artifact in expander.Expand(rdf_artifact, requested_by_user): args.artifacts.append(expanded_artifact) return args
132,160
Wrapper for the ArtifactArranger. Extend the artifact list by dependencies and sort the artifacts to resolve the dependencies. Args: os_name: String specifying the OS name. artifact_list: List of requested artifact names. Returns: A list of artifacts such that if they are collected in the given order their dependencies are resolved.
def GetArtifactsForCollection(os_name, artifact_list): artifact_arranger = ArtifactArranger(os_name, artifact_list) artifact_names = artifact_arranger.GetArtifactsInProperOrder() return artifact_names
132,162
Creates the nodes and directed edges of the dependency graph. Args: os_name: String specifying the OS name. artifact_list: List of requested artifact names.
def _InitializeGraph(self, os_name, artifact_list): dependencies = artifact_registry.REGISTRY.SearchDependencies( os_name, artifact_list) artifact_names, attribute_names = dependencies self._AddAttributeNodes(attribute_names) self._AddArtifactNodesAndEdges(artifact_names)
132,170
Add an edge for every dependency of the given artifact. This method gets the attribute names for a given artifact and for every attribute it adds a directed edge from the attribute node to the artifact node. If an artifact does not have any dependencies it is added to the set of reachable nodes. Args: rdf_artifact: The artifact object.
def _AddDependencyEdges(self, rdf_artifact): artifact_dependencies = artifact_registry.GetArtifactPathDependencies( rdf_artifact) if artifact_dependencies: for attribute in artifact_dependencies: self._AddEdge(attribute, rdf_artifact.name) else: self.reachable_nodes.add(rdf_artifact.name) self.graph[rdf_artifact.name].is_provided = True
132,173
Add an edge for every attribute the given artifact provides. This method adds a directed edge from the artifact node to every attribute this artifact provides. Args: rdf_artifact: The artifact object.
def _AddProvidesEdges(self, rdf_artifact): for attribute in rdf_artifact.provides: self._AddEdge(rdf_artifact.name, attribute)
132,174
Add a directed edge to the graph. Add the end node to the list of outgoing nodes of the start node and the start node to the list of incoming nodes of the end node. Args: start_node: Name of the start node. end_node: Name of the end node.
def _AddEdge(self, start_node, end_node): self.graph[start_node].outgoing.append(end_node) # This check is necessary because an artifact can provide attributes that # are not covered by the graph because they are not relevant for the # requested artifacts. if end_node in self.graph: self.graph[end_node].incoming.append(start_node)
132,175
Parses the buffer as a protobuf. Args: buff: The buffer to parse. index: The position to start parsing. length: Optional length to parse until. Yields: Splits the buffer into tuples of strings: (encoded_tag, encoded_length, wire_format).
def SplitBuffer(buff, index=0, length=None): buffer_len = length or len(buff) while index < buffer_len: # data_index is the index where the data begins (i.e. after the tag). encoded_tag, data_index = ReadTag(buff, index) tag_type = ORD_MAP[encoded_tag[0]] & TAG_TYPE_MASK if tag_type == WIRETYPE_VARINT: # new_index is the index of the next tag. _, new_index = VarintReader(buff, data_index) yield (encoded_tag, b"", buff[data_index:new_index]) index = new_index elif tag_type == WIRETYPE_FIXED64: yield (encoded_tag, b"", buff[data_index:data_index + 8]) index = 8 + data_index elif tag_type == WIRETYPE_FIXED32: yield (encoded_tag, b"", buff[data_index:data_index + 4]) index = 4 + data_index elif tag_type == WIRETYPE_LENGTH_DELIMITED: # Start index of the string. length, start = VarintReader(buff, data_index) yield ( encoded_tag, buff[data_index:start], # Encoded length. buff[start:start + length]) # Raw data of element. index = start + length else: raise rdfvalue.DecodeError("Unexpected Tag.")
132,194
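Example sketch (added illustration, not a dataset row): a minimal varint and wire-format splitter in the spirit of SplitBuffer above; it handles only varint and length-delimited fields and raises ValueError where the real code raises rdfvalue.DecodeError.

def read_varint(buf, index):
    # Reads a base-128 varint starting at `index`; returns (value, next_index).
    result, shift = 0, 0
    while True:
        byte = buf[index]
        result |= (byte & 0x7F) << shift
        index += 1
        if not byte & 0x80:
            return result, index
        shift += 7

def split_fields(buf):
    # Yields (field_number, wire_type, value) for each top-level field.
    index = 0
    while index < len(buf):
        key, index = read_varint(buf, index)
        field_number, wire_type = key >> 3, key & 0x07
        if wire_type == 0:    # varint
            value, index = read_varint(buf, index)
        elif wire_type == 2:  # length-delimited
            length, start = read_varint(buf, index)
            value, index = buf[start:start + length], start + length
        else:
            raise ValueError("wire type %d not handled in this sketch" % wire_type)
        yield field_number, wire_type, value

# b"\x08\x96\x01" encodes field 1 (varint) with value 150.
print(list(split_fields(b"\x08\x96\x01")))  # -> [(1, 0, 150)]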
Gets entries of `RDFProtoStruct` in a well-defined order. Args: data: A raw data dictionary of `RDFProtoStruct`. Yields: Entries of the struct in a well-defined order.
def _GetOrderedEntries(data): # The raw data dictionary has two kinds of keys: strings (which correspond to # field name) or integers (if the name is unknown). In Python 3 it is not # possible to compare integers and strings to each other, so we first tag each # with either a 0 or 1 (so named fields are going to be serialized first) and # let the lexicographical ordering of the tuples take care of the rest. def Tag(field): # TODO: We use `string_types` here because in Python 2 # attribute names (which are passed e.g. through keyword arguments) are # represented as `bytes` whereas in Python 3 it is `unicode`. This should # be replaced with `str` once support for Python 2 is dropped. if isinstance(field, string_types): return 0, field if isinstance(field, int): return 1, field message = "Unexpected field '{}' of type '{}'".format(field, type(field)) raise TypeError(message) for field in sorted(iterkeys(data), key=Tag): yield data[field]
132,195
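Example sketch (added illustration, not a dataset row): the key-tagging trick above applied with sorted(); string field names sort before integer field numbers, which would otherwise not be comparable in Python 3.

def tag(field):
    # Named (string) keys sort before unknown (integer) field numbers, and
    # within each group the natural ordering applies.
    if isinstance(field, str):
        return 0, field
    if isinstance(field, int):
        return 1, field
    raise TypeError("Unexpected field %r of type %s" % (field, type(field)))

data = {"name": b"...", 7: b"...", "age": b"...", 3: b"..."}  # hypothetical raw data
print(sorted(data, key=tag))  # -> ['age', 'name', 3, 7]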
Late binding callback. This method is called on this field descriptor when the target RDFValue class is finally defined. It gives the field descriptor an opportunity to initialize after the point of definition. Args: target: The target nested class. Raises: TypeError: If the target class is not of the expected type.
def LateBind(self, target=None): if not issubclass(target, RDFProtoStruct): raise TypeError("Field %s expects a protobuf, but target is %s" % (self, target)) self.late_bound = False # The target type is now resolved. self.type = target # Register us in our owner. self.owner.AddDescriptor(self)
132,219
Initialize the type descriptor. We call the dynamic_method to know which type should be used to decode the embedded bytestream. Args: dynamic_cb: A callback to be used to return the class to parse the embedded data. We pass the callback our container. **kwargs: Passthrough.
def __init__(self, dynamic_cb=None, **kwargs): super(ProtoDynamicEmbedded, self).__init__(**kwargs) self._type = dynamic_cb
132,222
Convert to the wire format. Args: value: is of type RepeatedFieldHelper. Returns: A wire format representation of the value.
def ConvertToWireFormat(self, value): output = _SerializeEntries( (python_format, wire_format, value.type_descriptor) for (python_format, wire_format) in value.wrapped_list) return b"", b"", output
132,232
Sets the config file which will receive any modifications. The main config file can be made writable, but directing all Set() operations into a secondary location. This secondary location will receive any updates and will override the options for this file. Args: filename: A filename which will receive updates. The file is parsed first and merged into the raw data from this object.
def SetWriteBack(self, filename):
  try:
    self.writeback = self.LoadSecondaryConfig(filename)
    self.MergeData(self.writeback.RawData(), self.writeback_data)
  except IOError as e:
    # This means that we probably aren't installed correctly.
    logging.error("Unable to read writeback file: %s", e)
    return
  except Exception as we:  # pylint: disable=broad-except
    # Could be yaml parse error, could be some malformed parameter. Move the
    # writeback file so that we start in a clean state next run.
    if os.path.exists(filename):
      try:
        b = filename + ".bak"
        os.rename(filename, b)
        logging.warning("Broken writeback (%s) renamed to: %s", we, b)
      except Exception as e:  # pylint: disable=broad-except
        logging.error("Unable to rename broken writeback: %s", e)
    raise we
  logging.debug("Configuration writeback is set to %s", filename)
132,308
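The writeback mechanism above layers a small, writable file on top of the read-only primary configuration. The sketch below is a simplified, hypothetical illustration of that layering; the class and method bodies are invented for this example and are not GRR APIs, and it assumes PyYAML is available.

import yaml

class LayeredConfig(object):
  """Toy config: reads fall through to defaults, writes go to the writeback."""

  def __init__(self, defaults):
    self.defaults = dict(defaults)
    self.writeback_data = {}
    self.writeback_path = None

  def SetWriteBack(self, path):
    self.writeback_path = path
    try:
      with open(path) as fd:
        self.writeback_data.update(yaml.safe_load(fd) or {})
    except IOError:
      pass  # No writeback file yet; start with an empty overlay.

  def Set(self, name, value):
    self.writeback_data[name] = value

  def Get(self, name):
    return self.writeback_data.get(name, self.defaults.get(name))

  def Save(self):
    with open(self.writeback_path, "w") as fd:
      yaml.safe_dump(self.writeback_data, fd)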
Update the configuration option with a new value. Note that this forces the value to be set for all contexts. The value is written to the writeback location if Save() is later called. Args: name: The name of the parameter to set. value: The value to set it to. The value will be validated against the option's type descriptor. Raises: ConstModificationError: When attempting to change a constant option.
def Set(self, name, value):
  # If the configuration system has a write back location we use it,
  # otherwise we use the primary configuration object.
  if self.writeback is None:
    logging.warning("Attempting to modify a read only config object for %s.",
                    name)
  if name in self.constants:
    raise ConstModificationError(
        "Attempting to modify constant value %s" % name)

  writeback_data = self.writeback_data

  # Check if the new value conforms with the type_info.
  if value is not None:
    if isinstance(value, Text):
      value = self.EscapeString(value)

  writeback_data[name] = value
  self.FlushCache()
132,313
Start a Windows service with the given name. Args: service_name: string The name of the service to be started.
def StartService(service_name):
  try:
    win32serviceutil.StartService(service_name)
    logging.info("Service '%s' started.", service_name)
  except pywintypes.error as e:
    if getattr(e, "winerror", None) == winerror.ERROR_SERVICE_DOES_NOT_EXIST:
      logging.debug("Tried to start '%s', but the service is not installed.",
                    service_name)
    else:
      logging.exception("Encountered error trying to start '%s':",
                        service_name)
132,355
Stop a Windows service with the given name. Args: service_name: string The name of the service to be stopped. service_binary_name: string If given, also kill this binary as a best effort fallback solution.
def StopService(service_name, service_binary_name=None):
  # QueryServiceStatus returns: scvType, svcState, svcControls, err,
  # svcErr, svcCP, svcWH
  try:
    status = win32serviceutil.QueryServiceStatus(service_name)[1]
  except pywintypes.error as e:
    if getattr(e, "winerror", None) == winerror.ERROR_SERVICE_DOES_NOT_EXIST:
      logging.debug("Tried to stop '%s', but the service is not installed.",
                    service_name)
    else:
      logging.exception("Unable to query status of service '%s':",
                        service_name)
    return

  for _ in range(20):
    if status == win32service.SERVICE_STOPPED:
      break
    elif status != win32service.SERVICE_STOP_PENDING:
      try:
        win32serviceutil.StopService(service_name)
      except pywintypes.error:
        logging.exception("Unable to stop service '%s':", service_name)
    time.sleep(1)
    status = win32serviceutil.QueryServiceStatus(service_name)[1]

  if status == win32service.SERVICE_STOPPED:
    logging.info("Service '%s' stopped.", service_name)
    return
  elif not service_binary_name:
    return

  # Taskkill will fail on systems predating Windows XP, this is a best
  # effort fallback solution.
  output = subprocess.check_output(
      ["taskkill", "/im", "%s*" % service_binary_name, "/f"],
      shell=True,
      stdin=subprocess.PIPE,
      stderr=subprocess.PIPE)

  logging.debug("%s", output)

  # Sleep a bit to ensure that process really quits.
  time.sleep(2)
132,356
Retry the BigQuery upload job. Using the same job id protects us from duplicating data on the server. If we fail all of our retries we raise. Args: job: BigQuery job object job_id: ID string for this upload job error: errors.HttpError object from the first error Returns: API response object on success, None on failure Raises: BigQueryJobUploadError: if we can't get the bigquery job started after retry_max_attempts
def RetryUpload(self, job, job_id, error):
  if self.IsErrorRetryable(error):
    retry_count = 0
    sleep_interval = config.CONFIG["BigQuery.retry_interval"]
    while retry_count < config.CONFIG["BigQuery.retry_max_attempts"]:

      time.sleep(sleep_interval.seconds)
      logging.info("Retrying job_id: %s", job_id)
      retry_count += 1

      try:
        response = job.execute()
        return response
      except errors.HttpError as e:
        if self.IsErrorRetryable(e):
          sleep_interval *= config.CONFIG["BigQuery.retry_multiplier"]
          logging.exception("Error with job: %s, will retry in %s", job_id,
                            sleep_interval)
        else:
          raise BigQueryJobUploadError(
              "Can't retry error code %s. Giving up"
              " on job: %s." % (e.resp.status, job_id))
  else:
    raise BigQueryJobUploadError("Can't retry error code %s. Giving up on "
                                 "job: %s." % (error.resp.status, job_id))

  raise BigQueryJobUploadError(
      "Giving up on job:%s after %s retries." % (job_id, retry_count))
132,382
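The retry logic above is a bounded exponential backoff: sleep, retry, multiply the interval, and give up after a fixed number of attempts or on a non-retryable error. A generic, self-contained sketch of the same pattern, independent of the BigQuery client; the function and parameter names here are invented for illustration.

import time

def retry_with_backoff(operation, is_retryable, max_attempts=5,
                       initial_interval=1.0, multiplier=2.0):
  """Calls operation() until it succeeds, retrying retryable failures."""
  interval = initial_interval
  for attempt in range(max_attempts):
    try:
      return operation()
    except Exception as e:  # Narrow to the client's error type in real code.
      if not is_retryable(e) or attempt == max_attempts - 1:
        raise
      time.sleep(interval)
      interval *= multiplier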
Insert data into a bigquery table. If the table specified doesn't exist, it will be created with the specified schema. Args: table_id: string table id fd: open file descriptor containing the newline separated JSON schema: BigQuery schema dict job_id: string job id Returns: API response object on success, None on failure
def InsertData(self, table_id, fd, schema, job_id):
  configuration = {
      "schema": {
          "fields": schema
      },
      "destinationTable": {
          "projectId": self.project_id,
          "tableId": table_id,
          "datasetId": self.dataset_id
      },
      "sourceFormat": "NEWLINE_DELIMITED_JSON",
  }
  body = {
      "configuration": {
          "load": configuration
      },
      "jobReference": {
          "projectId": self.project_id,
          "jobId": job_id
      }
  }

  # File content can be gzipped for bandwidth efficiency. The server handles
  # it correctly without any changes to the request.
  mediafile = http.MediaFileUpload(
      fd.name, mimetype="application/octet-stream")
  job = self.service.jobs().insert(
      projectId=self.project_id, body=body, media_body=mediafile)
  try:
    response = job.execute()
    return response
  except errors.HttpError as e:
    if self.GetDataset(self.dataset_id):
      logging.exception("Error with job: %s", job_id)
    else:
      # If this is our first export ever, we need to create the dataset.
      logging.info("Attempting to create dataset: %s", self.dataset_id)
      self.CreateDataset()
    return self.RetryUpload(job, job_id, e)
132,383
Use TSK to read the pathspec. Args: base_fd: The file like object we read this component from. handlers: A mapping from rdf_paths.PathSpec.PathType to classes implementing VFSHandler. pathspec: An optional pathspec to open directly. progress_callback: A callback to indicate that the open call is still working but needs more time. Raises: IOError: If the file can not be opened.
def __init__(self, base_fd, handlers, pathspec=None, progress_callback=None):
  super(TSKFile, self).__init__(
      base_fd,
      handlers=handlers,
      pathspec=pathspec,
      progress_callback=progress_callback)
  if self.base_fd is None:
    raise IOError("TSK driver must have a file base.")

  # If our base is another tsk driver - borrow the reference to the raw
  # device, and replace the last pathspec component with this one after
  # extending its path.
  elif isinstance(base_fd, TSKFile) and self.base_fd.IsDirectory():
    self.tsk_raw_device = self.base_fd.tsk_raw_device
    last_path = utils.JoinPath(self.pathspec.last.path, pathspec.path)

    # Replace the last component with this one.
    self.pathspec.Pop(-1)
    self.pathspec.Append(pathspec)
    self.pathspec.last.path = last_path

  # Use the base fd as a base to parse the filesystem only if its file like.
  elif not self.base_fd.IsDirectory():
    self.tsk_raw_device = self.base_fd
    self.pathspec.Append(pathspec)

  else:
    # If we get here we have a directory from a non sleuthkit driver - dont
    # know what to do with it.
    raise IOError("Unable to parse base using Sleuthkit.")

  # If we are successful in opening this path below the path casing is
  # correct.
  self.pathspec.last.path_options = rdf_paths.PathSpec.Options.CASE_LITERAL

  fd_hash = self.tsk_raw_device.pathspec.SerializeToString()

  # Cache the filesystem using the path of the raw device
  try:
    self.filesystem = DEVICE_CACHE.Get(fd_hash)
    self.fs = self.filesystem.fs
  except KeyError:
    self.img = MyImgInfo(
        fd=self.tsk_raw_device, progress_callback=progress_callback)

    self.fs = pytsk3.FS_Info(self.img, 0)
    self.filesystem = CachedFilesystem(self.fs, self.img)
    DEVICE_CACHE.Put(fd_hash, self.filesystem)

  # We prefer to open the file based on the inode because that is more
  # efficient.
  if pathspec.HasField("inode"):
    self.fd = self.fs.open_meta(pathspec.inode)
    self.tsk_attribute = self.GetAttribute(pathspec.ntfs_type,
                                           pathspec.ntfs_id)
    if self.tsk_attribute:
      self.size = self.tsk_attribute.info.size
    else:
      self.size = self.fd.info.meta.size

  else:
    # TODO: In Python 2 TSK expects bytestring paths whereas in
    # Python 3 it expects unicode paths. Once support for Python 2 is dropped,
    # this branching can be removed.
    if compatibility.PY2:
      path = self.pathspec.last.path.encode("utf-8")
    else:
      path = self.pathspec.last.path

    # Does the filename exist in the image?
    self.fd = self.fs.open(path)
    self.size = self.fd.info.meta.size
    self.pathspec.last.inode = self.fd.info.meta.addr
132,391
Adds an rdf value to a queue. Does not require that the queue be locked, or even open. NOTE: The caller is responsible for ensuring that the queue exists and is of the correct type. Args: queue_urn: The urn of the queue to add to. rdf_value: The rdf value to add to the queue. mutation_pool: A MutationPool object to write to. Raises: ValueError: rdf_value has unexpected type.
def StaticAdd(cls, queue_urn, rdf_value, mutation_pool=None):
  if not isinstance(rdf_value, cls.rdf_type):
    raise ValueError("This collection only accepts values of type %s." %
                     cls.rdf_type.__name__)
  if mutation_pool is None:
    raise ValueError("Mutation pool can't be none.")

  timestamp = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()

  if not isinstance(queue_urn, rdfvalue.RDFURN):
    queue_urn = rdfvalue.RDFURN(queue_urn)

  mutation_pool.QueueAddItem(queue_urn, rdf_value, timestamp)
132,410
Adds an rdf value to the queue. Does not require that the queue be locked. Args: rdf_value: The rdf value to add to the queue. mutation_pool: A MutationPool object to write to. Raises: ValueError: rdf_value has unexpected type.
def Add(self, rdf_value, mutation_pool=None):
  self.StaticAdd(self.urn, rdf_value, mutation_pool=mutation_pool)
132,411
Refreshes claims on records identified by ids. Args: ids: A list of ids provided by ClaimRecords timeout: The new timeout for these claims. Raises: LockError: If the queue is not locked.
def RefreshClaims(self, ids, timeout="30m"):
  with data_store.DB.GetMutationPool() as mutation_pool:
    mutation_pool.QueueRefreshClaims(ids, timeout=timeout)
132,413
Delete records identified by ids. Args: ids: A list of ids provided by ClaimRecords. token: The database access token to delete with. Raises: LockError: If the queue is not locked.
def DeleteRecords(cls, ids, token):
  # token is unused here; records are deleted through the mutation pool.
  with data_store.DB.GetMutationPool() as mutation_pool:
    mutation_pool.QueueDeleteRecords(ids)
132,414
Release records identified by subjects. Releases any claim on the records identified by ids. Args: ids: A list of ids provided by ClaimRecords. token: The database access token to write with. Raises: LockError: If the queue is not locked.
def ReleaseRecords(cls, ids, token):
  # token is unused here; claims are released through the mutation pool.
  with data_store.DB.GetMutationPool() as mutation_pool:
    mutation_pool.QueueReleaseRecords(ids)
132,415
Parse a string into a client URN. Convert case so that all URNs are of the form C.[0-9a-f]. Args: value: string value to parse
def ParseFromUnicode(self, value):
  precondition.AssertType(value, Text)
  value = value.strip()

  super(ClientURN, self).ParseFromUnicode(value)

  match = self.CLIENT_ID_RE.match(self._string_urn)
  if not match:
    raise type_info.TypeValueError("Client urn malformed: %s" % value)

  clientid = match.group("clientid")
  clientid_correctcase = "".join((clientid[0].upper(), clientid[1:].lower()))

  self._string_urn = self._string_urn.replace(clientid, clientid_correctcase,
                                              1)
132,420
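The effect of the case normalization above, sketched independently of the RDFURN machinery. The regex here is an assumption modeled on the documented C.[0-9a-f] form and is not GRR's actual CLIENT_ID_RE.

import re

CLIENT_ID_RE = re.compile(r"^(?:aff4:)?/*(?P<clientid>[Cc]\.[0-9a-fA-F]{16})")

def normalize_client_id(value):
  match = CLIENT_ID_RE.match(value.strip())
  if not match:
    raise ValueError("Client urn malformed: %s" % value)
  clientid = match.group("clientid")
  # Upper-case the leading "C", lower-case the hex digits.
  return clientid[0].upper() + clientid[1:].lower()

print(normalize_client_id("c.0123456789ABCDEF"))  # C.0123456789abcdef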
Add a relative stem to the current value and return a new RDFURN. Note that this returns an RDFURN, not a ClientURN since the resulting object would not pass validation. Args: path: A string containing a relative path. age: The age of the object. If None set to current time. Returns: A new RDFURN that can be chained. Raises: ValueError: if the path component is not a string.
def Add(self, path, age=None):
  if not isinstance(path, string_types):
    raise ValueError("Only strings should be added to a URN.")

  result = rdfvalue.RDFURN(self.Copy(age))
  result.Update(path=utils.JoinPath(self._string_urn, path))

  return result
132,423
Merge a user into existing users or add new if it doesn't exist. Args: kb_user: A User rdfvalue. Returns: A tuple (new_attrs, merge_conflicts), where new_attrs is a list of the set attribute names, e.g. ["users.sid"], and merge_conflicts lists (key, old_value, new_value) tuples for values that were overwritten.
def MergeOrAddUser(self, kb_user):
  user = self.GetUser(
      sid=kb_user.sid, uid=kb_user.uid, username=kb_user.username)
  new_attrs = []
  merge_conflicts = []  # Record when we overwrite a value.
  if not user:
    new_attrs = self._CreateNewUser(kb_user)
  else:
    for key, val in iteritems(kb_user.AsDict()):
      if user.Get(key) and user.Get(key) != val:
        merge_conflicts.append((key, user.Get(key), val))
      user.Set(key, val)
      new_attrs.append("users.%s" % key)

  return new_attrs, merge_conflicts
132,426
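The merge semantics above (overwrite, but record every conflict) in a plain-dictionary sketch; the function name and sample data are invented for illustration.

def merge_user(existing, incoming):
  """Merges incoming attrs into existing, recording overwritten values."""
  new_attrs, conflicts = [], []
  for key, val in incoming.items():
    if existing.get(key) and existing[key] != val:
      conflicts.append((key, existing[key], val))
    existing[key] = val
    new_attrs.append("users.%s" % key)
  return new_attrs, conflicts

existing = {"username": "alice", "homedir": "/home/alice"}
incoming = {"username": "alice", "homedir": "/export/home/alice", "uid": 1001}
print(merge_user(existing, incoming))
# (['users.username', 'users.homedir', 'users.uid'],
#  [('homedir', '/home/alice', '/export/home/alice')])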
Retrieve a single record from the file. Args: offset: offset from start of input_dat where header starts record_size: length of the header according to file (untrusted) Returns: A dict containing a single browser history record.
def _GetRecord(self, offset, record_size):
  record_header = "<4sLQQL"
  get4 = lambda x: struct.unpack("<L", self.input_dat[x:x + 4])[0]
  url_offset = struct.unpack("B", self.input_dat[offset + 52:offset + 53])[0]
  if url_offset in [0xFF, 0xFE]:
    return None

  data_offset = get4(offset + 68)
  data_size = get4(offset + 72)
  start_pos = offset + data_offset
  data = struct.unpack("{0}s".format(data_size),
                       self.input_dat[start_pos:start_pos + data_size])[0]

  fmt = record_header
  unknown_size = url_offset - struct.calcsize(fmt)
  fmt += "{0}s".format(unknown_size)
  fmt += "{0}s".format(record_size - struct.calcsize(fmt))
  dat = struct.unpack(fmt, self.input_dat[offset:offset + record_size])
  header, blocks, mtime, ctime, ftime, _, url = dat
  url = url.split(b"\x00")[0].decode("utf-8")

  if mtime:
    mtime = mtime // 10 - WIN_UNIX_DIFF_MSECS
  if ctime:
    ctime = ctime // 10 - WIN_UNIX_DIFF_MSECS

  return {
      "header": header,  # the header
      "blocks": blocks,  # number of blocks
      "urloffset": url_offset,  # offset of URL in file
      "data_offset": data_offset,  # offset for start of data
      "data_size": data_size,  # size of data
      "data": data,  # actual data
      "mtime": mtime,  # modified time
      "ctime": ctime,  # created time
      "ftime": ftime,  # file time
      "url": url  # the url visited
  }
132,435
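The mtime and ctime fields in the record above are Windows FILETIME values, i.e. counts of 100-nanosecond intervals since 1601-01-01. As a standalone sketch of that conversion to a Unix timestamp in seconds (written independently of the WIN_UNIX_DIFF_MSECS constant used above):

# Seconds between the Windows epoch (1601-01-01) and the Unix epoch (1970-01-01).
WINDOWS_TO_UNIX_EPOCH_SECONDS = 11644473600

def filetime_to_unix_seconds(filetime):
  """Converts a FILETIME (100ns ticks since 1601) to Unix seconds."""
  return filetime / 10000000 - WINDOWS_TO_UNIX_EPOCH_SECONDS

# 2009-02-13 23:31:30 UTC expressed as a FILETIME:
print(filetime_to_unix_seconds(128790414900000000))  # 1234567890.0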
Wait until the flow completes. Args: timeout: timeout in seconds. None means default timeout (1 hour). 0 means no timeout (wait forever). Returns: Fresh flow object. Raises: PollTimeoutError: if timeout is reached. FlowFailedError: if the flow is not successful.
def WaitUntilDone(self, timeout=None):
  f = utils.Poll(
      generator=self.Get,
      condition=lambda f: f.data.state != f.data.RUNNING,
      timeout=timeout)
  if f.data.state != f.data.TERMINATED:
    raise errors.FlowFailedError(
        "Flow %s (%s) failed: %s" %
        (self.flow_id, self.client_id, f.data.context.current_state))
  return f
132,456
Constructor for the Flow Runner. Args: flow_obj: The flow object this runner will run states for. parent_runner: The parent runner of this runner. runner_args: A FlowRunnerArgs() instance containing initial values. If not specified, we use the runner_args from the flow_obj. token: An instance of access_control.ACLToken security token.
def __init__(self, flow_obj, parent_runner=None, runner_args=None,
             token=None):
  self.token = token or flow_obj.token
  self.parent_runner = parent_runner

  # If we have a parent runner, we use its queue manager.
  if parent_runner is not None:
    self.queue_manager = parent_runner.queue_manager
  else:
    # Otherwise we use a new queue manager.
    self.queue_manager = queue_manager.QueueManager(token=self.token)
    self.queue_manager.FreezeTimestamp()

  self.queued_replies = []

  self.outbound_lock = threading.Lock()
  self.flow_obj = flow_obj

  # Initialize from a new runner args proto.
  if runner_args is not None:
    self.runner_args = runner_args
    self.session_id = self.GetNewSessionID()
    self.flow_obj.urn = self.session_id

    # Flow state does not have a valid context, we need to create one.
    self.context = self.InitializeContext(runner_args)
    self.flow_obj.context = self.context
    self.context.session_id = self.session_id

  else:
    # Retrieve args from the flow object's context. The flow object is
    # responsible for storing our context, although they do not generally
    # access it directly.
    self.context = self.flow_obj.context

    self.runner_args = self.flow_obj.runner_args

    # Populate the flow object's urn with the session id.
    self.flow_obj.urn = self.session_id = self.context.session_id

  # Sent replies are cached so that they can be processed by output plugins
  # when the flow is saved.
  self.sent_replies = []
132,459
Completes the request by calling the state method. Args: method_name: The name of the state method to call. request: A RequestState protobuf. responses: A list of GrrMessages responding to the request.
def RunStateMethod(self, method_name, request=None, responses=None):
  if self._TerminationPending():
    return

  client_id = None
  try:
    self.context.current_state = method_name
    if request and responses:
      client_id = request.client_id or self.runner_args.client_id
      logging.debug("%s Running %s with %d responses from %s",
                    self.session_id, method_name, len(responses), client_id)
    else:
      logging.debug("%s Running state method %s", self.session_id,
                    method_name)

    # Extend our lease if needed.
    self.flow_obj.HeartBeat()
    try:
      method = getattr(self.flow_obj, method_name)
    except AttributeError:
      raise FlowRunnerError("Flow %s has no state method %s" %
                            (self.flow_obj.__class__.__name__, method_name))

    # Prepare a responses object for the state method to use:
    responses = flow_responses.Responses.FromLegacyResponses(
        request=request, responses=responses)

    self.SaveResourceUsage(responses.status)

    stats_collector_instance.Get().IncrementCounter("grr_worker_states_run")

    if method_name == "Start":
      stats_collector_instance.Get().IncrementCounter(
          "flow_starts", fields=[self.flow_obj.Name()])
      method()
    else:
      method(responses)

    if self.sent_replies:
      self.ProcessRepliesWithOutputPlugins(self.sent_replies)
      self.sent_replies = []

  # We don't know here what exceptions can be thrown in the flow but we have
  # to continue. Thus, we catch everything.
  except Exception as e:  # pylint: disable=broad-except
    # This flow will terminate now

    # TODO(user): Deprecate in favor of 'flow_errors'.
    stats_collector_instance.Get().IncrementCounter("grr_flow_errors")

    stats_collector_instance.Get().IncrementCounter(
        "flow_errors", fields=[self.flow_obj.Name()])
    logging.exception("Flow %s raised %s.", self.session_id, e)

    self.Error(traceback.format_exc(), client_id=client_id)
132,470
Allows this flow to send a message to its parent flow. If this flow does not have a parent, the message is ignored. Args: response: An RDFValue() instance to be sent to the parent. tag: If specified, tag the result with the following tag. NOTE: supported in REL_DB implementation only. Raises: ValueError: If response is not of the correct type.
def SendReply(self, response, tag=None):
  del tag
  if not isinstance(response, rdfvalue.RDFValue):
    raise ValueError("SendReply can only send a Semantic Value")

  # Only send the reply if we have a parent, indicated by knowing our parent's
  # request state.
  if self.runner_args.request_state.session_id:

    request_state = self.runner_args.request_state

    request_state.response_count += 1

    # Make a response message
    msg = rdf_flows.GrrMessage(
        session_id=request_state.session_id,
        request_id=request_state.id,
        response_id=request_state.response_count,
        auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
        type=rdf_flows.GrrMessage.Type.MESSAGE,
        payload=response,
        args_rdf_name=response.__class__.__name__,
        args_age=int(response.age))

    # Queue the response now
    self.queue_manager.QueueResponse(msg)

    if self.runner_args.write_intermediate_results:
      self.QueueReplyForResultCollection(response)

  else:
    # Only write the reply to the collection if we are the parent flow.
    self.QueueReplyForResultCollection(response)
132,474
Identify the type of hash in a hash string. Args: hash_str: A string value that may be a hash. Returns: A string description of the type of hash.
def GetHashType(self, hash_str):
  # Return the type of the first matching hash.
  for hash_type, hash_re in self.hashes:
    if hash_re.match(hash_str):
      return hash_type

  # No hash matched.
  return "EMPTY"
132,521
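A self-contained sketch of the same first-match approach, using a hypothetical hash table keyed by hex-digest length; the actual patterns held in self.hashes above are not shown here.

import re

HASHES = [
    ("md5", re.compile(r"^[0-9a-fA-F]{32}$")),
    ("sha1", re.compile(r"^[0-9a-fA-F]{40}$")),
    ("sha256", re.compile(r"^[0-9a-fA-F]{64}$")),
]

def get_hash_type(hash_str):
  # Return the type of the first matching hash, or "EMPTY" if none match.
  for hash_type, hash_re in HASHES:
    if hash_re.match(hash_str):
      return hash_type
  return "EMPTY"

print(get_hash_type("d41d8cd98f00b204e9800998ecf8427e"))  # md5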