Columns:
docstring: string, lengths 52 to 499
function: string, lengths 67 to 35.2k
__index_level_0__: int64, values 52.6k to 1.16M
Renders metadata of a given value class. Args: value_cls: Metadata of this class will be rendered. This class has to be self.value_class or a subclass of it (i.e. a class that this renderer is capable of rendering). Returns: Dictionary with class metadata.
def BuildTypeDescriptor(self, value_cls): result = ApiRDFValueDescriptor( name=value_cls.__name__, parents=[klass.__name__ for klass in value_cls.__mro__], doc=value_cls.__doc__ or "", kind="PRIMITIVE") result.default = self.BuildDefaultValue(value_cls) return result
130,791
Takes the plist contents generated by binplist and returns a plain dict. binplist uses rich types to express some of the plist types. We need to convert them to types that RDFValueArray will be able to transport. Args: plist: A plist to convert. Returns: A simple python type.
def PlistValueToPlainValue(plist): if isinstance(plist, dict): ret_value = dict() for key, value in iteritems(plist): ret_value[key] = PlistValueToPlainValue(value) return ret_value elif isinstance(plist, list): return [PlistValueToPlainValue(value) for value in plist] elif isinstance(plist, datetime.datetime): return (calendar.timegm(plist.utctimetuple()) * 1000000) + plist.microsecond return plist
130,805
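For reference, a minimal standalone sketch of the datetime branch above: a timezone-naive UTC datetime collapses to integer microseconds since the epoch, which is what RDFValueArray can transport. The sample timestamp is illustrative.

import calendar
import datetime

# A naive UTC timestamp, as binplist would hand back for a plist date field.
ts = datetime.datetime(2019, 1, 1, 12, 0, 0, 250)
micros = calendar.timegm(ts.utctimetuple()) * 1000000 + ts.microsecond
print(micros)  # 1546344000000250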
Parses a YAML source into a Python object. Args: text: A YAML source to parse. Returns: A Python data structure corresponding to the YAML source.
def Parse(text): precondition.AssertType(text, Text) if compatibility.PY2: text = text.encode("utf-8") return yaml.safe_load(text)
130,811
Parses many YAML documents into a list of Python objects. Args: text: A YAML source with multiple documents embedded. Returns: A list of Python data structures corresponding to the YAML documents.
def ParseMany(text): precondition.AssertType(text, Text) if compatibility.PY2: text = text.encode("utf-8") return list(yaml.safe_load_all(text))
130,812
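The two helpers above are thin wrappers over PyYAML's safe loaders; on Python 3 the PY2 encode step is a no-op. An equivalent sketch with plain PyYAML:

import yaml

# Single-document vs. multi-document ("---"-separated) parsing.
assert yaml.safe_load("foo: [1, 2, 3]") == {"foo": [1, 2, 3]}
assert list(yaml.safe_load_all("foo: 1\n---\nbar: 2")) == [{"foo": 1}, {"bar": 2}]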
Reads a Python object stored in a specified YAML file. Args: filepath: A filepath to the YAML file. Returns: A Python data structure corresponding to the YAML in the given file.
def ReadFromPath(filepath): with io.open(filepath, mode="r", encoding="utf-8") as filedesc: return ReadFromFile(filedesc)
130,813
Reads a sequence of Python objects stored in a specified multi-document YAML file. Args: filepath: A filepath to the YAML file. Returns: A list of Python data structures corresponding to the YAML documents in the given file.
def ReadManyFromPath(filepath): with io.open(filepath, mode="r", encoding="utf-8") as filedesc: return ReadManyFromFile(filedesc)
130,814
Stringifies a Python object into its YAML representation. Args: obj: A Python object to convert to YAML. Returns: A YAML representation of the given object.
def Dump(obj): text = yaml.safe_dump(obj, default_flow_style=False, allow_unicode=True) if compatibility.PY2: text = text.decode("utf-8") return text
130,815
Stringifies a sequence of Python objects to a multi-document YAML. Args: objs: An iterable of Python objects to convert to YAML. Returns: A multi-document YAML representation of the given objects.
def DumpMany(objs): precondition.AssertIterableType(objs, object) text = yaml.safe_dump_all(objs, default_flow_style=False, allow_unicode=True) if compatibility.PY2: text = text.decode("utf-8") return text
130,816
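A quick round-trip sketch of the dump helpers above using plain PyYAML on Python 3, passing the same flags they use:

import yaml

docs = [{"a": 1}, {"b": [2, 3]}]
text = yaml.safe_dump_all(docs, default_flow_style=False, allow_unicode=True)
assert list(yaml.safe_load_all(text)) == docs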
Serializes and writes given Python object to the specified YAML file. Args: obj: A Python object to serialize. filepath: A path to the file into which the object is to be written.
def WriteToPath(obj, filepath): with io.open(filepath, mode="w", encoding="utf-8") as filedesc: WriteToFile(obj, filedesc)
130,817
Serializes and writes given Python objects to a multi-document YAML file. Args: objs: An iterable of Python objects to serialize. filepath: A path to the file into which the objects are to be written.
def WriteManyToPath(objs, filepath): with io.open(filepath, mode="w", encoding="utf-8") as filedesc: WriteManyToFile(objs, filedesc)
130,818
Creates Windows paths detector. Command-line strings can be space separated and contain options, e.g. C:\\Program Files\\ACME Corporation\\wiz.exe /quiet /blah. See the Microsoft documentation on command-line parsing: http://msdn.microsoft.com/en-us/library/windows/desktop/ms682425(v=vs.85).aspx Args: vars_map: Environment variables mapping. Default is None. Returns: A detector (core.Detector instance).
def CreateWindowsRegistryExecutablePathsDetector(vars_map=None): return core.Detector( extractors=[RunDllExtractor(), ExecutableExtractor()], post_processors=[EnvVarsPostProcessor(vars_map or {})],)
130,822
Detects paths in a list of Windows Registry strings. Args: source_values: A list of strings to detect paths in. vars_map: Dictionary of "string" -> "string|list", i.e. a mapping of environment variable names to their suggested values or to lists of their suggested values. Yields: Detected paths (as strings).
def DetectExecutablePaths(source_values, vars_map=None): detector = CreateWindowsRegistryExecutablePathsDetector(vars_map=vars_map) for source_value in source_values: for result in detector.Detect(source_value): yield result
130,823
Extracts interesting paths from a given path. Args: components: Source string represented as a list of components. Returns: A list of extracted paths (as strings).
def Extract(self, components): rundll_index = -1 for index, component in enumerate(components): if component.lower().endswith("rundll32.exe"): rundll_index = index if rundll_index == -1: return [] components = components[(rundll_index + 1):] # We expect components after "rundll32.exe" to point at a DLL and a # function. For example: # rundll32.exe "C:\Windows\system32\advpack.dll",DelNodeRunDLL32 last_component = components[-1].rsplit(",", 1)[0] extracted_path = " ".join(components[0:-1] + [last_component]) return [extracted_path]
130,824
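A worked example of the rundll32 extraction above, run outside the class for illustration; whitespace splitting of the source string into components by the surrounding core.Detector is an assumption here:

# Command line: rundll32.exe "C:\Windows\system32\advpack.dll",DelNodeRunDLL32
components = ["rundll32.exe", "C:\\Windows\\system32\\advpack.dll,DelNodeRunDLL32"]
last = components[-1].rsplit(",", 1)[0]          # strip the ",DelNodeRunDLL32" suffix
extracted = " ".join(components[1:-1] + [last])  # components after rundll32.exe
print(extracted)  # C:\Windows\system32\advpack.dll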
Extracts interesting paths from a given path. Args: components: Source string represented as a list of components. Returns: A list of extracted paths (as strings).
def Extract(self, components): for index, component in enumerate(components): if component.lower().endswith(self.EXECUTABLE_EXTENSIONS): extracted_path = " ".join(components[0:index + 1]) return [extracted_path] return []
130,825
EnvVarsPostProcessor constructor. Args: vars_map: Dictionary of "string" -> "string|list", i.e. a mapping of environment variable names to their suggested values or to lists of their suggested values.
def __init__(self, vars_map): super(EnvVarsPostProcessor, self).__init__() self.vars_map = {} for var_name, value in iteritems(vars_map): var_regex = re.compile( re.escape("%" + var_name + "%"), flags=re.IGNORECASE) self.vars_map[var_name.lower()] = (var_regex, value)
130,826
Processes a given path. Args: path: Path (as a string) to post-process. Returns: A list of paths with environment variables replaced with their values. If the mapping had a list of values for a particular variable, instead of just one value, then all possible replacements will be returned.
def Process(self, path): path = re.sub(self.SYSTEMROOT_RE, r"%systemroot%", path, count=1) path = re.sub(self.SYSTEM32_RE, r"%systemroot%\\system32", path, count=1) matches_iter = self.WIN_ENVIRON_REGEX.finditer(path) var_names = set(m.group(1).lower() for m in matches_iter) results = [path] for var_name in var_names: try: var_regex, var_value = self.vars_map[var_name] except KeyError: continue if isinstance(var_value, string_types): replacements = [var_value] else: replacements = var_value processed_results = [] for result in results: for repl in replacements: # Using lambda here, as otherwise Python interprets \\f as a # backreference (same applies to \\0 and \\1). When using a # function as a replacement argument, backreferences are ignored. # pylint: disable=cell-var-from-loop processed_results.append(var_regex.sub(lambda _: repl, result)) results = processed_results return results
130,827
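A standalone sketch of the fan-out behaviour described above: one path containing %appdata% and two suggested values yields two candidate paths. The regex handling of the real class is simplified, and the paths and values are illustrative.

import re

path = "%appdata%\\Foo\\foo.exe"
var_regex = re.compile(re.escape("%appdata%"), flags=re.IGNORECASE)
replacements = ["C:\\Users\\alice\\AppData\\Roaming", "C:\\Users\\bob\\AppData\\Roaming"]
# The lambda prevents backslashes in the replacement from being read as backreferences.
results = [var_regex.sub(lambda _: repl, path) for repl in replacements]
print(results)
# ['C:\\Users\\alice\\AppData\\Roaming\\Foo\\foo.exe',
#  'C:\\Users\\bob\\AppData\\Roaming\\Foo\\foo.exe']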
ApiBinaryStream constructor. Args: filename: A file name to be used by the browser when user downloads the file. content_generator: A generator that yields byte chunks (of any size) to be streamed to the user. content_length: The length of the stream, if known upfront. Raises: ValueError: if content_generator is None.
def __init__(self, filename, content_generator=None, content_length=None): precondition.AssertType(filename, Text) self.filename = filename self.content_length = content_length if content_generator is None: raise ValueError("content_generator can't be None") self.content_generator = content_generator
130,833
Retrieves the total size in bytes of an AFF4 object. Args: aff4_obj: An AFF4 stream instance to retrieve size for. Returns: An integer representing number of bytes. Raises: TypeError: If `aff4_obj` is not an instance of AFF4 stream.
def _Aff4Size(aff4_obj): if not isinstance(aff4_obj, aff4.AFF4Stream): message = "Expected an instance of `%s` but received `%s`" raise TypeError(message % (aff4.AFF4Stream, type(aff4_obj))) return int(aff4_obj.Get(aff4_obj.Schema.SIZE))
130,842
Reads contents of given AFF4 file. Args: aff4_obj: An AFF4 stream instance to retrieve contents for. offset: An offset to start the reading from. length: A number of bytes to read. Reads the whole file if 0. Returns: Contents of specified AFF4 stream. Raises: TypeError: If `aff4_obj` is not an instance of AFF4 stream.
def _Aff4Read(aff4_obj, offset, length): length = length or (_Aff4Size(aff4_obj) - offset) aff4_obj.Seek(offset) return aff4_obj.Read(length)
130,843
Initializes the current instance from an Aff4Object. Iterates the inheritance hierarchy of the given Aff4Object and adds an ApiAff4ObjectType for each class found in the hierarchy. Args: aff4_obj: An Aff4Object as source for the initialization. Returns: A reference to the current instance.
def InitFromAff4Object(self, aff4_obj): attr_blacklist = [] # We use this to show attributes only once. self.types = [] for aff4_cls in aff4_obj.__class__.__mro__: if not hasattr(aff4_cls, "SchemaCls"): continue type_repr = ApiAff4ObjectType().InitFromAff4Object( aff4_obj, aff4_cls, attr_blacklist) if type_repr.attributes: self.types.append(type_repr) # Add all attribute names from this type representation to the # blacklist to not add them to the result again. attr_blacklist.extend([attr.name for attr in type_repr.attributes]) return self
130,850
Sign a buffer via temp files. Our signing tool can't sign a buffer, so we work around it using temporary files. Args: in_buffer: data to sign Returns: signed data
def SignBuffer(self, in_buffer): precondition.AssertType(in_buffer, bytes) with tempfile.NamedTemporaryFile() as temp_in: temp_in.write(in_buffer) temp_in.seek(0) outfile = self.SignFile(temp_in.name) with io.open(outfile, "rb") as filedesc: return filedesc.read()
130,896
Sign a file using osslsigncode. Args: in_filename: file to read from out_filename: file to output to, if none we output to the same filename as the input with a .signed suffix. Returns: output filename string Raises: pexpect.ExceptionPexpect: if the expect invocation of osslsigncode fails. SigningError: for signing failures.
def SignFile(self, in_filename, out_filename=None): if out_filename is None: out_filename = "%s.signed" % in_filename args = [ "-certs", self.cert, "-key", self.key, "-n", self.application, "-t", "http://timestamp.verisign.com/scripts/timestamp.dll", "-h", "sha1", "-in", in_filename, "-out", out_filename ] try: output_log = io.StringIO() ossl = pexpect.spawn("osslsigncode", args) # Use logfile_read so we don't get the password we're injecting. ossl.logfile_read = output_log ossl.expect("Enter PEM pass phrase") ossl.sendline(self.password) ossl.wait() except pexpect.ExceptionPexpect: output_log.seek(0) logging.exception(output_log.read()) raise if not os.path.exists(out_filename): raise SigningError("Expected output %s not created" % out_filename) try: subprocess.check_call(["osslsigncode", "verify", "-in", out_filename]) except subprocess.CalledProcessError: logging.exception("Bad signature verification on %s", out_filename) raise SigningError("Bad signature verification on %s" % out_filename) return out_filename
130,897
Take an aff4 path and download all files in it to output_dir. Args: aff4_path: Any aff4 path as a string output_dir: A local directory to write to, will be created if not there. bufsize: Buffer size to use. preserve_path: If set all paths will be created. Note that this works for collections as well. It will download all files in the collection. This only downloads files that are already in the datastore, it doesn't queue anything on the client.
def DownloadDir(aff4_path, output_dir, bufsize=8192, preserve_path=True): if not os.path.isdir(output_dir): os.makedirs(output_dir) fd = aff4.FACTORY.Open(aff4_path) for child in fd.OpenChildren(): if preserve_path: # Get a full path without the aff4: full_dir = utils.JoinPath(output_dir, child.urn.Path()) full_dir = os.path.dirname(full_dir) if not os.path.isdir(full_dir): os.makedirs(full_dir) outfile = os.path.join(full_dir, child.urn.Basename()) else: outfile = os.path.join(output_dir, child.urn.Basename()) logging.info(u"Downloading %s to %s", child.urn, outfile) with open(outfile, "wb") as out_fd: try: buf = child.Read(bufsize) while buf: out_fd.write(buf) buf = child.Read(bufsize) except IOError as e: logging.error("Failed to read %s. Err: %s", child.urn, e)
130,901
Opens the client, getting potential approval tokens. Args: client_id: The client id that should be opened. token: Token to use to open the client. Returns: A tuple containing (client, token) objects, or (None, None) if no appropriate approval tokens were found.
def OpenClient(client_id=None, token=None): if not token: try: token = ApprovalFind(client_id, token=token) except access_control.UnauthorizedAccess as e: logging.debug("No authorization found for access to client: %s", e) try: # Try and open with the token we managed to retrieve or the default. client = aff4.FACTORY.Open( rdfvalue.RDFURN(client_id), mode="r", token=token) return client, token except access_control.UnauthorizedAccess: logging.warning( "Unable to find a valid reason for client %s. You may need " "to request approval.", client_id) return None, None
130,902
Revokes an approval for a given token. This method requires raw datastore access to manipulate approvals directly. Args: aff4_path: The aff4_path or client id the approval should be created for. token: The token that should be revoked.
def ApprovalRevokeRaw(aff4_path, token): try: urn = rdf_client.ClientURN(aff4_path) except type_info.TypeValueError: urn = rdfvalue.RDFURN(aff4_path) approval_urn = aff4.ROOT_URN.Add("ACL").Add(urn.Path()).Add( token.username).Add(utils.EncodeReasonString(token.reason)) super_token = access_control.ACLToken(username="raw-approval-superuser") super_token.supervisor = True approval_request = aff4.FACTORY.Open( approval_urn, mode="rw", token=super_token) approval_request.DeleteAttribute(approval_request.Schema.APPROVER) approval_request.Close()
130,909
A script to remove no-op client versions. This script removes a version of a client when it is identical to the previous one, in the sense that no versioned attributes were changed since that previous client version. Args: clients: A list of ClientURN; if empty, cleans all clients. dry_run: whether this is a dry run
def CleanVacuousVersions(clients=None, dry_run=True): if not clients: index = client_index.CreateClientIndex() clients = index.LookupClients(["."]) clients.sort() with data_store.DB.GetMutationPool() as pool: logging.info("checking %d clients", len(clients)) for batch in collection.Batch(clients, 10000): # TODO(amoser): This only works on datastores that use the Bigtable # scheme. client_infos = data_store.DB.MultiResolvePrefix( batch, ["aff4:", "aff4:"], data_store.DB.ALL_TIMESTAMPS) for client, type_list in client_infos: cleared = 0 kept = 0 updates = [] for a, _, ts in type_list: if ts != 0: updates.append((ts, a)) updates = sorted(updates) dirty = True for ts, a in updates: if a == "aff4:type": if dirty: kept += 1 dirty = False else: cleared += 1 if not dry_run: pool.DeleteAttributes(client, ["aff4:type"], start=ts, end=ts) if pool.Size() > 1000: pool.Flush() else: dirty = True logging.info("%s: kept %d and cleared %d", client, kept, cleared)
130,913
r"""A script to export clients summaries selected by a keyword search. This script does a client search for machines matching all of keywords and writes a .csv summary of the results to filename. Multi-value fields are '\n' separated. Args: keywords: a list of keywords to search for filename: the name of the file to write to, will be replaced if already present token: datastore token.
def ExportClientsByKeywords(keywords, filename, token=None): index = client_index.CreateClientIndex(token=token) client_list = index.LookupClients(keywords) logging.info("found %d clients", len(client_list)) if not client_list: return writer = csv.DictWriter([ u"client_id", u"hostname", u"last_seen", u"os", u"os_release", u"os_version", u"users", u"ips", u"macs", ]) writer.WriteHeader() for client in aff4.FACTORY.MultiOpen(client_list, token=token): s = client.Schema writer.WriteRow({ u"client_id": client.urn.Basename(), u"hostname": client.Get(s.HOSTNAME), u"os": client.Get(s.SYSTEM), u"os_release": client.Get(s.OS_RELEASE), u"os_version": client.Get(s.OS_VERSION), u"ips": client.Get(s.HOST_IPS), u"macs": client.Get(s.MAC_ADDRESS), u"users": "\n".join(client.Get(s.USERNAMES, [])), u"last_seen": client.Get(s.PING), }) with io.open(filename, "w") as csv_out: csv_out.write(writer.Content())
130,914
Launches the flow and worker and waits for it to finish. Args: client_id: The client common name we issue the request to. flow_name: The name of the flow to launch. **kwargs: passthrough to flow. Returns: A flow session id. Note: you need raw access to run this flow as it requires running a worker.
def StartFlowAndWorker(client_id, flow_name, **kwargs): # Empty token, only works with raw access. queue = rdfvalue.RDFURN("DEBUG-%s-" % getpass.getuser()) if "token" in kwargs: token = kwargs.pop("token") else: token = access_control.ACLToken(username="GRRConsole") session_id = flow.StartAFF4Flow( client_id=client_id, flow_name=flow_name, queue=queue, token=token, **kwargs) worker_thrd = worker_lib.GRRWorker( queues=[queue], token=token, threadpool_size=1) while True: try: worker_thrd.RunOnce() except KeyboardInterrupt: print("exiting") worker_thrd.thread_pool.Join() break time.sleep(2) with aff4.FACTORY.Open(session_id, token=token) as flow_obj: if not flow_obj.GetRunner().IsRunning(): break # Terminate the worker threads worker_thrd.thread_pool.Join() return session_id
130,915
Wake up stuck flows. A stuck flow is one which is waiting for the client to do something, but the client requests have been removed from the client queue. This can happen if the system is too loaded and the client messages have TTLed out. In this case we reschedule the client requests for this session. Args: session_id: The session for the flow to wake. Returns: The total number of client messages re-queued.
def WakeStuckFlow(session_id): session_id = rdfvalue.SessionID(session_id) woken = 0 checked_pending = False with queue_manager.QueueManager() as manager: for request, responses in manager.FetchRequestsAndResponses(session_id): # We need to check if there are client requests pending. if not checked_pending: task = manager.Query( request.client_id, task_id="task:%s" % request.request.task_id) if task: # Client has tasks pending already. return checked_pending = True if (not responses or responses[-1].type != rdf_flows.GrrMessage.Type.STATUS): manager.QueueClientMessage(request.request) woken += 1 if responses and responses[-1].type == rdf_flows.GrrMessage.Type.STATUS: manager.QueueNotification(session_id) return woken
130,916
Instantiates a new StatsServer. Args: port: The TCP port that the server should listen to.
def __init__(self, port): super(StatsServer, self).__init__(port) self._http_server = None self._server_thread = None
130,917
Creates a ZIP archive of the files in the input directory. Args: input_dir: the name of the input directory. output_file: the name of the output ZIP archive without extension.
def MakeZip(self, input_dir, output_file): logging.info("Generating zip template file at %s", output_file) zf = zipfile.ZipFile(output_file, "w") oldwd = os.getcwd() os.chdir(input_dir) for path in ["debian", "rpmbuild", "fleetspeak"]: for root, _, files in os.walk(path): for f in files: zf.write(os.path.join(root, f)) zf.close() os.chdir(oldwd)
130,946
Publish the message into all listeners of the event. We send the message to all event handlers which contain this string in their EVENT static member. This allows the event to be sent to multiple interested listeners. Args: event_name: An event name. msg: The message to send to the event handler. token: ACL token. Raises: ValueError: If the message is invalid. The message must be a Semantic Value (instance of RDFValue) or a full GrrMessage.
def PublishEvent(cls, event_name, msg, token=None): cls.PublishMultipleEvents({event_name: [msg]}, token=token)
130,964
Publishes multiple messages at once. Args: events: A dict with keys being event names and values being lists of messages. token: ACL token. Raises: ValueError: If the message is invalid. The message must be a Semantic Value (instance of RDFValue) or a full GrrMessage.
def PublishMultipleEvents(cls, events, token=None): event_name_map = registry.EventRegistry.EVENT_NAME_MAP for event_name, messages in iteritems(events): if not isinstance(event_name, string_types): raise ValueError( "Event names should be string, got: %s" % type(event_name)) for msg in messages: if not isinstance(msg, rdfvalue.RDFValue): raise ValueError("Can only publish RDFValue instances.") for event_cls in event_name_map.get(event_name, []): event_cls().ProcessMessages(messages, token=token)
130,965
Expand values from various attribute types. Strings are returned as is. Dictionaries are returned with a key string, and an expanded set of values. Other iterables are expanded until they flatten out. Other items are returned in string format. Args: obj: The object to expand out. parent: The parent object, used to short-circuit infinite recursion. Returns: A list of expanded values as strings.
def FanOut(self, obj, parent=None): # Catch cases where RDFs are iterable but return themselves. if parent and obj == parent: results = [utils.SmartUnicode(obj).strip()] elif isinstance(obj, (string_types, rdf_structs.EnumNamedValue)): results = [utils.SmartUnicode(obj).strip()] elif isinstance(obj, rdf_protodict.DataBlob): results = self.FanOut(obj.GetValue()) elif isinstance(obj, (collections.Mapping, rdf_protodict.Dict)): results = [] # rdf_protodict.Dict only has items, not iteritems. for k, v in iteritems(obj): expanded_v = [utils.SmartUnicode(r) for r in self.FanOut(v)] results.append("%s:%s" % (utils.SmartUnicode(k), ",".join(expanded_v))) elif isinstance(obj, (collections.Iterable, rdf_structs.RepeatedFieldHelper)): results = [] for rslt in [self.FanOut(o, obj) for o in obj]: results.extend(rslt) else: results = [utils.SmartUnicode(obj).strip()] return results
130,967
Refresh an old attribute. Note that refreshing the attribute is asynchronous. It does not change anything about the current object - you need to reopen the same URN some time later to get fresh data. Attributes: CONTAINS - Refresh the content of the directory listing. Args: attribute: An attribute object as listed above. Returns: The Flow ID that is pending Raises: IOError: If there has been an error starting the flow.
def Update(self, attribute=None): # client id is the first path element client_id = self.urn.Split()[0] if attribute == "CONTAINS": # Get the pathspec for this object flow_id = flow.StartAFF4Flow( client_id=client_id, # Dependency loop: aff4_objects/aff4_grr.py depends on # aff4_objects/standard.py that depends on flows/general/filesystem.py # that eventually depends on aff4_objects/aff4_grr.py # flow_name=filesystem.ListDirectory.__name__, flow_name="ListDirectory", pathspec=self.real_pathspec, notify_to_user=False, token=self.token) return flow_id
130,970
Inserts one or multiple rows into the given table. Args: cursor: The MySQL cursor to perform the insertion. table: The table name, where rows should be inserted. values: A list of dicts, associating column names to values.
def _Insert(cursor, table, values): precondition.AssertIterableType(values, dict) if not values: # Nothing can be INSERTed with empty `values` list. return column_names = list(sorted(values[0])) for value_dict in values: if set(column_names) != set(value_dict): raise ValueError("Given value dictionaries must have identical keys. " "Expecting columns {!r}, but got value {!r}".format( column_names, value_dict)) query = "INSERT IGNORE INTO %s {cols} VALUES {vals}" % table query = query.format( cols=mysql_utils.Columns(column_names), vals=mysql_utils.Placeholders(num=len(column_names), values=len(values))) values_list = [] for values_dict in values: values_list.extend(values_dict[column] for column in column_names) cursor.execute(query, values_list)
131,002
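A minimal sketch of the statement the code above assembles, with inline stand-ins for mysql_utils.Columns/Placeholders (their exact output format, and the table name "users", are assumptions for illustration):

values = [{"name": "alice", "age": 3}, {"name": "bob", "age": 5}]
column_names = sorted(values[0])                         # ["age", "name"]
cols = "(" + ", ".join(column_names) + ")"
row = "(" + ", ".join(["%s"] * len(column_names)) + ")"
vals = ", ".join([row] * len(values))
query = "INSERT IGNORE INTO users {cols} VALUES {vals}".format(cols=cols, vals=vals)
args = [d[c] for d in values for c in column_names]      # [3, "alice", 5, "bob"]
print(query)  # INSERT IGNORE INTO users (age, name) VALUES (%s, %s), (%s, %s)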
Calculates hash ids and writes contents of given data blobs. Args: blobs_data: An iterable of bytes. Returns: A list of rdf_objects.BlobID objects with each blob id corresponding to an element in the original blobs_data argument.
def WriteBlobsWithUnknownHashes( self, blobs_data): blobs_ids = [rdf_objects.BlobID.FromBlobData(d) for d in blobs_data] self.WriteBlobs(dict(zip(blobs_ids, blobs_data))) return blobs_ids
131,010
Returns q's client id, if q is a client task queue, otherwise None. Args: q: rdfvalue.RDFURN Returns: string or None
def _GetClientIdFromQueue(q): split = q.Split() if not split or len(split) < 2: return None # Normalize to lowercase. split = [s.lower() for s in split] str_client_id, tasks_marker = split if not str_client_id.startswith("c.") or tasks_marker != "tasks": return None # The "C." prefix should be uppercase. str_client_id = "C" + str_client_id[1:] return str_client_id
131,019
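The normalization above, mirrored on a plain list of path components for illustration (the real function operates on an rdfvalue.RDFURN):

def to_client_id(components):
    # Mirrors the checks above: expect ("C.<hex>", "tasks") in any case.
    if len(components) < 2:
        return None
    str_client_id, tasks_marker = [s.lower() for s in components]
    if not str_client_id.startswith("c.") or tasks_marker != "tasks":
        return None
    return "C" + str_client_id[1:]

assert to_client_id(["C.0123456789abcdef", "tasks"]) == "C.0123456789abcdef"
assert to_client_id(["W"]) is None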
Removes the tasks from the queue. Note that tasks can already have been removed. It is not an error to re-delete an already deleted task. Args: queue: A queue to clear. tasks: A list of tasks to remove. Tasks may be Task() instances or integers representing the task_id. mutation_pool: A MutationPool object to schedule deletions on. Raises: ValueError: Mutation pool was not passed in.
def Delete(self, queue, tasks, mutation_pool=None): if queue is None: return if mutation_pool is None: raise ValueError("Mutation pool can't be none.") mutation_pool.QueueDeleteTasks(queue, tasks)
131,036
Returns notifications for all shards of a queue at once. Used by worker_test_lib.MockWorker to cover all shards with a single worker. Args: queue: usually rdfvalue.RDFURN("aff4:/W") Returns: List of rdf_flows.GrrNotification objects
def GetNotificationsForAllShards(self, queue): notifications_by_session_id = {} for queue_shard in self.GetAllNotificationShards(queue): self._GetUnsortedNotifications( queue_shard, notifications_by_session_id=notifications_by_session_id) return notifications_by_session_id.values()
131,039
Returns all the available notifications for a queue_shard. Args: queue_shard: urn of queue shard notifications_by_session_id: store notifications in this dict rather than creating a new one Returns: dict of notifications. keys are session ids.
def _GetUnsortedNotifications(self, queue_shard, notifications_by_session_id=None): if notifications_by_session_id is None: notifications_by_session_id = {} end_time = self.frozen_timestamp or rdfvalue.RDFDatetime.Now() for notification in self.data_store.GetNotifications(queue_shard, end_time): existing = notifications_by_session_id.get(notification.session_id) if existing: # If we have a notification for this session_id already, we only store # the one that was scheduled last. if notification.first_queued > existing.first_queued: notifications_by_session_id[notification.session_id] = notification elif notification.first_queued == existing.first_queued and ( notification.last_status > existing.last_status): # Multiple notifications with the same timestamp should not happen. # We can still do the correct thing and use the latest one. logging.warning( "Notifications with equal first_queued fields detected: %s %s", notification, existing) notifications_by_session_id[notification.session_id] = notification else: notifications_by_session_id[notification.session_id] = notification return notifications_by_session_id
131,040
This is the same as NotifyQueue but for several session_ids at once. Args: notifications: A list of notifications. mutation_pool: A MutationPool object to schedule Notifications on. Raises: RuntimeError: An invalid session_id was passed.
def MultiNotifyQueue(self, notifications, mutation_pool=None): extract_queue = lambda notification: notification.session_id.Queue() for queue, notifications in iteritems( collection.Group(notifications, extract_queue)): self._MultiNotifyQueue(queue, notifications, mutation_pool=mutation_pool)
131,042
Retrieves tasks from a queue without leasing them. This is good for a read only snapshot of the tasks. Args: queue: The task queue that this task belongs to, usually client.Queue() where client is the ClientURN object you want to schedule msgs on. limit: Number of values to fetch. Returns: A list of Task() objects.
def Query(self, queue, limit=1): # This function is usually used for manual testing so we also accept client # ids and get the queue from it. if isinstance(queue, rdf_client.ClientURN): queue = queue.Queue() return self.data_store.QueueQueryTasks(queue, limit=limit)
131,046
Returns a list of Tasks leased for a certain time. Args: queue: The queue to query from. lease_seconds: The tasks will be leased for this long. limit: Number of values to fetch. Returns: A list of GrrMessage() objects leased.
def QueryAndOwn(self, queue, lease_seconds=10, limit=1): with self.data_store.GetMutationPool() as mutation_pool: return mutation_pool.QueueQueryAndOwn(queue, lease_seconds, limit, self.frozen_timestamp)
131,047
Retrieves responses for a well known flow. Args: session_id: The session_id to get the requests/responses for. Yields: The retrieved responses.
def FetchResponses(self, session_id): timestamp = (0, self.frozen_timestamp or rdfvalue.RDFDatetime.Now()) for response in self.data_store.FetchResponsesForWellKnownFlow( session_id, self.response_limit, timestamp=timestamp): yield response
131,048
Entry point for processing jobs. Args: message: The GrrMessage that was delivered from the server. Raises: RuntimeError: The client action requested was not found.
def HandleMessage(self, message): self._is_active = True try: action_cls = actions.ActionPlugin.classes.get(message.name) if action_cls is None: raise RuntimeError("Client action %r not known" % message.name) action = action_cls(grr_worker=self) # Write the message to the transaction log. self.transaction_log.Write(message) # Heartbeat so we have the full period to work on this message. action.Progress() action.Execute(message) # If we get here without exception, we can remove the transaction. self.transaction_log.Clear() finally: self._is_active = False # We want to send ClientStats when client action is complete. self.stats_collector.RequestSend()
131,068
Retrieves and removes the messages from the queue. Args: soft_size_limit: int If there is more data in the queue than soft_size_limit bytes, the returned list of messages will be approximately this large. If None (default), returns all messages currently on the queue. Returns: rdf_flows.MessageList A list of messages that were .Put on the queue earlier.
def GetMessages(self, soft_size_limit=None): with self._lock: ret = rdf_flows.MessageList() ret_size = 0 for message in self._Generate(): self._total_size -= len(message) ret.job.append(rdf_flows.GrrMessage.FromSerializedString(message)) ret_size += len(message) if soft_size_limit is not None and ret_size > soft_size_limit: break return ret
131,077
Constructor. Args: ca_cert: String representation of a CA certificate to use for checking server certificate. worker_cls: The client worker class to use. Defaults to GRRClientWorker. private_key: The private key for this client. Defaults to config Client.private_key.
def __init__(self, ca_cert=None, worker_cls=None, private_key=None): self.ca_cert = ca_cert if private_key is None: private_key = config.CONFIG.Get("Client.private_key", default=None) # The server's PEM encoded certificate. self.server_certificate = None # This manages our HTTP connections. Note: The comms thread is allowed to # block indefinitely since the worker thread is responsible for # heart-beating the nanny. We assume that HTTP requests can not block # indefinitely. self.http_manager = self.http_manager_class() # The communicator manages our crypto with the server. self.communicator = ClientCommunicator(private_key=private_key) # This controls our polling frequency. self.timer = Timer() # The time we last sent an enrollment request. Enrollment requests are # throttled especially to a maximum of one every 10 minutes. self.last_enrollment_time = 0 # The time we last checked with the foreman. self.last_foreman_check = 0 # The client worker does all the real work here. if worker_cls: self.client_worker = worker_cls(client=self) else: self.client_worker = GRRClientWorker(client=self) # TODO(hanuszczak): Maybe we should start the thread in `GRRHTTPClient::Run` # method instead? Starting threads in constructor is rarely a good idea, is # it guaranteed that we call `GRRHTTPClient::Run` only once? self.client_worker.start()
131,078
Check the server PEM for validity. This is used to determine connectivity to the server. Sometimes captive portals return a valid HTTP status, but the data is corrupted. Args: http_object: The response received from the server. Returns: True if the response contains a valid server certificate.
def VerifyServerPEM(self, http_object): try: server_pem = http_object.data server_url = http_object.url if b"BEGIN CERTIFICATE" in server_pem: # Now we know that this proxy is working. We still have to verify the # certificate. This will raise if the server cert is invalid. server_certificate = rdf_crypto.RDFX509Cert(server_pem) self.communicator.LoadServerCertificate( server_certificate=server_certificate, ca_certificate=self.ca_cert) logging.info("Server PEM re-keyed.") return True except Exception as e: # pylint: disable=broad-except logging.info("Unable to verify server certificate at %s: %s", server_url, e) return False
131,079
Parses table of osquery output. Args: table: A table in a "parsed JSON" representation. Returns: A parsed `rdf_osquery.OsqueryTable` instance.
def ParseTable(table): precondition.AssertIterableType(table, dict) result = rdf_osquery.OsqueryTable() result.header = ParseHeader(table) for row in table: result.rows.append(ParseRow(result.header, row)) return result
131,099
Parses header of osquery output. Args: table: A table in a "parsed JSON" representation. Returns: A parsed `rdf_osquery.OsqueryHeader` instance.
def ParseHeader(table): precondition.AssertIterableType(table, dict) prototype = None # type: List[Text] for row in table: columns = list(iterkeys(row)) if prototype is None: prototype = columns elif prototype != columns: message = "Expected columns '{expected}', got '{actual}' for table {json}" message = message.format(expected=prototype, actual=columns, json=table) raise ValueError(message) result = rdf_osquery.OsqueryHeader() for name in prototype or []: result.columns.append(rdf_osquery.OsqueryColumn(name=name)) return result
131,100
Parses a single row of osquery output. Args: header: A parsed header describing the row format. row: A row in a "parsed JSON" representation. Returns: A parsed `rdf_osquery.OsqueryRow` instance.
def ParseRow(header, row): precondition.AssertDictType(row, Text, Text) result = rdf_osquery.OsqueryRow() for column in header.columns: result.values.append(row[column.name]) return result
131,101
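The "parsed JSON" representation these three parsers consume is a list of dicts with identical string keys, which is the shape osquery's JSON output takes; the sample rows below are illustrative:

table = [
    {"pid": "1", "name": "launchd"},
    {"pid": "42", "name": "osqueryd"},
]
# ParseHeader derives the column list ["pid", "name"] from the first row and
# insists every row has the same keys; ParseRow then emits the values in
# header order: ["1", "launchd"] and ["42", "osqueryd"].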
Calls osquery with given query and returns its output. Args: args: A query to call osquery with. Returns: A "parsed JSON" representation of the osquery output. Raises: QueryError: If the query is incorrect. TimeoutError: If a call to the osquery executable times out. Error: If anything else goes wrong with the subprocess call.
def Query(args): query = args.query.encode("utf-8") timeout = args.timeout_millis / 1000 # `subprocess.run` uses seconds. # TODO: pytype is not aware of the backport. # pytype: disable=module-attr try: # We use `--S` to enforce shell execution. This is because on Windows there # is only `osqueryd` and `osqueryi` is not available. However, by passing # `--S` we can make `osqueryd` behave like `osqueryi`. Since this flag also # works with `osqueryi`, by passing it we simply expand number of supported # executable types. command = [config.CONFIG["Osquery.path"], "--S", "--json", query] proc = subprocess.run( command, timeout=timeout, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # TODO: Since we use a backported API, `SubprocessError` is hard # to work with. Until support for Python 2 is dropped we re-raise with simpler # exception type because we don't really care that much (the exception message # should be detailed enough anyway). except subprocess.TimeoutExpired as error: raise TimeoutError(cause=error) except subprocess.CalledProcessError as error: raise Error("osquery invocation error", cause=error) # pytype: enable=module-attr stdout = proc.stdout.decode("utf-8") stderr = proc.stderr.decode("utf-8").strip() return ProcOutput(stdout=stdout, stderr=stderr)
131,102
Creates a cron job that runs given flow with a given frequency. Args: cron_args: A protobuf of type rdf_cronjobs.CreateCronJobArgs. job_id: Use this job_id instead of an autogenerated unique name (used for system cron jobs - we want them to have well-defined persistent name). token: Security token used for data store access. enabled: If False, the job object will be created, but will be disabled. Returns: Name of the cron job created.
def CreateJob(self, cron_args=None, job_id=None, token=None, enabled=True): if not job_id: uid = random.UInt16() job_id = "%s_%s" % (cron_args.flow_name, uid) flow_runner_args = rdf_flow_runner.FlowRunnerArgs( flow_name="CreateAndRunGenericHuntFlow") flow_args = rdf_hunts.CreateGenericHuntFlowArgs() flow_args.hunt_args.flow_args = cron_args.flow_args flow_args.hunt_args.flow_runner_args.flow_name = cron_args.flow_name flow_args.hunt_runner_args = cron_args.hunt_runner_args flow_args.hunt_runner_args.hunt_name = "GenericHunt" create_cron_args = rdf_cronjobs.CreateCronJobFlowArgs( description=cron_args.description, periodicity=cron_args.frequency, flow_runner_args=flow_runner_args, flow_args=flow_args, allow_overruns=cron_args.allow_overruns, lifetime=cron_args.lifetime) cron_job_urn = self.CRON_JOBS_PATH.Add(job_id) with aff4.FACTORY.Create( cron_job_urn, aff4_type=CronJob, mode="rw", token=token, force_new_version=False) as cron_job: # If the cronjob was already present we don't want to overwrite the # original start_time. existing_cron_args = cron_job.Get(cron_job.Schema.CRON_ARGS) if existing_cron_args and existing_cron_args.start_time: create_cron_args.start_time = existing_cron_args.start_time if create_cron_args != existing_cron_args: cron_job.Set(cron_job.Schema.CRON_ARGS(create_cron_args)) cron_job.Set(cron_job.Schema.DISABLED(not enabled)) return job_id
131,123
Tries to lock and run cron jobs. Args: token: security token force: If True, force a run names: List of job names to run. If unset, run them all
def RunOnce(self, token=None, force=False, names=None): names = names or self.ListJobs(token=token) urns = [self.CRON_JOBS_PATH.Add(name) for name in names] for cron_job_urn in urns: try: with aff4.FACTORY.OpenWithLock( cron_job_urn, blocking=False, token=token, lease_time=600) as cron_job: try: logging.info("Running cron job: %s", cron_job.urn) cron_job.Run(force=force) except Exception as e: # pylint: disable=broad-except logging.exception("Error processing cron job %s: %s", cron_job.urn, e) stats_collector_instance.Get().IncrementCounter( "cron_internal_error") except aff4.LockError: pass
131,130
Do the actual work of the Cron. Will first check if DueToRun is True. CronJob object must be locked (i.e. opened via OpenWithLock) for Run() to be called. Args: force: If True, the job will run no matter what (i.e. even if DueToRun() returns False). Raises: LockError: if the object is not locked.
def Run(self, force=False): if not self.locked: raise aff4.LockError("CronJob must be locked for Run() to be called.") self.KillOldFlows() # If currently running flow has finished, update our state. current_flow_urn = self.Get(self.Schema.CURRENT_FLOW_URN) if current_flow_urn: current_flow = aff4.FACTORY.Open(current_flow_urn, token=self.token) runner = current_flow.GetRunner() if not runner.IsRunning(): if runner.context.state == rdf_flow_runner.FlowContext.State.ERROR: self.Set( self.Schema.LAST_RUN_STATUS, rdf_cronjobs.CronJobRunStatus( status=rdf_cronjobs.CronJobRunStatus.Status.ERROR)) stats_collector_instance.Get().IncrementCounter( "cron_job_failure", fields=[self.urn.Basename()]) else: self.Set( self.Schema.LAST_RUN_STATUS, rdf_cronjobs.CronJobRunStatus( status=rdf_cronjobs.CronJobRunStatus.Status.OK)) start_time = self.Get(self.Schema.LAST_RUN_TIME) elapsed = time.time() - start_time.AsSecondsSinceEpoch() stats_collector_instance.Get().RecordEvent( "cron_job_latency", elapsed, fields=[self.urn.Basename()]) self.DeleteAttribute(self.Schema.CURRENT_FLOW_URN) self.Flush() if not force and not self.DueToRun(): return # Make sure the flow is created with cron job as a parent folder. cron_args = self.Get(self.Schema.CRON_ARGS) cron_args.flow_runner_args.base_session_id = self.urn flow_urn = flow.StartAFF4Flow( runner_args=cron_args.flow_runner_args, args=cron_args.flow_args, token=self.token, sync=False) self.Set(self.Schema.CURRENT_FLOW_URN, flow_urn) self.Set(self.Schema.LAST_RUN_TIME, rdfvalue.RDFDatetime.Now()) self.Flush()
131,141
Adds a new arg to this expression. Args: arg: The argument to add (string). Returns: True if this arg is the last arg, False otherwise. Raises: ParseError: If there are too many args.
def AddArg(self, arg): self.args.append(arg) if len(self.args) > self.number_of_args: raise ParseError("Too many args for this expression.") elif len(self.args) == self.number_of_args: return True return False
131,155
Escape backslashes found inside a string quote. Backslashes followed by anything other than ['"rnbt] will just be included in the string. Args: string: The string that matched. match: The match object (m.group(1) is the escaped code)
def StringEscape(self, string, match, **_): precondition.AssertType(string, Text) if match.group(1) in "'\"rnbt": self.string += compatibility.UnescapeString(string) else: self.string += string
131,157
Wait until the approval is valid (i.e. - approved). Args: timeout: timeout in seconds. None means default timeout (1 hour). 0 means no timeout (wait forever). Returns: Operation object with refreshed target_file. Raises: PollTimeoutError: if timeout is reached.
def WaitUntilValid(self, timeout=None): return utils.Poll( generator=self.Get, condition=lambda f: f.data.is_valid, timeout=timeout)
131,169
Create a timestamp using a start and end time. Args: start: Start timestamp. end: End timestamp. Returns: A tuple (start, end) of converted timestamps or None for all time.
def _MakeTimestamp(self, start=None, end=None): mysql_unsigned_bigint_max = 18446744073709551615 ts_start = int(start or 0) if end is None: ts_end = mysql_unsigned_bigint_max else: ts_end = int(end) if ts_start == 0 and ts_end == mysql_unsigned_bigint_max: return None else: return (ts_start, ts_end)
131,227
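A standalone check of the boundary behaviour described above; this mirrors the method body, which never touches `self`:

MYSQL_UNSIGNED_BIGINT_MAX = 18446744073709551615

def make_timestamp(start=None, end=None):
    ts_start = int(start or 0)
    ts_end = MYSQL_UNSIGNED_BIGINT_MAX if end is None else int(end)
    if ts_start == 0 and ts_end == MYSQL_UNSIGNED_BIGINT_MAX:
        return None  # "all time" sentinel
    return (ts_start, ts_end)

assert make_timestamp() is None
assert make_timestamp(start=100) == (100, MYSQL_UNSIGNED_BIGINT_MAX)
assert make_timestamp(start=100, end=200) == (100, 200)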
Associates keywords with name. Records that keywords are associated with name. Args: name: A name which should be associated with some keywords. keywords: A collection of keywords to associate with name.
def AddKeywordsForName(self, name, keywords): data_store.DB.IndexAddKeywordsForName(self.urn, name, keywords)
131,234
Removes keywords for a name. Args: name: A name which should not be associated with some keywords anymore. keywords: A collection of keywords.
def RemoveKeywordsForName(self, name, keywords): data_store.DB.IndexRemoveKeywordsForName(self.urn, name, keywords)
131,235
Checks whether a connection can be established to MySQL. Args: db_options: A dict mapping GRR MySQL config options to their values. Returns: A boolean indicating whether a connection could be made to a MySQL server instance with the given options.
def CheckMySQLConnection(db_options): for tries_left in range(_MYSQL_MAX_RETRIES, -1, -1): try: connection_options = dict( host=db_options["Mysql.host"], port=db_options["Mysql.port"], db=db_options["Mysql.database_name"], user=db_options["Mysql.database_username"], passwd=db_options["Mysql.database_password"], charset="utf8") ssl_enabled = "Mysql.client_key_path" in db_options if ssl_enabled: connection_options["ssl"] = { "key": db_options["Mysql.client_key_path"], "cert": db_options["Mysql.client_cert_path"], "ca": db_options["Mysql.ca_cert_path"], } connection = MySQLdb.connect(**connection_options) if ssl_enabled: cursor = connection.cursor() cursor.execute("SHOW VARIABLES LIKE 'have_ssl'") res = cursor.fetchone() if res[0] == "have_ssl" and res[1] == "YES": print("SSL enabled successfully.") else: print("Unable to establish SSL connection to MySQL.") return False return True except MySQLdb.OperationalError as mysql_op_error: if len(mysql_op_error.args) < 2: # We expect the exception's arguments to be an error-code and # an error message. print("Unexpected exception type received from MySQL. %d attempts " "left: %s" % (tries_left, mysql_op_error)) time.sleep(_MYSQL_RETRY_WAIT_SECS) continue if mysql_op_error.args[0] == mysql_conn_errors.CONNECTION_ERROR: print("Failed to connect to MySQL. Is it running? %d attempts left." % tries_left) elif mysql_op_error.args[0] == mysql_conn_errors.UNKNOWN_HOST: print("Unknown-hostname error encountered while trying to connect to " "MySQL.") return False # No need for retry. elif mysql_op_error.args[0] == general_mysql_errors.BAD_DB_ERROR: # GRR db doesn't exist yet. That's expected if this is the initial # setup. return True elif mysql_op_error.args[0] in ( general_mysql_errors.ACCESS_DENIED_ERROR, general_mysql_errors.DBACCESS_DENIED_ERROR): print("Permission error encountered while trying to connect to " "MySQL: %s" % mysql_op_error) return False # No need for retry. else: print("Unexpected operational error encountered while trying to " "connect to MySQL. %d attempts left: %s" % (tries_left, mysql_op_error)) except MySQLdb.Error as mysql_error: print("Unexpected error encountered while trying to connect to MySQL. " "%d attempts left: %s" % (tries_left, mysql_error)) time.sleep(_MYSQL_RETRY_WAIT_SECS) return False
131,240
Signs a binary and uploads it to the datastore. Args: source_path: Path to the binary to upload. binary_type: Type of the binary, e.g python-hack or executable. platform: Client platform where the binary is intended to be run. upload_subdirectory: Path of a subdirectory to upload the binary to, relative to the canonical path for binaries of the given type and platform. Raises: BinaryTooLargeError: If the binary to upload is too large.
def UploadSignedBinary(source_path, binary_type, platform, upload_subdirectory=""): file_size = os.path.getsize(source_path) if file_size > _MAX_SIGNED_BINARY_BYTES: raise BinaryTooLargeError( "File [%s] is of size %d (bytes), which exceeds the allowed maximum " "of %d bytes." % (source_path, file_size, _MAX_SIGNED_BINARY_BYTES)) context = ["Platform:%s" % platform.title(), "Client Context"] signing_key = grr_config.CONFIG.Get( "PrivateKeys.executable_signing_private_key", context=context) root_api = maintenance_utils.InitGRRRootAPI() binary_path = "/".join([ platform.lower(), upload_subdirectory, os.path.basename(source_path), ]) binary = root_api.GrrBinary(binary_type, binary_path) with open(source_path, "rb") as fd: binary.Upload( fd, sign_fn=binary.DefaultUploadSigner( private_key=signing_key.GetRawPrivateKey())) print("Uploaded %s to %s" % (binary_type, binary_path))
131,249
Returns the user-type and password for a user. Args: username: Username for the user. password: Password for the user. If None, or not provided, we will prompt for one via the terminal. is_admin: Indicates whether the user should have admin privileges.
def _GetUserTypeAndPassword(username, password=None, is_admin=False): if is_admin: user_type = api_user.ApiGrrUser.UserType.USER_TYPE_ADMIN else: user_type = api_user.ApiGrrUser.UserType.USER_TYPE_STANDARD if password is None: # TODO # pytype: disable=wrong-arg-types password = getpass.getpass(prompt="Please enter password for user '%s':" % username) # pytype: enable=wrong-arg-types return user_type, password
131,256
Converts a unicode string to a filesystem safe filename. For maximum compatibility we escape all chars which are not alphanumeric (in the unicode sense). Args: name: a unicode string that is part of a subject. Returns: A safe filename with escaped special chars.
def ConvertStringToFilename(name): return re.sub( r"\W", lambda x: "%%%02X" % ord(x.group(0)), name, flags=re.UNICODE).rstrip("/")
131,273
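The substitution above percent-escapes every non-alphanumeric character by its code point; for example (the subject string is illustrative):

import re

name = u"C.0123456789abcdef/fs os"
safe = re.sub(
    r"\W", lambda x: "%%%02X" % ord(x.group(0)), name, flags=re.UNICODE).rstrip("/")
print(safe)  # C%2E0123456789abcdef%2Ffs%20os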
Returns the directory/filename where the subject will be stored. Args: subject: The subject. regexes: The list of regular expressions by priority. Returns: File name and directory.
def ResolveSubjectDestination(subject, regexes): components = Components(subject) if not components: # No components to work with. return "aff4", "" # Make all the components safe to use. path = utils.JoinPath(*[ConvertStringToFilename(x) for x in components]) for route in regexes: m = route.match(path) if m: value = m.group("path") if value: base = os.path.basename(value) dirname = os.path.dirname(value) return base, dirname # Default value if nothing else matches. return "aff4", ""
131,275
Create a new file in the file store. We delegate the actual file addition to our contained implementations. Implementations can either implement the AddFile() method, returning a file like object which will be written on, or directly support the AddBlobToStore() method which can copy the VFSBlobImage efficiently. Args: fd: An AFF4 object open for read/write. external: If true, attempt to add files to stores defined as EXTERNAL.
def AddFile(self, fd, external=True): files_for_write = [] for sub_store in self.GetChildrenByPriority(allow_external=external): new_file = sub_store.AddFile(fd) if new_file: files_for_write.append(new_file) fd.Seek(0) while files_for_write: # If we got filehandles back, send them the data. data = fd.Read(self.CHUNK_SIZE) if not data: break for child in files_for_write: child.Write(data) for child in files_for_write: child.Close()
131,281
Search the index for matches starting with target_prefix. Args: index_urn: The index to use. Should be a urn that points to the sha256 namespace. target_prefix: The prefix to match against the index. limit: Either a tuple of (start, limit) or a maximum number of results to return. token: A DB token. Returns: URNs of files which have the same data as this file - as read from the index.
def Query(cls, index_urn, target_prefix="", limit=100, token=None): return data_store.DB.FileHashIndexQuery( index_urn, target_prefix, limit=limit)
131,287
Check hashes against the filestore. Blobs use the hash in the schema: aff4:/files/hash/generic/sha256/[sha256hash] Args: hashes: A list of Hash objects to check. Yields: Tuples of (RDFURN, hash object) that exist in the store.
def CheckHashes(self, hashes): hash_map = {} for hsh in hashes: if hsh.HasField("sha256"): # The canonical name of the file is where we store the file hash. hash_map[aff4.ROOT_URN.Add("files/hash/generic/sha256").Add( str(hsh.sha256))] = hsh for metadata in aff4.FACTORY.Stat(list(hash_map)): yield metadata["urn"], hash_map[metadata["urn"]]
131,291
Checks a list of hashes for presence in the store. Only unique sha1 hashes are checked, if there is duplication in the hashes input it is the caller's responsibility to maintain any necessary mappings. Args: hashes: A list of Hash objects to check. unused_external: Ignored. Yields: Tuples of (RDFURN, hash object) that exist in the store.
def CheckHashes(self, hashes, unused_external=True): hash_map = {} for hsh in hashes: if hsh.HasField("sha1"): hash_urn = self.PATH.Add(str(hsh.sha1)) logging.debug("Checking URN %s", str(hash_urn)) hash_map[hash_urn] = hsh for metadata in aff4.FACTORY.Stat(list(hash_map)): yield metadata["urn"], hash_map[metadata["urn"]]
131,299
Begins an enrollment flow for this client. Args: message: The Certificate sent by the client. Note that this message is not authenticated.
def ProcessMessage(self, message): cert = rdf_crypto.Certificate(message.payload) queue = self.well_known_session_id.Queue() client_id = message.source # It makes no sense to enrol the same client multiple times, so we # eliminate duplicates. Note, that we can still enroll clients multiple # times due to cache expiration. try: enrolment_cache.Get(client_id) return except KeyError: enrolment_cache.Put(client_id, 1) # Create a new client object for this client. if data_store.AFF4Enabled(): client = aff4.FACTORY.Create( client_id, aff4_grr.VFSGRRClient, mode="rw", token=self.token) client_cert = client.Get(client.Schema.CERT) if data_store.RelationalDBEnabled(): try: md = data_store.REL_DB.ReadClientMetadata(client_id.Basename()) client_cert = md.certificate except db.UnknownClientError: client_cert = None if data_store.RelationalDBEnabled(): data_store.REL_DB.WriteClientMetadata( client_id.Basename(), fleetspeak_enabled=False) # Only enroll this client if it has no certificate yet. if not client_cert: # Start the enrollment flow for this client. # Note, that the actual CAEnroler class is autogenerated from the # CAEnrolerMixin by the DualDBFlow decorator confusing the linter - hence # the disable directive. flow.StartAFF4Flow( client_id=client_id, flow_name=CAEnroler.__name__, # pylint: disable=undefined-variable csr=cert, queue=queue, token=self.token)
131,308
Validates artifact syntax. This method can be used to validate individual artifacts as they are loaded, without needing all artifacts to be loaded first, as for Validate(). Args: rdf_artifact: RDF object artifact. Raises: ArtifactSyntaxError: If artifact syntax is invalid.
def ValidateSyntax(rdf_artifact): if not rdf_artifact.doc: raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, "missing doc") for supp_os in rdf_artifact.supported_os: valid_os = rdf_artifact.SUPPORTED_OS_LIST if supp_os not in valid_os: detail = "invalid `supported_os` ('%s' not in %s)" % (supp_os, valid_os) raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail) for condition in rdf_artifact.conditions: # FIXME(hanuszczak): It does not look like the code below can throw # `ConditionException`. Do we really need it then? try: of = objectfilter.Parser(condition).Parse() of.Compile(objectfilter.BaseFilterImplementation) except rdf_artifacts.ConditionError as e: detail = "invalid condition '%s'" % condition raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail, e) for label in rdf_artifact.labels: if label not in rdf_artifact.ARTIFACT_LABELS: raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, "invalid label '%s'" % label) # Anything listed in provides must be defined in the KnowledgeBase valid_provides = rdf_client.KnowledgeBase().GetKbFieldNames() for kb_var in rdf_artifact.provides: if kb_var not in valid_provides: detail = "broken `provides` ('%s' not in %s)" % (kb_var, valid_provides) raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail) # Any %%blah%% path dependencies must be defined in the KnowledgeBase for dep in GetArtifactPathDependencies(rdf_artifact): if dep not in valid_provides: detail = "broken path dependencies ('%s' not in %s)" % (dep, valid_provides) raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail) for source in rdf_artifact.sources: try: source.Validate() except rdf_artifacts.ArtifactSourceSyntaxError as e: raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, "bad source", e)
131,320
Validates artifact dependencies. This method checks whether all dependencies of the artifact are present and contain no errors. This method can be called only after all other artifacts have been loaded. Args: rdf_artifact: RDF object artifact. Raises: ArtifactDependencyError: If a dependency is missing or contains errors.
def ValidateDependencies(rdf_artifact): for dependency in GetArtifactDependencies(rdf_artifact): try: dependency_obj = REGISTRY.GetArtifact(dependency) except rdf_artifacts.ArtifactNotRegisteredError as e: raise rdf_artifacts.ArtifactDependencyError( rdf_artifact, "missing dependency", cause=e) message = dependency_obj.error_message if message: raise rdf_artifacts.ArtifactDependencyError( rdf_artifact, "dependency error", cause=message)
131,321
Return a set of artifact dependencies. Args: rdf_artifact: RDF object artifact. recursive: If True, recurse into dependencies to find their dependencies. depth: Used for limiting recursion depth. Returns: A set of strings containing the dependent artifact names. Raises: RuntimeError: If maximum recursion depth reached.
def GetArtifactDependencies(rdf_artifact, recursive=False, depth=1):
  deps = set()
  for source in rdf_artifact.sources:
    # ARTIFACT is the legacy name for ARTIFACT_GROUP
    # per: https://github.com/ForensicArtifacts/artifacts/pull/143
    # TODO(user): remove legacy support after migration.
    if source.type in (rdf_artifacts.ArtifactSource.SourceType.ARTIFACT,
                       rdf_artifacts.ArtifactSource.SourceType.ARTIFACT_GROUP):
      if source.attributes.GetItem("names"):
        deps.update(source.attributes.GetItem("names"))

  if depth > 10:
    raise RuntimeError("Max artifact recursion depth reached.")

  deps_set = set(deps)
  if recursive:
    for dep in deps:
      artifact_obj = REGISTRY.GetArtifact(dep)
      new_dep = GetArtifactDependencies(artifact_obj, True, depth=depth + 1)
      if new_dep:
        deps_set.update(new_dep)

  return deps_set
131,322
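The recursion above is a depth-limited transitive closure over the artifact registry. A self-contained sketch of the same idea follows, using a plain dict as a stand-in registry; the artifact names and the registry contents are made up for the example.

# Sketch only: a dict mapping artifact names to direct dependencies.
REGISTRY_SKETCH = {
    "WindowsPersistence": ["WindowsRunKeys", "WindowsServices"],
    "WindowsRunKeys": [],
    "WindowsServices": ["WindowsDrivers"],
    "WindowsDrivers": [],
}


def get_dependencies(name, recursive=False, depth=1):
  if depth > 10:
    raise RuntimeError("Max artifact recursion depth reached.")
  deps = set(REGISTRY_SKETCH[name])
  if recursive:
    # Iterate over a copy so we can grow the result set while recursing.
    for dep in set(deps):
      deps.update(get_dependencies(dep, recursive=True, depth=depth + 1))
  return deps


print(get_dependencies("WindowsPersistence", recursive=True))
# {'WindowsRunKeys', 'WindowsServices', 'WindowsDrivers'}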
Return a set of knowledgebase path dependencies. Args: rdf_artifact: RDF artifact object. Returns: A set of strings for the required kb objects e.g. ["users.appdata", "systemroot"]
def GetArtifactPathDependencies(rdf_artifact):
  deps = set()
  for source in rdf_artifact.sources:
    for arg, value in iteritems(source.attributes):
      paths = []
      if arg in ["path", "query"]:
        paths.append(value)
      if arg == "key_value_pairs":
        # This is a REGISTRY_VALUE {key:blah, value:blah} dict.
        paths.extend([x["key"] for x in value])
      if arg in ["keys", "paths", "path_list", "content_regex_list"]:
        paths.extend(value)
      for path in paths:
        for match in artifact_utils.INTERPOLATED_REGEX.finditer(path):
          deps.add(match.group()[2:-2])  # Strip off %%.
  deps.update(GetArtifactParserDependencies(rdf_artifact))
  return deps
131,324
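The path-dependency walk hinges on spotting %%...%% placeholders inside source paths. The regex below approximates what artifact_utils.INTERPOLATED_REGEX does for the purpose of illustration; the exact pattern GRR uses may differ.

# Sketch of %%...%% placeholder extraction; the regex is an assumption.
import re

INTERPOLATED_REGEX_SKETCH = re.compile(r"%%([^%]+?)%%")

path = "%%users.appdata%%\\Mozilla\\Firefox\\Profiles"
deps = {match.group(1) for match in INTERPOLATED_REGEX_SKETCH.finditer(path)}
print(deps)  # {'users.appdata'}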
Return the set of knowledgebase path dependencies required by the parser. Args: rdf_artifact: RDF artifact object. Returns: A set of strings for the required kb objects e.g. ["users.appdata", "systemroot"]
def GetArtifactParserDependencies(rdf_artifact):
  deps = set()
  processors = parser.Parser.GetClassesByArtifact(rdf_artifact.name)
  for p in processors:
    deps.update(p.knowledgebase_dependencies)
  return deps
131,325
Adds a directory path as a source. Args: dirpath: a string representing a path to the directory. Returns: True if the directory is not an already existing source.
def AddDir(self, dirpath):
  if dirpath not in self._dirs:
    self._dirs.add(dirpath)
    return True
  return False
131,327
Adds a file path as a source. Args: filepath: a string representing a path to the file. Returns: True if the file is not an already existing source.
def AddFile(self, filepath):
  if filepath not in self._files:
    self._files.add(filepath)
    return True
  return False
131,328
Adds a datastore URN as a source. Args: urn: an RDF URN value of the datastore. Returns: True if the datastore is not an already existing source.
def AddDatastore(self, urn):
  if urn not in self._datastores:
    self._datastores.add(urn)
    return True
  return False
131,329
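The three Add* methods above share one pattern: a set gives cheap duplicate detection, and the boolean return tells the caller whether anything new was registered. A self-contained sketch of that pattern follows; the class name and sample paths are invented for illustration.

class SourceSet(object):
  """Sketch of an idempotent source registry backed by a set."""

  def __init__(self):
    self._items = set()

  def Add(self, item):
    if item not in self._items:
      self._items.add(item)
      return True
    return False


sources = SourceSet()
assert sources.Add("/etc/artifacts") is True
assert sources.Add("/etc/artifacts") is False  # Duplicate, nothing added.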
Get artifact by name. Args: name: artifact name string. Returns: artifact object. Raises: ArtifactNotRegisteredError: if artifact doesn't exist in the registry.
def GetArtifact(self, name):
  self._CheckDirty()
  result = self._artifacts.get(name)
  if not result:
    raise rdf_artifacts.ArtifactNotRegisteredError(
        "Artifact %s missing from registry. You may need to sync the "
        "artifact repo by running make in the artifact directory." % name)
  return result
131,343
Returns the session IDs of all the flows we launched. Args: flow_type: The type of flows to fetch. Can be "all", "outstanding" or "finished". Returns: A list of flow URNs.
def GetLaunchedFlows(self, flow_type="outstanding"):
  result = None
  all_clients = set(self.ListAllClients())
  finished_clients = set(self.ListFinishedClients())
  outstanding_clients = all_clients - finished_clients

  if flow_type == "all":
    result = all_clients
  elif flow_type == "finished":
    result = finished_clients
  elif flow_type == "outstanding":
    result = outstanding_clients

  # Now get the flows for all these clients.
  flows = aff4.FACTORY.MultiListChildren(
      [self.urn.Add(x.Basename()) for x in result])

  return [x[0] for _, x in flows]
131,377
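The flow-type selection above is plain set arithmetic: "outstanding" is everything that was scheduled minus everything that has finished. A tiny runnable example, with invented client ids:

all_clients = {"C.1111", "C.2222", "C.3333"}
finished_clients = {"C.2222"}

outstanding_clients = all_clients - finished_clients
print(sorted(outstanding_clients))  # ['C.1111', 'C.3333']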
Schedule all flows without using the Foreman. Since we know all the client ids to run on we might as well just schedule all the flows and wait for the results. Args: token: A datastore access token.
def ManuallyScheduleClients(self, token=None):
  client_ids = set()
  for flow_request in self.args.flows:
    for client_id in flow_request.client_ids:
      client_ids.add(client_id)

  self.StartClients(self.session_id, client_ids, token=token)
131,380
Try and give a 'stat' for something not in the data store. Args: fd: The object with no stat. Returns: A dictionary corresponding to what we'll say the 'stat' is for objects which are not actually files, so have no OS level stat.
def MakePartialStat(self, fd):
  is_dir = "Container" in fd.behaviours

  return {
      "pathspec": fd.Get(fd.Schema.PATHSPEC, ""),
      "st_atime": fd.Get(fd.Schema.LAST, 0),
      "st_blksize": 0,
      "st_blocks": 0,
      "st_ctime": 0,
      "st_dev": 0,
      "st_gid": 0,
      "st_ino": 0,
      "st_mode": self.default_dir_mode if is_dir else self.default_file_mode,
      "st_mtime": 0,
      "st_nlink": 0,
      "st_rdev": 0,
      "st_size": fd.Get(fd.Schema.SIZE, 0),
      "st_uid": 0
  }
131,386
Reads a directory given by path. Args: path: The path to list children of. fh: A file handler. Not used. Yields: A generator of filenames. Raises: FuseOSError: If we try and list a file.
def Readdir(self, path, fh=None):
  del fh

  # We can't read a path if it's a file.
  if not self._IsDir(path):
    raise fuse.FuseOSError(errno.ENOTDIR)

  fd = aff4.FACTORY.Open(self.root.Add(path), token=self.token)

  children = fd.ListChildren()

  # Make these special directories unicode to be consistent with the rest of
  # aff4.
  for directory in [u".", u".."]:
    yield directory

  # ListChildren returns a generator, so we do the same.
  for child in children:
    # Filter out any directories we've chosen to ignore.
    if child.Path() not in self.ignored_dirs:
      yield child.Basename()
131,387
Performs a stat on a file or directory. Args: path: The path to stat. fh: A file handler. Not used. Returns: A dictionary mapping st_ names to their values. Raises: FuseOSError: When a path is supplied that grr doesn't know about, i.e. an invalid file path. ValueError: If an empty path is passed. (The empty string, when passed to self.root.Add, returns a path for aff4:/, the root directory, which is not the behaviour we want.)
def Getattr(self, path, fh=None):
  del fh

  if not path:
    raise fuse.FuseOSError(errno.ENOENT)

  if path != self.root:
    full_path = self.root.Add(path)
  else:
    full_path = path

  fd = aff4.FACTORY.Open(full_path, token=self.token)

  # The root aff4 path technically doesn't exist in the data store, so
  # it is a special case.
  if full_path == "/":
    return self.MakePartialStat(fd)

  fd = aff4.FACTORY.Open(full_path, token=self.token)

  # Grab the stat according to aff4.
  aff4_stat = fd.Get(fd.Schema.STAT)

  # If the Schema for the object has a STAT attribute, go ahead and return
  # it as a dictionary.
  if aff4_stat:
    return aff4_stat.AsDict()

  # If the object didn't have a stored stat, we figure out if it is a special
  # grr object, or just doesn't exist.

  # We now check if the aff4 object actually has a row in the data store.
  # This prevents us from being able to cd to directories that don't exist,
  # since such directories have a newly-created empty AFF4Object,
  # but no row in the data store. Anything that is a
  # row in the data store will have a LAST attribute, so we check that.
  elif fd.Get(fd.Schema.LAST) is None:
    # We raise the "no such file or directory" error.
    raise fuse.FuseOSError(errno.ENOENT)
  else:
    # This is an object that exists in the datastore, but has no STAT, so we
    # don't know how to handle it.
    pass

  # If the object was in the data store, but didn't have a stat, we just
  # try and guess some sensible values.
  return self.MakePartialStat(fd)
131,388
Reads data from a file. Args: path: The path to the file to read. length: How many bytes to read. offset: Offset in bytes from which reading should start. fh: A file handler. Not used. Returns: A string containing the file contents requested. Raises: FuseOSError: If we try and read a directory or if we try and read an object that doesn't support reading.
def Read(self, path, length=None, offset=0, fh=None):
  del fh

  if self._IsDir(path):
    raise fuse.FuseOSError(errno.EISDIR)

  fd = aff4.FACTORY.Open(self.root.Add(path), token=self.token)

  # If the object has Read() and Seek() methods, let's use them.
  if all((hasattr(fd, "Read"), hasattr(fd, "Seek"), callable(fd.Read),
          callable(fd.Seek))):
    # By default, read the whole file.
    if length is None:
      length = fd.Get(fd.Schema.SIZE)

    fd.Seek(offset)
    return fd.Read(length)
  else:
    # If we don't have Read/Seek methods, we probably can't read this object.
    raise fuse.FuseOSError(errno.EIO)
131,389
True if we need to update this path from the client. Args: path: The path relative to the root to check freshness of. last: An aff4:last attribute to check freshness of. At least one of path or last must be supplied. Returns: True if the path hasn't been updated in the last self.max_age_before_refresh seconds, else False. Raises: type_info.TypeValueError: If no arguments are supplied.
def DataRefreshRequired(self, path=None, last=None):
  # If we didn't get given a last attribute, use the path to get one from the
  # object.
  if last is None:
    if path is None:
      # If we didn't get a path either, we can't do anything.
      raise type_info.TypeValueError("Either 'path' or 'last' must"
                                     " be supplied as an argument.")

    fd = aff4.FACTORY.Open(self.root.Add(path), token=self.token)
    # We really care about the last time the stat was updated, so we use
    # this instead of the LAST attribute, which is the last time anything
    # was updated about the object.
    stat_obj = fd.Get(fd.Schema.STAT)
    if stat_obj:
      last = stat_obj.age
    else:
      last = rdfvalue.RDFDatetime(0)

  # If the object doesn't even have a LAST attribute by this point,
  # we say it hasn't been accessed within the cache expiry time.
  if last is None:
    return True
  last = last.AsDatetime()

  # Remember to use UTC time, since that's what the datastore uses.
  return datetime.datetime.utcnow() - last > self.max_age_before_refresh
131,391
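The freshness test reduces to comparing "now minus last update" against a configured maximum age, both in UTC. A standalone sketch of that check follows; it uses only the standard library, and the 30-second threshold is an arbitrary example value rather than GRR's default.

import datetime

MAX_AGE_BEFORE_REFRESH = datetime.timedelta(seconds=30)


def data_refresh_required(last_updated_utc):
  # Treat "never updated" as stale.
  if last_updated_utc is None:
    return True
  # Compare in UTC, matching what the data store records.
  return datetime.datetime.utcnow() - last_updated_utc > MAX_AGE_BEFORE_REFRESH


fresh = datetime.datetime.utcnow() - datetime.timedelta(seconds=5)
stale = datetime.datetime.utcnow() - datetime.timedelta(minutes=5)
print(data_refresh_required(fresh))  # False
print(data_refresh_required(stale))  # True
print(data_refresh_required(None))   # True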
Updates the directory listing from the client. Args: path: The path to the directory to update. Client is inferred from this. fh: A file handler. Not used. Returns: A list of filenames.
def Readdir(self, path, fh=None):
  if self.DataRefreshRequired(path):
    self._RunAndWaitForVFSFileUpdate(path)

  return super(GRRFuse, self).Readdir(path, fh=None)
131,393
Return which chunks a file doesn't have. Specifically, we return a list of the chunks specified by a length-offset range which are not in the datastore. Args: fd: The database object to read chunks from. length: Length to read. offset: File offset to read from. Returns: A list of chunk numbers.
def GetMissingChunks(self, fd, length, offset):
  start_chunk = offset // fd.chunksize
  end_chunk = (offset + length - 1) // fd.chunksize
  relevant_chunks = range(start_chunk, end_chunk + 1)

  missing_chunks = set(relevant_chunks)
  for idx, metadata in iteritems(fd.ChunksMetadata(relevant_chunks)):
    if not self.DataRefreshRequired(last=metadata.get("last", None)):
      missing_chunks.remove(idx)

  return sorted(missing_chunks)
131,394
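A worked example of the chunk-index arithmetic used above: with a 512-byte chunk size, a 1000-byte read starting at offset 700 touches chunks 1 through 3. The numbers are chosen only to illustrate the floor division.

chunksize = 512
offset, length = 700, 1000

start_chunk = offset // chunksize                # 700 // 512 == 1
end_chunk = (offset + length - 1) // chunksize   # 1699 // 512 == 3
print(list(range(start_chunk, end_chunk + 1)))   # [1, 2, 3]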
Calls into the IOKit to load a kext by file-system path. Apple kext API doco here: http://developer.apple.com/library/mac/#documentation/IOKit/Reference/ KextManager_header_reference/Reference/reference.html Args: kext_path: Absolute or relative POSIX path to the kext. Raises: OSError: On failure to load the kext.
def InstallDriver(kext_path):
  km = objc.KextManager()

  cf_kext_path = km.PyStringToCFString(kext_path)
  kext_url = km.dll.CFURLCreateWithFileSystemPath(
      objc.CF_DEFAULT_ALLOCATOR, cf_kext_path, objc.POSIX_PATH_STYLE, True)
  status = km.iokit.KextManagerLoadKextWithURL(kext_url, None)
  km.dll.CFRelease(kext_url)
  km.dll.CFRelease(cf_kext_path)
  if status != objc.OS_SUCCESS:
    raise OSError("Failed to load kext at {0}: {1}".format(kext_path, status))
131,402
Calls into the IOKit to unload a kext by its name. Args: bundle_name: The bundle identifier of the kernel extension as defined in Info.plist field CFBundleIdentifier. Returns: The error code from the library call. objc.OS_SUCCESS if successful.
def UninstallDriver(bundle_name):
  km = objc.KextManager()

  cf_bundle_name = km.PyStringToCFString(bundle_name)
  status = km.iokit.KextManagerUnloadKextWithIdentifier(cf_bundle_name)
  km.dll.CFRelease(cf_bundle_name)
  return status
131,403
Initializes UnknownSignedBinaryError. Args: binary_id: rdf_objects.SignedBinaryID for the signed binary. cause: A lower-level Exception raised by the database driver, which might have more details about the error.
def __init__(self, binary_id, cause=None):
  super(UnknownSignedBinaryError, self).__init__(binary_id, cause=cause)

  self.binary_id = binary_id
  self.message = ("Signed binary of type %s and path %s was not found" %
                  (self.binary_id.binary_type, self.binary_id.path))
131,427
Reads the ClientMetadata record for a single client. Args: client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7". Returns: An rdfvalues.object.ClientMetadata object. Raises: UnknownClientError: if no client with corresponding id was found.
def ReadClientMetadata(self, client_id):
  result = self.MultiReadClientMetadata([client_id])
  try:
    return result[client_id]
  except KeyError:
    raise UnknownClientError(client_id)
131,450
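This single-client reader (and ReadClientFullInfo below) is a thin wrapper over the corresponding batch call: look the id up in the returned dict and convert a missing key into a domain-specific error. A generic, runnable sketch of that wrapper pattern follows; the function and class names and the fake store are invented for the example.

class UnknownKeyError(KeyError):
  """Stand-in for a domain-specific 'not found' error."""


def read_one(multi_reader, key):
  result = multi_reader([key])
  try:
    return result[key]
  except KeyError:
    raise UnknownKeyError(key)


def fake_multi_read(keys):
  store = {"C.ea3b2b71840d6fa7": {"os": "Linux"}}
  return {k: store[k] for k in keys if k in store}


print(read_one(fake_multi_read, "C.ea3b2b71840d6fa7"))  # {'os': 'Linux'}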
Reads full client information for a single client. Args: client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7". Returns: A `ClientFullInfo` instance for given client. Raises: UnknownClientError: if no client with such id was found.
def ReadClientFullInfo(self, client_id):
  result = self.MultiReadClientFullInfo([client_id])
  try:
    return result[client_id]
  except KeyError:
    raise UnknownClientError(client_id)
131,451
Iterates over all available clients and yields full info protobufs. Args: min_last_ping: If not None, only the clients with last-ping timestamps newer than (or equal to) min_last_ping will be returned. batch_size: Always reads <batch_size> client full infos at a time. Yields: An rdfvalues.objects.ClientFullInfo object for each client in the db.
def IterateAllClientsFullInfo(self, min_last_ping=None, batch_size=50000):
  all_client_ids = self.ReadAllClientIDs(min_last_ping=min_last_ping)

  for batch in collection.Batch(all_client_ids, batch_size):
    res = self.MultiReadClientFullInfo(batch)
    for full_info in itervalues(res):
      yield full_info
131,452
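Both iterators lean on a batching helper so that only batch_size records are read from the database at a time. The generator below approximates what a helper like collection.Batch does; it is a sketch, not GRR's actual implementation.

import itertools


def batch(iterable, size):
  """Yields lists of at most `size` items from `iterable`."""
  iterator = iter(iterable)
  while True:
    chunk = list(itertools.islice(iterator, size))
    if not chunk:
      return
    yield chunk


print(list(batch(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]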
Iterates over all available clients and yields client snapshot objects. Args: min_last_ping: If provided, only snapshots for clients with last-ping timestamps newer than (or equal to) the given value will be returned. batch_size: Always reads <batch_size> snapshots at a time. Yields: An rdfvalues.objects.ClientSnapshot object for each client in the db.
def IterateAllClientSnapshots(self, min_last_ping=None, batch_size=50000):
  all_client_ids = self.ReadAllClientIDs(min_last_ping=min_last_ping)

  for batch in collection.Batch(all_client_ids, batch_size):
    res = self.MultiReadClientSnapshot(batch)
    for snapshot in itervalues(res):
      if snapshot:
        yield snapshot
131,453
Lists path info records that correspond to children of given path. Args: client_id: An identifier string for a client. path_type: A type of a path to retrieve path information for. components: A tuple of path components of a path to retrieve child path information for. timestamp: If set, lists only descendants that existed only at that timestamp. Returns: A list of `rdf_objects.PathInfo` instances sorted by path components.
def ListChildPathInfos(self, client_id, path_type, components,
                       timestamp=None):
  return self.ListDescendentPathInfos(
      client_id, path_type, components, max_depth=1, timestamp=timestamp)
131,454
Initializes a collection of path info records for a client. Unlike `WritePathInfo`, this method clears stat and hash histories of paths associated with path info records. This method is intended to be used only in the data migration scripts. Args: client_id: A client identifier for which the paths are to be initialized. path_infos: A list of `rdf_objects.PathInfo` objects to write.
def InitPathInfos(self, client_id, path_infos):
  self.ClearPathHistory(client_id, path_infos)
  self.WritePathInfos(client_id, path_infos)
131,455
Writes a collection of `StatEntry` observed for particular path. Args: client_path: A `ClientPath` instance. stat_entries: A dictionary with timestamps as keys and `StatEntry` instances as values.
def WritePathStatHistory(self, client_path, stat_entries):
  client_path_history = ClientPathHistory()
  for timestamp, stat_entry in iteritems(stat_entries):
    client_path_history.AddStatEntry(timestamp, stat_entry)

  self.MultiWritePathHistory({client_path: client_path_history})
131,456
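The write path folds a {timestamp: StatEntry} dict into a per-path history object and hands it to a multi-write call keyed by path. A self-contained sketch of that shape follows; the class, function names, and sample values stand in for ClientPathHistory and are not GRR code.

class PathHistorySketch(object):

  def __init__(self):
    self.entries = {}

  def add_stat_entry(self, timestamp, stat_entry):
    self.entries[timestamp] = stat_entry


def write_path_stat_history(path, stat_entries, multi_write):
  history = PathHistorySketch()
  for timestamp, stat_entry in stat_entries.items():
    history.add_stat_entry(timestamp, stat_entry)
  multi_write({path: history})


write_path_stat_history(
    "/etc/passwd",
    {1557151337: {"st_size": 1024}},
    lambda mapping: print(sorted(mapping)))  # ['/etc/passwd']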