Constructor. Args: max_size: The maximum number of objects held in cache.
def __init__(self, max_size=10):
  # This class implements a LRU cache which needs fast updates of the LRU
  # order for random elements. This is usually implemented by using a
  # dict for fast lookups and a linked list for quick deletions / insertions.
  self._age = LinkedList()
  self._hash = {}
  self._limit = max_size
  self.lock = threading.RLock()
129,877
Fetch the object from cache. Objects may be flushed from cache at any time. Callers must always handle the possibility of KeyError raised here. Args: key: The key used to access the object. Returns: Cached object. Raises: KeyError: If the object is not present in the cache.
def Get(self, key):
  if key not in self._hash:
    raise KeyError(key)

  node = self._hash[key]
  self._age.Unlink(node)
  self._age.AppendNode(node)

  return node.data
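For readers unfamiliar with the dict-plus-linked-list pattern above, the sketch below reproduces the same "touch on access" semantics using collections.OrderedDict. It only illustrates the idea; it is not GRR's cache class, and the class and method names are invented.

import collections
import threading

class TinyLRUCache(object):
  """Minimal LRU cache sketch: most recently used keys live at the end."""

  def __init__(self, max_size=10):
    self._data = collections.OrderedDict()
    self._limit = max_size
    self.lock = threading.RLock()

  def Put(self, key, value):
    with self.lock:
      self._data[key] = value
      self._data.move_to_end(key)       # Mark as most recently used.
      while len(self._data) > self._limit:
        self._data.popitem(last=False)  # Evict the least recently used entry.

  def Get(self, key):
    with self.lock:
      value = self._data[key]           # Raises KeyError, as in the original.
      self._data.move_to_end(key)       # Refresh the LRU order on access.
      return value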
129,885
Constructor. This cache will refresh the age of the cached object as long as they are accessed within the allowed age. The age refers to the time since it was last touched. Args: max_size: The maximum number of objects held in cache. max_age: The maximum length of time an object is considered alive.
def __init__(self, max_size=10, max_age=600): super(TimeBasedCache, self).__init__(max_size) self.max_age = max_age def HouseKeeper(): if not time: # This might happen when the main thread exits, we don't want to raise. return now = time.time() for cache in TimeBasedCache.active_caches: # Only expunge while holding the lock on the data store. with cache.lock: # pylint: disable=protected-access # We need to take a copy of the value list because we are changing # this dict during the iteration. for node in list(itervalues(cache._hash)): timestamp, obj = node.data # Expire the object if it is too old. if timestamp + cache.max_age < now: cache.KillObject(obj) cache._age.Unlink(node) cache._hash.pop(node.key, None) # pylint: enable=protected-access if not TimeBasedCache.house_keeper_thread: TimeBasedCache.active_caches = weakref.WeakSet() # This thread is designed to never finish. TimeBasedCache.house_keeper_thread = InterruptableThread( name="HouseKeeperThread", target=HouseKeeper) TimeBasedCache.house_keeper_thread.start() TimeBasedCache.active_caches.add(self)
129,887
Generate ZipInfo instance for the given name, compression and stat. Args: arcname: The name in the archive this should take. compress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED) st: An optional stat object to be used for setting headers. Returns: ZipInfo instance. Raises: ValueError: If arcname is not provided.
def _GenerateZipInfo(self, arcname=None, compress_type=None, st=None): # Fake stat response. if st is None: # TODO(user):pytype: stat_result typing is not correct. # pytype: disable=wrong-arg-count st = os.stat_result((0o100644, 0, 0, 0, 0, 0, 0, 0, 0, 0)) # pytype: enable=wrong-arg-count mtime = time.localtime(st.st_mtime or time.time()) date_time = mtime[0:6] # Create ZipInfo instance to store file information if arcname is None: raise ValueError("An arcname must be provided.") zinfo = zipfile.ZipInfo(arcname, date_time) zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes if compress_type is None: zinfo.compress_type = self._compression else: zinfo.compress_type = compress_type zinfo.file_size = 0 zinfo.compress_size = 0 zinfo.flag_bits = 0x08 # Setting data descriptor flag. zinfo.CRC = 0x08074b50 # Predefined CRC for archives using data # descriptors. # This fills an empty Info-ZIP Unix extra field. zinfo.extra = struct.pack( "<HHIIHH", 0x5855, 12, 0, # time of last access (UTC/GMT) 0, # time of last modification (UTC/GMT) 0, # user ID 0) # group ID return zinfo
129,900
Write a zip member from a file like object. Args: src_fd: A file like object, must support seek(), tell(), read(). arcname: The name in the archive this should take. compress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED) st: An optional stat object to be used for setting headers. Raises: ArchiveAlreadyClosedError: If the zip is already closed. Yields: Chunks of binary data.
def WriteFromFD(self, src_fd, arcname=None, compress_type=None, st=None):
  yield self.WriteFileHeader(
      arcname=arcname, compress_type=compress_type, st=st)

  while 1:
    buf = src_fd.read(1024 * 1024)
    if not buf:
      break

    yield self.WriteFileChunk(buf)

  yield self.WriteFileFooter()
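A minimal usage sketch of the streaming interface above: the chunks yielded by WriteFromFD are appended to an output stream as they are produced, so the whole member never has to be held in memory. Only WriteFromFD comes from the code above; the writer and output names are assumptions for illustration.

import io

def StreamFileIntoArchive(zip_writer, src_path, out_fd, arcname=None):
  """Streams one file into an already-open streaming zip writer (sketch)."""
  with io.open(src_path, "rb") as src_fd:
    # WriteFromFD yields header, data chunks and footer as raw bytes.
    for chunk in zip_writer.WriteFromFD(src_fd, arcname=arcname or src_path):
      out_fd.write(chunk)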
129,905
Return all audit log entries between now-offset and now. Args: offset: rdfvalue.Duration specifying how far back to look in time. now: rdfvalue.RDFDatetime for the current time. token: GRR access token. Yields: AuditEvents created during the time range.
def GetAuditLogEntries(offset, now, token):
  start_time = now - offset - audit.AUDIT_ROLLOVER_TIME

  for fd in audit.LegacyAuditLogsForTimespan(start_time, now, token):
    for event in fd.GenerateItems():
      if now - offset < event.timestamp < now:
        yield event
129,929
Fetches client data from the relational db. Args: recency_window: An rdfvalue.Duration specifying a window of last-ping timestamps to consider. Clients that haven't communicated with GRR servers longer than the given period will be skipped. If recency_window is None, all clients will be iterated. Yields: Batches (lists) of ClientFullInfo objects.
def _IterateAllClients(recency_window=None):
  if recency_window is None:
    min_last_ping = None
  else:
    min_last_ping = rdfvalue.RDFDatetime.Now() - recency_window

  client_ids = data_store.REL_DB.ReadAllClientIDs(min_last_ping=min_last_ping)
  for client_id_batch in collection.Batch(client_ids, CLIENT_READ_BATCH_SIZE):
    client_info_dict = data_store.REL_DB.MultiReadClientFullInfo(
        client_id_batch)
    yield list(itervalues(client_info_dict))
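The batching step above relies on a helper that slices an arbitrary iterable into fixed-size lists. The sketch below shows one common way to implement such a helper with itertools; it is a stand-in illustration, not GRR's collection.Batch.

import itertools

def BatchIterable(iterable, size):
  """Yields successive lists of at most `size` items from `iterable`."""
  iterator = iter(iterable)
  while True:
    batch = list(itertools.islice(iterator, size))
    if not batch:
      return
    yield batch

# Example: process ids 1000 at a time.
# for batch in BatchIterable(range(2500), 1000):
#   print(len(batch))  # 1000, 1000, 500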
129,930
Constructor. Args: report_type: rdf_stats.ClientGraphSeries.ReportType for the client stats to track.
def __init__(self, report_type):
  self._report_type = report_type
  self.categories = dict([(x, {}) for x in self.active_days])
129,931
Adds another instance of this category into the active_days counter. We automatically count the event towards all relevant active_days. For example, if the category "Windows" was seen 8 days ago it will be counted towards the 30 day active, 14 day active but not against the 7 and 1 day actives. Args: category: The category name to account this instance against. label: Client label to which this should be applied. age: When this instance occurred.
def Add(self, category, label, age):
  now = rdfvalue.RDFDatetime.Now()
  category = utils.SmartUnicode(category)

  for active_time in self.active_days:
    self.categories[active_time].setdefault(label, {})
    if (now - age).seconds < active_time * 24 * 60 * 60:
      self.categories[active_time][label][category] = self.categories[
          active_time][label].get(category, 0) + 1
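The bucketing logic above counts one event toward every activity window it falls inside. The snippet below demonstrates the same threshold arithmetic with plain integers; the window list mirrors the 1/7/14/30-day example from the docstring and everything else is illustrative.

ACTIVE_DAYS = [1, 7, 14, 30]

def ActiveWindowsForAge(age_in_days):
  """Returns the activity windows (in days) an event this old counts toward."""
  return [window for window in ACTIVE_DAYS if age_in_days < window]

# An event seen 8 days ago counts toward the 14- and 30-day windows only.
assert ActiveWindowsForAge(8) == [14, 30]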
129,932
Delete a GRR temp file. To limit possible damage the path must be absolute and either the file must be within any of the Client.tempdir_roots or the file name must begin with Client.tempfile_prefix. Args: path: path string to file to be deleted. Raises: OSError: Permission denied, or file not found. ErrorBadPath: Path must be absolute. ErrorNotTempFile: Filename must start with Client.tempfile_prefix. ErrorNotAFile: File to delete does not exist.
def DeleteGRRTempFile(path):
  precondition.AssertType(path, Text)

  if not os.path.isabs(path):
    raise ErrorBadPath("Path must be absolute")

  prefix = config.CONFIG["Client.tempfile_prefix"]
  directories = [
      GetTempDirForRoot(root) for root in config.CONFIG["Client.tempdir_roots"]
  ]
  if not _CheckIfPathIsValidForDeletion(
      path, prefix=prefix, directories=directories):
    msg = ("Can't delete temp file %s. Filename must start with %s "
           "or lie within any of %s.")
    raise ErrorNotTempFile(msg % (path, prefix, ";".join(directories)))

  if os.path.exists(path):
    # Clear our file handle cache so the file can be deleted.
    files.FILE_HANDLE_CACHE.Flush()
    os.remove(path)
  else:
    raise ErrorNotAFile("%s does not exist." % path)
129,967
Returns the provided token or the default token. Args: token: A token or None. Raises: access_control.UnauthorizedAccess: no token was provided.
def GetDefaultToken(token):
  if token is None:
    token = default_token

  if not isinstance(token, access_control.ACLToken):
    raise access_control.UnauthorizedAccess(
        "Token is not properly specified. It should be an "
        "instance of grr.lib.access_control.ACLToken()")

  return token
129,988
Returns a list of Tasks leased for a certain time. Args: queue: The queue to query from. lease_seconds: The tasks will be leased for this long. limit: Number of values to fetch. timestamp: Range of times for consideration. Returns: A list of GrrMessage() objects leased.
def QueueQueryAndOwn(self, queue, lease_seconds, limit, timestamp):
  # Do the real work in a transaction
  try:
    lock = DB.LockRetryWrapper(queue, lease_time=lease_seconds)
    return self._QueueQueryAndOwn(
        lock.subject,
        lease_seconds=lease_seconds,
        limit=limit,
        timestamp=timestamp)
  except DBSubjectLockError:
    # This exception just means that we could not obtain the lock on the queue
    # so we just return an empty list, let the worker sleep and come back to
    # fetch more tasks.
    return []
  except Error as e:
    logging.warning("Datastore exception: %s", e)
    return []
130,005
Retry a DBSubjectLock until it succeeds. Args: subject: The subject which the lock applies to. retrywrap_timeout: How long to wait before retrying the lock. retrywrap_max_timeout: The maximum time to wait for a retry until we raise. blocking: If False, raise on first lock failure. lease_time: lock lease time in seconds. Returns: The DBSubjectLock object Raises: DBSubjectLockError: If the maximum retry count has been reached.
def LockRetryWrapper(self,
                     subject,
                     retrywrap_timeout=1,
                     retrywrap_max_timeout=10,
                     blocking=True,
                     lease_time=None):
  timeout = 0
  while timeout < retrywrap_max_timeout:
    try:
      return self.DBSubjectLock(subject, lease_time=lease_time)
    except DBSubjectLockError:
      if not blocking:
        raise
      stats_collector_instance.Get().IncrementCounter("datastore_retries")
      time.sleep(retrywrap_timeout)
      timeout += retrywrap_timeout

  raise DBSubjectLockError("Retry number exceeded.")
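The retry loop above is a general pattern: keep attempting to take a lock, sleep a fixed interval between attempts, and give up once the accumulated wait exceeds a budget. The standalone sketch below captures just that pattern with a user-supplied callable; the names are illustrative and it is not tied to GRR's data store API.

import time

def RetryUntilTimeout(acquire_fn, retry_interval=1.0, max_wait=10.0,
                      blocking=True):
  """Calls acquire_fn until it succeeds or max_wait seconds have been spent.

  acquire_fn should return the acquired resource or raise an exception on
  failure (any exception is treated as "try again" here for simplicity).
  """
  waited = 0.0
  while waited < max_wait:
    try:
      return acquire_fn()
    except Exception:
      if not blocking:
        raise
      time.sleep(retry_interval)
      waited += retry_interval
  raise RuntimeError("Could not acquire resource within %.1f seconds." % max_wait)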
130,014
Remove all specified attributes from a list of subjects. Args: subjects: The list of subjects that will have these attributes removed. attributes: A list of attributes. start: A timestamp, attributes older than start will not be deleted. end: A timestamp, attributes newer than end will not be deleted. sync: If true we block until the operation completes.
def MultiDeleteAttributes(self,
                          subjects,
                          attributes,
                          start=None,
                          end=None,
                          sync=True):
  for subject in subjects:
    self.DeleteAttributes(
        subject, attributes, start=start, end=end, sync=sync)
130,016
Reads responses for one request. Args: session_id: The session id to use. request_id: The id of the request. timestamp: A timestamp as used in the data store. Yields: fetched responses for the request
def ReadResponsesForRequestId(self, session_id, request_id, timestamp=None):
  request = rdf_flow_runner.RequestState(id=request_id, session_id=session_id)
  for _, responses in self.ReadResponses([request], timestamp=timestamp):
    return responses
130,028
Reads responses for multiple requests at the same time. Args: request_list: The list of requests the responses should be fetched for. timestamp: A timestamp as used in the data store. Yields: tuples (request, lists of fetched responses for the request)
def ReadResponses(self, request_list, timestamp=None): response_subjects = {} for request in request_list: response_subject = self.GetFlowResponseSubject(request.session_id, request.id) response_subjects[response_subject] = request response_data = dict( self.MultiResolvePrefix( response_subjects, self.FLOW_RESPONSE_PREFIX, timestamp=timestamp)) for response_urn, request in sorted(iteritems(response_subjects)): responses = [] for _, serialized, timestamp in response_data.get(response_urn, []): msg = rdf_flows.GrrMessage.FromSerializedString(serialized) msg.timestamp = timestamp responses.append(msg) yield (request, sorted(responses, key=lambda msg: msg.response_id))
130,029
Stores new flow requests and responses to the data store. Args: new_requests: A list of tuples (request, timestamp) to store in the data store. new_responses: A list of tuples (response, timestamp) to store in the data store. requests_to_delete: A list of requests that should be deleted from the data store.
def StoreRequestsAndResponses(self, new_requests=None, new_responses=None, requests_to_delete=None): to_write = {} if new_requests is not None: for request, timestamp in new_requests: subject = request.session_id.Add("state") queue = to_write.setdefault(subject, {}) queue.setdefault(self.FLOW_REQUEST_TEMPLATE % request.id, []).append( (request.SerializeToString(), timestamp)) if new_responses is not None: for response, timestamp in new_responses: # Status messages cause their requests to be marked as complete. This # allows us to quickly enumerate all the completed requests - it is # essentially an index for completed requests. if response.type == rdf_flows.GrrMessage.Type.STATUS: subject = response.session_id.Add("state") attribute = self.FLOW_STATUS_TEMPLATE % response.request_id to_write.setdefault(subject, {}).setdefault(attribute, []).append( (response.SerializeToString(), timestamp)) subject = self.GetFlowResponseSubject(response.session_id, response.request_id) attribute = self.FLOW_RESPONSE_TEMPLATE % (response.request_id, response.response_id) to_write.setdefault(subject, {}).setdefault(attribute, []).append( (response.SerializeToString(), timestamp)) to_delete = {} if requests_to_delete is not None: for request in requests_to_delete: queue = to_delete.setdefault(request.session_id.Add("state"), []) queue.append(self.FLOW_REQUEST_TEMPLATE % request.id) queue.append(self.FLOW_STATUS_TEMPLATE % request.id) for subject in set(to_write) | set(to_delete): self.MultiSet( subject, to_write.get(subject, {}), to_delete=to_delete.get(subject, []), sync=True)
130,030
Deletes all requests and responses for the given flows. Args: session_ids: A list of flows to destroy. request_limit: A limit on the number of requests to delete. Returns: A list of requests that were deleted.
def MultiDestroyFlowStates(self, session_ids, request_limit=None): subjects = [session_id.Add("state") for session_id in session_ids] to_delete = [] deleted_requests = [] for subject, values in self.MultiResolvePrefix( subjects, self.FLOW_REQUEST_PREFIX, limit=request_limit): for _, serialized, _ in values: request = rdf_flow_runner.RequestState.FromSerializedString(serialized) deleted_requests.append(request) # Drop all responses to this request. response_subject = self.GetFlowResponseSubject(request.session_id, request.id) to_delete.append(response_subject) # Mark the request itself for deletion. to_delete.append(subject) # Drop them all at once. self.DeleteSubjects(to_delete, sync=True) return deleted_requests
130,033
Reads all index entries for the given collection. Args: collection_id: ID of the collection for which the indexes should be retrieved. Yields: Tuples (index, ts, suffix).
def CollectionReadIndex(self, collection_id):
  for (attr, value, ts) in self.ResolvePrefix(
      collection_id, self.COLLECTION_INDEX_ATTRIBUTE_PREFIX):
    i = int(attr[len(self.COLLECTION_INDEX_ATTRIBUTE_PREFIX):], 16)
    yield (i, ts, int(value, 16))
130,041
Retrieves tasks from a queue without leasing them. This is good for a read only snapshot of the tasks. Args: queue: The task queue that this task belongs to, usually client.Queue() where client is the ClientURN object you want to schedule msgs on. limit: Number of values to fetch. Returns: A list of Task() objects.
def QueueQueryTasks(self, queue, limit=1):
  prefix = DataStore.QUEUE_TASK_PREDICATE_PREFIX
  all_tasks = []

  for _, serialized, ts in self.ResolvePrefix(
      queue, prefix, timestamp=DataStore.ALL_TIMESTAMPS):
    task = rdf_flows.GrrMessage.FromSerializedString(serialized)
    task.leased_until = ts
    all_tasks.append(task)

  return all_tasks[:limit]
130,044
Search the index for matches starting with target_prefix. Args: subject: The index to use. Should be a urn that points to the sha256 namespace. target_prefix: The prefix to match against the index. limit: Either a tuple of (start, limit) or a maximum number of results to return. Yields: URNs of files which have the same data as this file - as read from the index.
def FileHashIndexQuery(self, subject, target_prefix, limit=100):
  if isinstance(limit, (tuple, list)):
    start, length = limit  # pylint: disable=unpacking-non-sequence
  else:
    start = 0
    length = limit

  prefix = (DataStore.FILE_HASH_TEMPLATE % target_prefix).lower()
  results = self.ResolvePrefix(subject, prefix, limit=limit)

  for i, (_, hit, _) in enumerate(results):
    if i < start:
      continue
    if i >= start + length:
      break

    yield rdfvalue.RDFURN(hit)
130,046
Obtain the subject lock for lease_time seconds. This is never called directly but produced from the DataStore.LockedSubject() factory. Args: data_store: A data_store handler. subject: The name of a subject to lock. lease_time: The minimum length of time the lock will remain valid in seconds. Note this will be converted to usec for storage. Raises: ValueError: No lease time was provided.
def __init__(self, data_store, subject, lease_time=None):
  self.subject = utils.SmartStr(subject)
  self.store = data_store
  # expires should be stored as usec
  self.expires = None
  self.locked = False
  if lease_time is None:
    raise ValueError("Trying to lock without a lease time.")
  self._Acquire(lease_time)
  self.lease_time = lease_time
130,050
Download an aff4 file to the local filesystem overwriting it if it exists. Args: file_obj: An aff4 object that supports the file interface (Read, Seek) target_path: Full path of file to write to. buffer_size: Read in chunks this size.
def DownloadFile(file_obj, target_path, buffer_size=BUFFER_SIZE):
  logging.info(u"Downloading: %s to: %s", file_obj.urn, target_path)

  target_file = open(target_path, "wb")
  file_obj.Seek(0)
  count = 0

  data_buffer = file_obj.Read(buffer_size)
  while data_buffer:
    target_file.write(data_buffer)
    data_buffer = file_obj.Read(buffer_size)
    count += 1
    if not count % 3:
      logging.debug(u"Downloading: %s: %s done", file_obj.urn,
                    utils.FormatNumberAsString(count * buffer_size))
  target_file.close()
130,053
Recursively downloads a file entry to the target path. Args: dir_obj: An aff4 object that contains children. target_dir: Full path of the directory to write to. max_depth: Depth to download to. 1 means just the directory itself. depth: Current depth of recursion. overwrite: Should we overwrite files that exist. max_threads: Use this many threads to do the downloads.
def RecursiveDownload(dir_obj, target_dir, max_depth=10, depth=1, overwrite=False, max_threads=10): if not isinstance(dir_obj, aff4.AFF4Volume): return # Reuse the same threadpool as we call recursively. thread_pool = threadpool.ThreadPool.Factory("Downloader", max_threads) thread_pool.Start() for sub_file_entry in dir_obj.OpenChildren(): path_elements = [target_dir] sub_target_dir = u"/".join(path_elements) try: # Any file-like object with data in AFF4 should inherit AFF4Stream. if isinstance(sub_file_entry, aff4.AFF4Stream): args = (sub_file_entry.urn, sub_target_dir, sub_file_entry.token, overwrite) thread_pool.AddTask( target=CopyAFF4ToLocal, args=args, name="Downloader") elif "Container" in sub_file_entry.behaviours: if depth >= max_depth: # Don't go any deeper. continue try: os.makedirs(sub_target_dir) except OSError: pass RecursiveDownload( sub_file_entry, sub_target_dir, overwrite=overwrite, depth=depth + 1) except IOError: logging.exception("Unable to download %s", sub_file_entry.urn) finally: sub_file_entry.Close() # Join and stop the threadpool. if depth <= 1: thread_pool.Stop(join_timeout=THREADPOOL_JOIN_TIMEOUT)
130,054
Copy an AFF4 object that supports a read interface to local filesystem. Args: aff4_urn: URN of thing to copy. target_dir: Directory to copy the file to. token: Auth token. overwrite: If True overwrite the file if it exists. Returns: If aff4_urn points to a file, returns path to the downloaded file. Otherwise returns None. By default file will only be overwritten if file size differs.
def CopyAFF4ToLocal(aff4_urn, target_dir, token=None, overwrite=False): try: fd = aff4.FACTORY.Open(aff4_urn, token=token) filepath = os.path.join(target_dir, fd.urn.Path()[1:]) # If urn points to a directory, just create it. if isinstance(fd, standard.VFSDirectory): try: os.makedirs(filepath) except OSError: pass return None # If urn points to a file, download it. elif isinstance(fd, aff4.AFF4Stream): if not os.path.isfile(filepath): try: # Ensure directory exists. os.makedirs(os.path.dirname(filepath)) except OSError: pass DownloadFile(fd, filepath) elif (os.stat(filepath)[stat.ST_SIZE] != fd.Get(fd.Schema.SIZE) or overwrite): # We should overwrite because user said, or file sizes differ. DownloadFile(fd, filepath) else: logging.info("File %s exists, skipping", filepath) return filepath else: raise ValueError("Opened urn is neither a downloaded file nor a " "directory: %s" % aff4_urn) except IOError as e: logging.exception("Failed to read %s due to %s", aff4_urn, e) raise
130,057
Iterate over all clients in a threadpool. Args: func: A function to call with each client urn. max_threads: Number of threads to use. token: Auth token. Raises: ValueError: If function not specified.
def __init__(self, func=None, max_threads=10, token=None):
  self.thread_pool = threadpool.ThreadPool.Factory(self.THREAD_POOL_NAME,
                                                   max_threads)
  self.thread_pool.Start()
  self.token = token
  self.func = func
  self.broken_subjects = []  # Entries that are broken or fail to run.

  self.out_queue = queue.Queue()
130,060
Iterate over all clients in a threadpool. Args: max_age: Maximum age in seconds of clients to check. client_chunksize: Number of clients to fetch in each chunk. **kwargs: Arguments passed to init.
def __init__(self, max_age, client_chunksize=25, **kwargs):
  super(IterateAllClients, self).__init__(**kwargs)
  self.client_chunksize = client_chunksize
  self.max_age = max_age
130,063
Return a dictionary of environment variables and their values. Implementation maps variables mentioned in https://en.wikipedia.org/wiki/Environment_variable#Windows to known KB definitions. Args: knowledge_base: A knowledgebase object. Returns: A dictionary built from a given knowledgebase object where keys are variables names and values are their values.
def GetWindowsEnvironmentVariablesMap(knowledge_base): environ_vars = {} if knowledge_base.environ_path: environ_vars["path"] = knowledge_base.environ_path if knowledge_base.environ_temp: environ_vars["temp"] = knowledge_base.environ_temp if knowledge_base.environ_systemroot: environ_vars["systemroot"] = knowledge_base.environ_systemroot if knowledge_base.environ_windir: environ_vars["windir"] = knowledge_base.environ_windir if knowledge_base.environ_programfiles: environ_vars["programfiles"] = knowledge_base.environ_programfiles environ_vars["programw6432"] = knowledge_base.environ_programfiles if knowledge_base.environ_programfilesx86: environ_vars["programfiles(x86)"] = knowledge_base.environ_programfilesx86 if knowledge_base.environ_systemdrive: environ_vars["systemdrive"] = knowledge_base.environ_systemdrive if knowledge_base.environ_allusersprofile: environ_vars["allusersprofile"] = knowledge_base.environ_allusersprofile environ_vars["programdata"] = knowledge_base.environ_allusersprofile if knowledge_base.environ_allusersappdata: environ_vars["allusersappdata"] = knowledge_base.environ_allusersappdata for user in knowledge_base.users: if user.appdata: environ_vars.setdefault("appdata", []).append(user.appdata) if user.localappdata: environ_vars.setdefault("localappdata", []).append(user.localappdata) if user.userdomain: environ_vars.setdefault("userdomain", []).append(user.userdomain) if user.userprofile: environ_vars.setdefault("userprofile", []).append(user.userprofile) return environ_vars
130,072
Take a string and expand any windows environment variables. Args: data_string: A string, e.g. "%SystemRoot%\\LogFiles" knowledge_base: A knowledgebase object. Returns: A string with available environment variables expanded. If we can't expand we just return the string with the original variables.
def ExpandWindowsEnvironmentVariables(data_string, knowledge_base):
  win_environ_regex = re.compile(r"%([^%]+?)%")
  components = []
  offset = 0
  for match in win_environ_regex.finditer(data_string):
    components.append(data_string[offset:match.start()])

    # KB environment variables are prefixed with environ_.
    kb_value = getattr(knowledge_base, "environ_%s" % match.group(1).lower(),
                       None)
    if isinstance(kb_value, string_types) and kb_value:
      components.append(kb_value)
    else:
      # Failed to expand, leave the variable as it was.
      components.append("%%%s%%" % match.group(1))
    offset = match.end()
  components.append(data_string[offset:])  # Append the final chunk.
  return "".join(components)
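To make the substitution behaviour concrete, here is a self-contained version of the same regex-based expansion that reads values from a plain dict instead of a knowledge base object. It is illustrative only; the function name and the sample mapping are invented.

import re

_WIN_ENVIRON_REGEX = re.compile(r"%([^%]+?)%")

def ExpandFromMapping(data_string, env):
  """Expands %VAR% tokens using `env`; unknown variables are left untouched."""
  def _Replace(match):
    value = env.get(match.group(1).lower())
    return value if value else match.group(0)
  return _WIN_ENVIRON_REGEX.sub(_Replace, data_string)

print(ExpandFromMapping(r"%SystemRoot%\LogFiles\%Unknown%",
                        {"systemroot": r"C:\Windows"}))
# -> C:\Windows\LogFiles\%Unknown%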
130,073
Check if a condition matches an object. Args: condition: A string condition e.g. "os == 'Windows'" check_object: Object to validate, e.g. an rdf_client.KnowledgeBase() Returns: True or False depending on whether the condition matches. Raises: ConditionError: If condition is bad.
def CheckCondition(condition, check_object):
  try:
    of = objectfilter.Parser(condition).Parse()
    compiled_filter = of.Compile(objectfilter.BaseFilterImplementation)
    return compiled_filter.Matches(check_object)
  except objectfilter.Error as e:
    raise ConditionError(e)
130,074
Take a string and expand Windows user environment variables. Args: data_string: A string, e.g. "%TEMP%\\LogFiles" knowledge_base: A knowledgebase object. sid: A Windows SID for a user to expand for. username: A Windows user name to expand for. Returns: A string with available environment variables expanded.
def ExpandWindowsUserEnvironmentVariables(data_string,
                                          knowledge_base,
                                          sid=None,
                                          username=None):
  win_environ_regex = re.compile(r"%([^%]+?)%")
  components = []
  offset = 0
  for match in win_environ_regex.finditer(data_string):
    components.append(data_string[offset:match.start()])

    kb_user = knowledge_base.GetUser(sid=sid, username=username)
    kb_value = None
    if kb_user:
      kb_value = getattr(kb_user, match.group(1).lower(), None)
    if isinstance(kb_value, string_types) and kb_value:
      components.append(kb_value)
    else:
      components.append("%%%s%%" % match.group(1))
    offset = match.end()

  components.append(data_string[offset:])  # Append the final chunk.
  return "".join(components)
130,075
Fetches extended file attributes. Args: filepath: A path to the file. Yields: `ExtAttr` pairs.
def GetExtAttrs(filepath): path = CanonicalPathToLocalPath(filepath) try: attr_names = xattr.listxattr(path) except (IOError, OSError, UnicodeDecodeError) as error: msg = "Failed to retrieve extended attributes for '%s': %s" logging.error(msg, path, error) return # `xattr` (version 0.9.2) decodes names as UTF-8. Since we (and the system) # allows for names and values to be arbitrary byte strings, we use `bytes` # rather than `unicode` objects here. Therefore we have to re-encode what # `xattr` has decoded. Additionally, because the decoding that `xattr` does # may fail, we additionally guard against such exceptions. def EncodeUtf8(attr_name): if isinstance(attr_name, Text): return attr_name.encode("utf-8") if isinstance(attr_name, bytes): return attr_name raise TypeError("Unexpected type `%s`" % type(attr_name)) for attr_name in attr_names: attr_name = EncodeUtf8(attr_name) try: attr_value = xattr.getxattr(path, attr_name) except (IOError, OSError) as error: msg = "Failed to retrieve attribute '%s' for '%s': %s" logging.error(msg, attr_name, path, error) continue yield rdf_client_fs.ExtAttr(name=attr_name, value=attr_value)
130,079
Constructor. Args: unresponsive_kill_period: The time in seconds which we wait for a heartbeat.
def __init__(self, unresponsive_kill_period):
  super(NannyThread, self).__init__(name="Nanny")
  self.last_heart_beat_time = time.time()
  self.unresponsive_kill_period = unresponsive_kill_period
  self.running = True
  self.daemon = True
  self.proc = psutil.Process()
  self.memory_quota = config.CONFIG["Client.rss_max_hard"] * 1024 * 1024
130,080
Set function argument types and return types for an ObjC library. Args: libname: Library name string fn_table: List of (function, [arg types], return types) tuples Returns: ctypes.CDLL with types set according to fn_table Raises: ErrorLibNotFound: Can't find specified lib
def SetCTypesForLibrary(libname, fn_table):
  libpath = ctypes.util.find_library(libname)
  if not libpath:
    raise ErrorLibNotFound('Library %s not found' % libname)

  lib = ctypes.cdll.LoadLibrary(libpath)

  # We need to define input / output parameters for all functions we use
  for (function, args, result) in fn_table:
    f = getattr(lib, function)
    f.argtypes = args
    f.restype = result

  return lib
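The fn_table format above is just a list of (name, argument types, return type) triples. As a concrete illustration, the snippet below applies the same idea to the C standard library using plain ctypes; the libc example is an assumption for demonstration (it expects a Unix-like system) and is not part of the GRR ObjC bindings.

import ctypes
import ctypes.util

# (function name, [argument ctypes], return ctype) -- same shape as fn_table.
LIBC_FN_TABLE = [
    ('strlen', [ctypes.c_char_p], ctypes.c_size_t),
]

libc_path = ctypes.util.find_library('c')  # Assumes libc can be located.
libc = ctypes.cdll.LoadLibrary(libc_path)
for name, argtypes, restype in LIBC_FN_TABLE:
  fn = getattr(libc, name)
  fn.argtypes = argtypes
  fn.restype = restype

print(libc.strlen(b'hello'))  # -> 5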
130,111
Package a CoreFoundation object in a Python wrapper. Args: obj: The CoreFoundation object. Returns: One of CFBoolean, CFNumber, CFString, CFDictionary, CFArray. Raises: TypeError: If the type is not supported.
def WrapCFTypeInPython(self, obj):
  obj_type = self.dll.CFGetTypeID(obj)

  if obj_type == self.dll.CFBooleanGetTypeID():
    return CFBoolean(obj)
  elif obj_type == self.dll.CFNumberGetTypeID():
    return CFNumber(obj)
  elif obj_type == self.dll.CFStringGetTypeID():
    return CFString(obj)
  elif obj_type == self.dll.CFDictionaryGetTypeID():
    return CFDictionary(obj)
  elif obj_type == self.dll.CFArrayGetTypeID():
    return CFArray(obj)
  else:
    raise TypeError('Unknown type for object: {0}'.format(obj))
130,120
Copy all Job Dictionaries from the ServiceManagement. Args: domain: The name of a constant in Foundation referencing the domain. Will copy all launchd services by default. Returns: A marshalled python list of dicts containing the job dictionaries.
def SMGetJobDictionaries(self, domain='kSMDomainSystemLaunchd'):
  cfstring_launchd = ctypes.c_void_p.in_dll(self.dll, domain)
  return CFArray(self.dll.SMCopyAllJobDictionaries(cfstring_launchd))
130,122
Returns dictionary values or default. Args: key: string. Dictionary key to look up. default: string. Return this value if key not found. stringify: bool. Force all return values to string for compatibility reasons. Returns: python-wrapped CF object or default if not found.
def get(self, key, default='', stringify=True):
  obj = self.__getitem__(key)
  if obj is None:
    obj = default
  elif stringify:
    obj = str(obj)
  return obj
130,131
Client update for rpm based distros. Upgrading rpms is a bit more tricky than upgrading deb packages since there is a preinstall script that kills the running GRR daemon and, thus, also the installer process. We need to make sure we detach the child process properly and therefore cannot use client_utils_common.Execute(). Args: path: Path to the .rpm.
def _InstallRpm(self, path): pid = os.fork() if pid == 0: # This is the child that will become the installer process. cmd = "/bin/rpm" cmd_args = [cmd, "-U", "--replacepkgs", "--replacefiles", path] # We need to clean the environment or rpm will fail - similar to the # use_client_context=False parameter. env = os.environ.copy() env.pop("LD_LIBRARY_PATH", None) env.pop("PYTHON_PATH", None) # This call doesn't return. os.execve(cmd, cmd_args, env) else: # The installer will run in the background and kill the main process # so we just wait. If something goes wrong, the nanny will restart the # service after a short while and the client will come back to life. time.sleep(1000)
130,155
Creates a temporary directory based on the environment configuration. The directory will be placed in folder as specified by the `TEST_TMPDIR` environment variable if available or fallback to `Test.tmpdir` of the current configuration if not. Args: suffix: A suffix to end the directory name with. prefix: A prefix to begin the directory name with. Returns: An absolute path to the created directory.
def TempDirPath(suffix="", prefix="tmp"):
  precondition.AssertType(suffix, Text)
  precondition.AssertType(prefix, Text)

  return tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=_TempRootPath())
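The _TempRootPath helper referenced above is not shown here; the sketch below illustrates the documented behaviour (prefer the TEST_TMPDIR environment variable, otherwise fall back to a configured directory) as a plausible, assumed implementation rather than the actual one.

import os
import tempfile

def GuessTempRootPath(config_tmpdir="/tmp/grr-test"):
  """Returns the directory new temp dirs should be created in (sketch)."""
  root = os.environ.get("TEST_TMPDIR") or config_tmpdir
  if not os.path.isdir(root):
    os.makedirs(root)
  return root

# tempfile.mkdtemp picks a unique name under the chosen root.
path = tempfile.mkdtemp(prefix="tmp", dir=GuessTempRootPath())
print(path)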
130,157
Creates a new gzipped output tempfile for the output type. We write JSON data to gzip_filehandle to get compressed data. We hold a reference to the original filehandle (gzip_filehandle_parent) so we can pass the gzip data to bigquery. Args: output_type: string of export type to be used in filename. e.g. ExportedFile Returns: A TempOutputTracker object
def _CreateOutputFileHandles(self, output_type):
  gzip_filehandle_parent = tempfile.NamedTemporaryFile(suffix=output_type)
  gzip_filehandle = gzip.GzipFile(gzip_filehandle_parent.name, "wb",
                                  self.GZIP_COMPRESSION_LEVEL,
                                  gzip_filehandle_parent)
  self.temp_output_trackers[output_type] = TempOutputTracker(
      output_type=output_type,
      gzip_filehandle=gzip_filehandle,
      gzip_filehandle_parent=gzip_filehandle_parent)
  return self.temp_output_trackers[output_type]
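For readers unfamiliar with the gzip.GzipFile constructor used above: the fourth positional argument is the underlying fileobj, so compressed bytes land in the temp file while the GzipFile wrapper is what gets written to. A minimal standalone sketch of that arrangement, writing newline-delimited JSON, follows; names and data are illustrative.

import gzip
import json
import tempfile

# The parent temp file holds the compressed bytes on disk.
parent = tempfile.NamedTemporaryFile(suffix="ExportedFile")
# Writes go through the GzipFile wrapper and end up compressed in `parent`.
writer = gzip.GzipFile(parent.name, "wb", 9, parent)

for record in [{"path": "/etc/passwd"}, {"path": "/etc/hosts"}]:
  writer.write((json.dumps(record) + "\n").encode("utf-8"))

writer.flush()  # Make the compressed size visible for os.path.getsize checks.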
130,178
Write newline separated JSON dicts for each value. We write each dict separately so we don't have to hold all of the output streams in memory. We open and close the JSON array manually with []. Args: state: rdf_protodict.AttributedDict with the plugin's state. values: RDF values to export.
def WriteValuesToJSONFile(self, state, values): value_counters = {} max_post_size = config.CONFIG["BigQuery.max_file_post_size"] for value in values: class_name = value.__class__.__name__ output_tracker, created = self._GetTempOutputFileHandles(class_name) # If our output stream is getting huge we should flush everything now and # set up new output files. Only start checking when we are getting within # range of the limit because we need to flush the stream to check the # size. Start counting at 0 so we check each file the first time. value_counters[class_name] = value_counters.get(class_name, -1) + 1 if not value_counters[class_name] % max_post_size // 1000: # Flush our temp gzip handle so we can stat it to see how big it is. output_tracker.gzip_filehandle.flush() if os.path.getsize(output_tracker.gzip_filehandle.name) > max_post_size: # Flush what we have and get new temp output handles. self.Flush(state) value_counters[class_name] = 0 output_tracker, created = self._GetTempOutputFileHandles(class_name) if not output_tracker.schema: output_tracker.schema = self.RDFValueToBigQuerySchema(value) if created: # Omit the leading newline for the first entry in the file. self._WriteJSONValue(output_tracker.gzip_filehandle, value) else: self._WriteJSONValue( output_tracker.gzip_filehandle, value, delimiter="\n") for output_tracker in itervalues(self.temp_output_trackers): output_tracker.gzip_filehandle.flush()
130,182
Terminate a flow. Args: flow_id: The flow session_id to terminate. reason: A reason to log. status: Status code used in the generated status message. token: The access token to be used for this request. Raises: FlowError: If the flow can not be found.
def TerminateAFF4Flow(cls, flow_id, reason=None, status=None, token=None): flow_obj = aff4.FACTORY.Open( flow_id, aff4_type=GRRFlow, mode="rw", token=token) if not flow_obj: raise FlowError("Could not terminate flow %s" % flow_id) with flow_obj: runner = flow_obj.GetRunner() if not runner.IsRunning(): return if token is None: token = access_control.ACLToken() if reason is None: reason = "Manual termination by console." # This calls runner.Terminate to kill the flow runner.Error(reason, status_code=status) flow_obj.Log("Terminated by user {0}. Reason: {1}".format( token.username, reason)) # From now on we run with supervisor access super_token = token.SetUID() # Also terminate its children children_to_kill = aff4.FACTORY.MultiOpen( flow_obj.ListChildren(), token=super_token, aff4_type=GRRFlow) for child_obj in children_to_kill: cls.TerminateAFF4Flow( child_obj.urn, reason="Parent flow terminated.", token=super_token)
130,208
Returns the ResultCollection for the flow with a given flow_id. Args: flow_id: The id of the flow, a RDFURN of the form aff4:/flows/F:123456. Returns: The collection containing the results for the flow identified by the id.
def ResultCollectionForFID(cls, flow_id):
  # TODO: Disallow/remove URNs after migration.
  if not isinstance(flow_id, rdfvalue.RDFURN):
    flow_id = rdfvalue.RDFURN(flow_id)

  return sequential_collection.GeneralIndexedCollection(
      flow_id.Add(RESULTS_SUFFIX))
130,209
This function expands paths from the args and returns registry keys. Args: args: An `rdf_file_finder.FileFinderArgs` object. Yields: `rdf_client_fs.StatEntry` instances.
def RegistryKeyFromClient(args):
  for path in _GetExpandedPaths(args):
    pathspec = rdf_paths.PathSpec(
        path=path, pathtype=rdf_paths.PathSpec.PathType.REGISTRY)
    with vfs.VFSOpen(pathspec) as file_obj:
      yield file_obj.Stat()
130,221
Deletes the pending notification with the given timestamp. Args: timestamp: The timestamp of the notification. Assumed to be unique. Raises: UniqueKeyError: Raised if multiple notifications have the timestamp.
def DeletePendingNotification(self, timestamp): shown_notifications = self.Get(self.Schema.SHOWN_NOTIFICATIONS) if not shown_notifications: shown_notifications = self.Schema.SHOWN_NOTIFICATIONS() pending = self.Get(self.Schema.PENDING_NOTIFICATIONS) if not pending: return # Remove all notifications with the given timestamp from pending # and add them to the shown notifications. delete_count = 0 for idx in reversed(range(0, len(pending))): if pending[idx].timestamp == timestamp: shown_notifications.Append(pending[idx]) pending.Pop(idx) delete_count += 1 if delete_count > 1: raise UniqueKeyError("Multiple notifications at %s" % timestamp) self.Set(self.Schema.PENDING_NOTIFICATIONS, pending) self.Set(self.Schema.SHOWN_NOTIFICATIONS, shown_notifications)
130,239
Retrieves a public key from the list published by Identity-Aware Proxy. The key file is re-fetched if necessary. Args: key_id: Key id. Returns: String with a key. Raises: KeyNotFoundError: if the key is not found in the key file. KeysCanNotBeFetchedError: if the key file can't be fetched.
def GetIapKey(key_id):
  global _KEY_CACHE
  key = _KEY_CACHE.get(key_id)
  if not key:
    # Re-fetch the key file.
    resp = requests.get("https://www.gstatic.com/iap/verify/public_key")
    if resp.status_code != 200:
      raise KeysCanNotBeFetchedError(
          "Unable to fetch IAP keys: {} / {} / {}".format(
              resp.status_code, resp.headers, resp.text))

    _KEY_CACHE = resp.json()
    key = _KEY_CACHE.get(key_id)
    if not key:
      raise KeyNotFoundError("Key {!r} not found".format(key_id))

  return key
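The function above is a simple "check module-level cache, refetch on miss, then fail loudly" helper. The sketch below shows how a caller might use it to verify a signed Identity-Aware Proxy assertion with PyJWT; the jwt usage and the ES256 algorithm choice are assumptions for illustration, not taken from the code above.

import jwt  # PyJWT, assumed to be available.

def VerifyIapJwt(assertion, expected_audience):
  """Verifies an IAP JWT using the cached Google public keys (sketch)."""
  key_id = jwt.get_unverified_header(assertion).get("kid")
  if not key_id:
    raise ValueError("JWT is missing the 'kid' header.")
  key = GetIapKey(key_id)  # Defined above; raises if the key is unknown.
  # IAP signs its assertions with ES256 (assumption for this sketch).
  return jwt.decode(assertion, key, algorithms=["ES256"],
                    audience=expected_audience)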
130,266
OutputPlugin constructor. Constructor should be overridden to maintain instance-local state - i.e. state that gets accumulated during the single output plugin run and that should be used to update the global state via UpdateState method. Args: source_urn: URN of the data source to process the results from. args: This plugin's arguments. token: Security token.
def __init__(self, source_urn=None, args=None, token=None):
  self.source_urn = source_urn
  self.args = args
  self.token = token
  self.lock = threading.RLock()
130,268
Changes interface to a statically set IP. Sets IP configs to local if no parameters are passed. Args: interface: Name of the interface. ip: IP address. subnet: Subnet mask. gw: IP address of the default gateway. Returns: A tuple of stdout, stderr, exit_status.
def NetshStaticIp(interface,
                  ip=u'127.0.0.9',
                  subnet=u'255.255.255.255',
                  gw=u'127.0.0.1'):
  args = [
      '/c', 'netsh', 'interface', 'ip', 'set', 'address', interface, 'static',
      ip, subnet, gw, '1'
  ]
  # pylint: disable=undefined-variable
  res = client_utils_common.Execute(
      'cmd', args, time_limit=-1, bypass_whitelist=True)
  return res
130,274
Tries to disable an interface. Only works on Vista and 7. Args: interface: Name of the interface to disable. Returns: res which is a tuple of (stdout, stderr, exit_status, time_taken).
def DisableInterfaces(interface):
  set_tested_versions = ['vista', '2008']
  set_args = ['/c', 'netsh', 'set', 'interface', interface, 'DISABLED']
  host_version = platform.platform().lower()
  for version in set_tested_versions:
    if host_version.find(version) != -1:
      # pylint: disable=undefined-variable
      res = client_utils_common.Execute(
          'cmd', set_args, time_limit=-1, bypass_whitelist=True)
      return res
  return ('', 'Command not available for this version.', 99, '')
130,275
Sends a message to a user. Args: msg: Message to be displayed to user. Returns: res which is a tuple of (stdout, stderr, exit_status, time_taken).
def MsgUser(msg):
  msg_tested_versions = ['xp', 'vista', '2008', '2003']
  msg_args = ['/c', '%SystemRoot%\\System32\\msg.exe', '*', '/TIME:0']
  host_version = platform.platform().lower()
  if not msg:
    return ('Command not ran.', 'Empty message.', -1)
  else:
    msg_args.extend([msg])
  for version in msg_tested_versions:
    if host_version.find(version) != -1:
      # pylint: disable=undefined-variable
      res = client_utils_common.Execute(
          'cmd', msg_args, time_limit=-1, bypass_whitelist=True)
      return res
  return ('', 'Command not available for this version.', -1)
130,277
Receives a value and fills it into a DataBlob. Args: value: value to set raise_on_error: if True, raise if we can't serialize. If False, set the key to an error string. Returns: self Raises: TypeError: if the value can't be serialized and raise_on_error is True
def SetValue(self, value, raise_on_error=True): type_mappings = [(Text, "string"), (bytes, "data"), (bool, "boolean"), (int, "integer"), (long, "integer"), (dict, "dict"), (float, "float")] if value is None: self.none = "None" elif isinstance(value, rdfvalue.RDFValue): self.rdf_value.data = value.SerializeToString() self.rdf_value.age = int(value.age) self.rdf_value.name = value.__class__.__name__ elif isinstance(value, (list, tuple)): self.list.content.Extend([ DataBlob().SetValue(v, raise_on_error=raise_on_error) for v in value ]) elif isinstance(value, set): self.set.content.Extend([ DataBlob().SetValue(v, raise_on_error=raise_on_error) for v in value ]) elif isinstance(value, dict): self.dict.FromDict(value, raise_on_error=raise_on_error) else: for type_mapping, member in type_mappings: if isinstance(value, type_mapping): setattr(self, member, value) return self message = "Unsupported type for ProtoDict: %s" % type(value) if raise_on_error: raise TypeError(message) setattr(self, "string", message) return self
130,304
Add another member to the array. Args: value: The new data to append to the array. **kwarg: Create a new element from these keywords. Returns: The value which was added. This can be modified further by the caller and changes will be propagated here. Raises: ValueError: If the value to add is not allowed.
def Append(self, value=None, **kwarg):
  if self.rdf_type is not None:
    if (isinstance(value, rdfvalue.RDFValue) and
        value.__class__ != self.rdf_type):
      raise ValueError("Can only accept %s" % self.rdf_type)

    try:
      # Try to coerce the value.
      value = self.rdf_type(value, **kwarg)  # pylint: disable=not-callable
    except (TypeError, ValueError):
      raise ValueError("Unable to initialize %s from type %s" %
                       (self.__class__.__name__, type(value)))

  self.content.Append(DataBlob().SetValue(value))
130,308
Constructs a single sample that best represents a list of samples. Args: samples: An iterable collection of `CpuSample` instances. Returns: A `CpuSample` instance representing `samples`. Raises: ValueError: If `samples` is empty.
def FromMany(cls, samples):
  if not samples:
    raise ValueError("Empty `samples` argument")

  # It only makes sense to average the CPU percentage. For all other values
  # we simply take the biggest of them.
  cpu_percent = sum(sample.cpu_percent for sample in samples) / len(samples)

  return CpuSample(
      timestamp=max(sample.timestamp for sample in samples),
      cpu_percent=cpu_percent,
      user_cpu_time=max(sample.user_cpu_time for sample in samples),
      system_cpu_time=max(sample.system_cpu_time for sample in samples))
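To see the aggregation rule in isolation (average the percentage, take the maximum of everything else), here is a tiny self-contained illustration using a namedtuple instead of the real CpuSample RDF value; the numbers are made up.

import collections

Sample = collections.namedtuple(
    "Sample", ["timestamp", "cpu_percent", "user_cpu_time", "system_cpu_time"])

def CombineSamples(samples):
  """Collapses several samples into one: mean percentage, max of the rest."""
  if not samples:
    raise ValueError("Empty `samples` argument")
  return Sample(
      timestamp=max(s.timestamp for s in samples),
      cpu_percent=sum(s.cpu_percent for s in samples) / len(samples),
      user_cpu_time=max(s.user_cpu_time for s in samples),
      system_cpu_time=max(s.system_cpu_time for s in samples))

combined = CombineSamples([Sample(1, 10.0, 1.0, 0.5), Sample(2, 30.0, 2.5, 0.7)])
print(combined)  # Sample(timestamp=2, cpu_percent=20.0, user_cpu_time=2.5, system_cpu_time=0.7)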
130,309
Constructs a single sample that best represents a list of samples. Args: samples: An iterable collection of `IOSample` instances. Returns: An `IOSample` instance representing `samples`. Raises: ValueError: If `samples` is empty.
def FromMany(cls, samples):
  if not samples:
    raise ValueError("Empty `samples` argument")

  return IOSample(
      timestamp=max(sample.timestamp for sample in samples),
      read_bytes=max(sample.read_bytes for sample in samples),
      write_bytes=max(sample.write_bytes for sample in samples))
130,310
Constructs a copy of given stats but downsampled to given interval. Args: stats: A `ClientStats` instance. interval: A downsampling interval. Returns: A downsampled `ClientStats` instance.
def Downsampled(cls, stats, interval=None):
  interval = interval or cls.DEFAULT_SAMPLING_INTERVAL

  result = cls(stats)
  result.cpu_samples = cls._Downsample(
      kind=CpuSample, samples=stats.cpu_samples, interval=interval)
  result.io_samples = cls._Downsample(
      kind=IOSample, samples=stats.io_samples, interval=interval)
  return result
130,311
Get a User protobuf for a specific user. Args: knowledge_base: An rdf_client.KnowledgeBase object. user: Username as string. May contain domain like DOMAIN\\user. Returns: A User rdfvalue or None
def GetUserInfo(knowledge_base, user): # TODO: This docstring cannot be a raw literal because there are # issues with raw unicode literals on Python 2. Once support for Python 2 is # dropped, it can be made raw again. # pylint: disable=g-docstring-has-escape # pylint: enable=g-docstring-has-escape if "\\" in user: domain, user = user.split("\\", 1) users = [ u for u in knowledge_base.users if u.username == user and u.userdomain == domain ] else: users = [u for u in knowledge_base.users if u.username == user] if not users: return else: return users[0]
130,314
Runs a flow and waits for it to finish. Args: client_id: The client id of the client to run on. token: The datastore access token. timeout: How long to wait for a flow to complete, maximum. **flow_args: Pass through to flow. Returns: The urn of the flow that was run.
def StartFlowAndWait(client_id,
                     token=None,
                     timeout=DEFAULT_TIMEOUT,
                     **flow_args):
  flow_urn = flow.StartAFF4Flow(
      client_id=client_id, token=token, sync=True, **flow_args)

  WaitForFlow(flow_urn, token=token, timeout=timeout)

  return flow_urn
130,317
Take a string as a path on a client and interpolate with client data. Args: path: A single string/unicode to be interpolated. knowledge_base: An rdf_client.KnowledgeBase object. users: A list of string usernames, or None. path_args: A dict of additional args to use in interpolation. These take precedence over any system provided variables. depth: A counter for recursion depth. Returns: A single string if users is None, otherwise a list of strings.
def InterpolatePath(path, knowledge_base, users=None, path_args=None, depth=0): sys_formatters = { # TODO(user): Collect this during discovery from the registry. # HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\ # Value: SystemRoot "systemroot": "c:\\Windows" } # Override any system formatters with path_args. if path_args: sys_formatters.update(path_args) if users: results = [] for user in users: # Extract and interpolate user specific formatters. user = GetUserInfo(knowledge_base, user) if user: formatters = dict((x.name, y) for x, y in user.ListSetFields()) formatters.update(sys_formatters) try: results.append(path.format(**formatters)) except KeyError: pass # We may be missing values for some users. return results else: try: path = path.format(**sys_formatters) except KeyError: logging.warning("Failed path interpolation on %s", path) return "" if "{" in path and depth < 10: path = InterpolatePath( path, knowledge_base=knowledge_base, users=users, path_args=path_args, depth=depth + 1) return path
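The interpolation above ultimately relies on str.format with a dictionary of formatter values, where user-specific values override or extend the system defaults. A small self-contained demonstration of that mechanism follows; the variable names and values are illustrative.

def FormatPath(path, system_vars, user_vars=None):
  """Expands {name} placeholders; returns None if a value is missing."""
  formatters = dict(system_vars)
  formatters.update(user_vars or {})
  try:
    return path.format(**formatters)
  except KeyError:
    return None  # Missing value for this user, mirroring the skip behaviour.

system_vars = {"systemroot": "c:\\Windows"}
print(FormatPath("{systemroot}\\System32", system_vars))
# -> c:\Windows\System32
print(FormatPath("{appdata}\\Mozilla", system_vars,
                 {"appdata": "c:\\Users\\alice\\AppData\\Roaming"}))
# -> c:\Users\alice\AppData\Roaming\Mozilla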
130,318
Parses request and returns a list of filter callables. Each callable will be called with the StatEntry and returns True if the entry should be suppressed. Args: request: A FindSpec that describes the search. Returns: a list of callables which return True if the file is to be suppressed.
def BuildChecks(self, request): result = [] if request.HasField("start_time") or request.HasField("end_time"): def FilterTimestamp(file_stat, request=request): return file_stat.HasField("st_mtime") and ( file_stat.st_mtime < request.start_time or file_stat.st_mtime > request.end_time) result.append(FilterTimestamp) if request.HasField("min_file_size") or request.HasField("max_file_size"): def FilterSize(file_stat, request=request): return file_stat.HasField("st_size") and ( file_stat.st_size < request.min_file_size or file_stat.st_size > request.max_file_size) result.append(FilterSize) if request.HasField("perm_mode"): def FilterPerms(file_stat, request=request): return (file_stat.st_mode & request.perm_mask) != request.perm_mode result.append(FilterPerms) if request.HasField("uid"): def FilterUID(file_stat, request=request): return file_stat.st_uid != request.uid result.append(FilterUID) if request.HasField("gid"): def FilterGID(file_stat, request=request): return file_stat.st_gid != request.gid result.append(FilterGID) if request.HasField("path_regex"): regex = request.path_regex def FilterPath(file_stat, regex=regex): return not regex.Search(file_stat.pathspec.Basename()) result.append(FilterPath) if request.HasField("data_regex"): def FilterData(file_stat, **_): return not self.TestFileContent(file_stat) result.append(FilterData) return result
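The pattern above, building a list of small predicate callables and suppressing any entry for which at least one of them returns True, can be shown compactly without the GRR request objects. The sketch below uses plain dicts and is purely illustrative.

def BuildSizeAndNameChecks(min_size=None, name_substring=None):
  """Returns callables that return True when an entry should be suppressed."""
  checks = []
  if min_size is not None:
    checks.append(lambda entry: entry["st_size"] < min_size)
  if name_substring is not None:
    checks.append(lambda entry: name_substring not in entry["name"])
  return checks

checks = BuildSizeAndNameChecks(min_size=1024, name_substring=".log")
entries = [{"name": "a.log", "st_size": 4096}, {"name": "b.txt", "st_size": 10}]
kept = [e for e in entries if not any(check(e) for check in checks)]
print([e["name"] for e in kept])  # -> ['a.log']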
130,321
Looks for approvals for an object and returns available valid tokens. Args: object_urn: Urn of the object we want access to. token: The token to use to lookup the ACLs. username: The user to get the approval for, if "" we get it from the token. Returns: A token for access to the object on success, otherwise raises. Raises: UnauthorizedAccess: If there are no valid approvals available.
def GetApprovalForObject(object_urn, token=None, username=""): if token is None: raise access_control.UnauthorizedAccess( "No token given, cannot authenticate.") if not username: username = token.username approvals_root_urn = aff4.ROOT_URN.Add("ACL").Add( object_urn.Path()).Add(username) children_urns = list(aff4.FACTORY.ListChildren(approvals_root_urn)) if not children_urns: raise access_control.UnauthorizedAccess( "No approval found for user %s" % utils.SmartStr(username), subject=object_urn) last_error = None approvals = aff4.FACTORY.MultiOpen( children_urns, mode="r", aff4_type=Approval, age=aff4.ALL_TIMES, token=token) for approval in approvals: try: test_token = access_control.ACLToken( username=username, reason=approval.Get(approval.Schema.REASON)) approval.CheckAccess(test_token) return test_token except access_control.UnauthorizedAccess as e: last_error = e if last_error: # We tried all possible approvals, but got no usable results. raise access_control.UnauthorizedAccess(last_error, subject=object_urn) else: # If last error is None, means that none of the URNs in children_urns # could be opened. This shouldn't really happen ever, but we have # to make sure to provide a meaningful error message. raise access_control.UnauthorizedAccess( "Couldn't open any of %d approvals " "for user %s" % (len(children_urns), utils.SmartStr(username)), subject=object_urn)
130,348
This function expands paths from the args and returns related stat entries. Args: args: An `rdf_file_finder.FileFinderArgs` object. Yields: `rdf_client_fs.StatEntry` instances.
def FileFinderOSFromClient(args):
  stat_cache = filesystem.StatCache()

  opts = args.action.stat

  for path in GetExpandedPaths(args):
    try:
      content_conditions = conditions.ContentCondition.Parse(args.conditions)
      for content_condition in content_conditions:
        with io.open(path, "rb") as fd:
          result = list(content_condition.Search(fd))
        if not result:
          raise _SkipFileException()

      # TODO: `opts.resolve_links` has type `RDFBool`, not `bool`.
      stat = stat_cache.Get(path, follow_symlink=bool(opts.resolve_links))
      stat_entry = client_utils.StatEntryFromStatPathSpec(
          stat, ext_attrs=opts.collect_ext_attrs)
      yield stat_entry
    except _SkipFileException:
      pass
130,370
Expands given path patterns. Args: args: A `FileFinderArgs` instance that dictates the behaviour of the path expansion. Yields: Absolute paths (as string objects) derived from input patterns. Raises: ValueError: For unsupported path types.
def GetExpandedPaths(args):
  if args.pathtype == rdf_paths.PathSpec.PathType.OS:
    pathtype = rdf_paths.PathSpec.PathType.OS
  else:
    raise ValueError("Unsupported path type: ", args.pathtype)

  opts = globbing.PathOpts(
      follow_links=args.follow_links,
      recursion_blacklist=_GetMountpointBlacklist(args.xdev),
      pathtype=pathtype)

  for path in args.paths:
    for expanded_path in globbing.ExpandPath(str(path), opts):
      yield expanded_path
130,371
Fetches a list of mountpoints. Args: only_physical: Determines whether only mountpoints for physical devices (e.g. hard disks) should be listed. If false, mountpoints for things such as memory partitions or `/dev/shm` will be returned as well. Returns: A set of mountpoints.
def _GetMountpoints(only_physical=True):
  partitions = psutil.disk_partitions(all=not only_physical)
  return set(partition.mountpoint for partition in partitions)
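A quick way to see what the helper above returns on the current machine, and how the physical/non-physical distinction plays out, is sketched below; it uses only psutil.disk_partitions, as the function itself does.

import psutil

physical = {p.mountpoint for p in psutil.disk_partitions(all=False)}
everything = {p.mountpoint for p in psutil.disk_partitions(all=True)}

# Virtual file systems (e.g. /proc, /sys, /dev/shm on Linux) only show up
# in the unfiltered listing.
print(sorted(everything - physical))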
130,372
Builds a list of mountpoints to ignore during recursive searches. Args: xdev: A `XDev` value that determines policy for crossing device boundaries. Returns: A set of mountpoints to ignore. Raises: ValueError: If `xdev` value is invalid.
def _GetMountpointBlacklist(xdev):
  if xdev == rdf_file_finder.FileFinderArgs.XDev.NEVER:
    # Never cross device boundaries, stop at all mount points.
    return _GetMountpoints(only_physical=False)

  if xdev == rdf_file_finder.FileFinderArgs.XDev.LOCAL:
    # Descend into file systems on physical devices only.
    physical = _GetMountpoints(only_physical=True)
    return _GetMountpoints(only_physical=False) - physical

  if xdev == rdf_file_finder.FileFinderArgs.XDev.ALWAYS:
    # Never stop at any device boundary.
    return set()

  raise ValueError("Incorrect `xdev` value: %s" % xdev)
130,373
Waits until flow processing thread is done processing flows. Args: timeout: If specified, is a max number of seconds to spend waiting. Raises: TimeOutWhileWaitingForFlowsToBeProcessedError: if timeout is reached.
def WaitUntilNoFlowsToProcess(self, timeout=None): t = self.flow_handler_thread if not t: return start_time = time.time() while True: with self.lock: # If the thread is dead, or there are no requests # to be processed/being processed, we stop waiting # and return from the function. if (not t.isAlive() or (not self._GetFlowRequestsReadyForProcessing() and not self.flow_handler_num_being_processed)): return time.sleep(0.2) if timeout and time.time() - start_time > timeout: raise TimeOutWhileWaitingForFlowsToBeProcessedError( "Flow processing didn't finish in time.")
130,457
Builds ExportedMetadata object for a given client id. Note: This is a legacy aff4-only implementation. TODO(user): deprecate as soon as REL_DB migration is done. Args: client: RDFURN of a client or VFSGRRClient object itself. token: Security token. Returns: ExportedMetadata object with metadata of the client.
def GetMetadataLegacy(client, token=None): if isinstance(client, rdfvalue.RDFURN): client_fd = aff4.FACTORY.Open(client, mode="r", token=token) else: client_fd = client metadata = ExportedMetadata() metadata.client_urn = client_fd.urn metadata.client_age = client_fd.urn.age metadata.hostname = utils.SmartUnicode( client_fd.Get(client_fd.Schema.HOSTNAME, "")) metadata.os = utils.SmartUnicode(client_fd.Get(client_fd.Schema.SYSTEM, "")) metadata.uname = utils.SmartUnicode(client_fd.Get(client_fd.Schema.UNAME, "")) metadata.os_release = utils.SmartUnicode( client_fd.Get(client_fd.Schema.OS_RELEASE, "")) metadata.os_version = utils.SmartUnicode( client_fd.Get(client_fd.Schema.OS_VERSION, "")) kb = client_fd.Get(client_fd.Schema.KNOWLEDGE_BASE) usernames = "" if kb: usernames = [user.username for user in kb.users] or "" metadata.usernames = utils.SmartUnicode(usernames) metadata.mac_address = utils.SmartUnicode( client_fd.Get(client_fd.Schema.MAC_ADDRESS, "")) system_labels = set() user_labels = set() for l in client_fd.GetLabels(): if l.owner == "GRR": system_labels.add(l.name) else: user_labels.add(l.name) metadata.labels = ",".join(sorted(system_labels | user_labels)) metadata.system_labels = ",".join(sorted(system_labels)) metadata.user_labels = ",".join(sorted(user_labels)) metadata.hardware_info = client_fd.Get(client_fd.Schema.HARDWARE_INFO) metadata.kernel_version = client_fd.Get(client_fd.Schema.KERNEL) return metadata
130,483
Constructor. Args: options: ExportOptions value, which contains settings that may or may not affect this converter's behavior.
def __init__(self, options=None):
  super(ExportConverter, self).__init__()
  self.options = options or ExportOptions()
130,487
Converts StatEntry to ExportedFile. Does nothing if StatEntry corresponds to a registry entry and not to a file. Args: metadata: ExportedMetadata to be used for conversion. stat_entry: StatEntry to be converted. token: Security token. Returns: List or generator with resulting RDFValues. Empty list if StatEntry corresponds to a registry entry and not to a file.
def Convert(self, metadata, stat_entry, token=None): return self.BatchConvert([(metadata, stat_entry)], token=token)
130,494
Converts a batch of StatEntry values to ExportedFile values at once. Args: metadata_value_pairs: a list or a generator of tuples (metadata, value), where metadata is ExportedMetadata to be used for conversion and value is a StatEntry to be converted. token: Security token. Yields: Resulting ExportedFile values. Empty list is a valid result and means that conversion wasn't possible.
def BatchConvert(self, metadata_value_pairs, token=None): if data_store.RelationalDBEnabled(): result_generator = self._BatchConvertRelational(metadata_value_pairs) else: result_generator = self._BatchConvertLegacy( metadata_value_pairs, token=token) for r in result_generator: yield r
130,501
Converts StatEntry to ExportedRegistryKey. Does nothing if StatEntry corresponds to a file and not a registry entry. Args: metadata: ExportedMetadata to be used for conversion. stat_entry: StatEntry to be converted. token: Security token. Returns: List or generator with resulting RDFValues. Empty list if StatEntry corresponds to a file and not to a registry entry.
def Convert(self, metadata, stat_entry, token=None): if stat_entry.pathspec.pathtype != rdf_paths.PathSpec.PathType.REGISTRY: return [] result = ExportedRegistryKey( metadata=metadata, urn=stat_entry.AFF4Path(metadata.client_urn), last_modified=stat_entry.st_mtime) if (stat_entry.HasField("registry_type") and stat_entry.HasField("registry_data")): result.type = stat_entry.registry_type # `data` can be value of arbitrary type and we need to return `bytes`. So, # if it is `bytes` we just pass it through. If it is not, we stringify it # to some human-readable form and turn it to `bytes` by UTF-8 encoding. data = stat_entry.registry_data.GetValue() if isinstance(data, bytes): result.data = data else: result.data = str(data).encode("utf-8") return [result]
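The bytes-vs-other-types normalization used above can be shown in isolation; the helper name below is hypothetical and only illustrates the rule applied to `registry_data`.

def _to_bytes(data):
  # Pass `bytes` through untouched; stringify anything else and UTF-8 encode.
  if isinstance(data, bytes):
    return data
  return str(data).encode("utf-8")

print(_to_bytes(b"\x00\x01"))  # b'\x00\x01'
print(_to_bytes(42))           # b'42'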
130,502
Converts GrrMessage into a set of RDFValues. Args: metadata: ExportedMetadata to be used for conversion. grr_message: GrrMessage to be converted. token: Security token. Returns: List or generator with resulting RDFValues.
def Convert(self, metadata, grr_message, token=None): return self.BatchConvert([(metadata, grr_message)], token=token)
130,524
Converts a batch of GrrMessages into a set of RDFValues at once. Args: metadata_value_pairs: a list or a generator of tuples (metadata, value), where metadata is ExportedMetadata to be used for conversion and value is a GrrMessage to be converted. token: Security token. Returns: Resulting RDFValues. Empty list is a valid result and means that conversion wasn't possible.
def BatchConvert(self, metadata_value_pairs, token=None): # Group messages by source (i.e. by client urn). msg_dict = {} for metadata, msg in metadata_value_pairs: msg_dict.setdefault(msg.source, []).append((metadata, msg)) metadata_objects = [] metadata_to_fetch = [] # Open the clients we don't have metadata for and fetch metadata. for client_urn in msg_dict: try: metadata_objects.append(self.cached_metadata[client_urn]) except KeyError: metadata_to_fetch.append(client_urn) if metadata_to_fetch: if data_store.RelationalDBEnabled(): client_ids = set(urn.Basename() for urn in metadata_to_fetch) infos = data_store.REL_DB.MultiReadClientFullInfo(client_ids) fetched_metadata = [ GetMetadata(client_id, info) for client_id, info in infos.items() ] else: client_fds = aff4.FACTORY.MultiOpen( metadata_to_fetch, mode="r", token=token) fetched_metadata = [ GetMetadataLegacy(client_fd, token=token) for client_fd in client_fds ] for metadata in fetched_metadata: self.cached_metadata[metadata.client_urn] = metadata metadata_objects.extend(fetched_metadata) data_by_type = {} for metadata in metadata_objects: try: for original_metadata, message in msg_dict[metadata.client_urn]: # Get source_urn and annotations from the original metadata # provided and original_timestamp from the payload age. new_metadata = ExportedMetadata(metadata) new_metadata.source_urn = original_metadata.source_urn new_metadata.annotations = original_metadata.annotations new_metadata.original_timestamp = message.payload.age cls_name = message.payload.__class__.__name__ # Create a dict of values for conversion keyed by type, so we can # apply the right converters to the right object types if cls_name not in data_by_type: converters_classes = ExportConverter.GetConvertersByValue( message.payload) data_by_type[cls_name] = { "converters": [cls(self.options) for cls in converters_classes], "batch_data": [(new_metadata, message.payload)] } else: data_by_type[cls_name]["batch_data"].append( (new_metadata, message.payload)) except KeyError: pass # Run all converters against all objects of the relevant type converted_batch = [] for dataset in itervalues(data_by_type): for converter in dataset["converters"]: converted_batch.extend( converter.BatchConvert(dataset["batch_data"], token=token)) return converted_batch
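The grouping step above, which buckets messages by client so metadata is fetched only once per client, is the plain dict.setdefault idiom. A standalone sketch with made-up sources and plain dicts standing in for GrrMessages:

pairs = [("md1", {"source": "C.0001", "payload": "a"}),
         ("md2", {"source": "C.0002", "payload": "b"}),
         ("md3", {"source": "C.0001", "payload": "c"})]

msg_dict = {}
for metadata, msg in pairs:
  # One bucket per source; each bucket keeps its (metadata, message) pairs.
  msg_dict.setdefault(msg["source"], []).append((metadata, msg))

print(sorted(msg_dict))         # ['C.0001', 'C.0002']
print(len(msg_dict["C.0001"]))  # 2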
130,525
Converts a single CheckResult. Args: metadata: ExportedMetadata to be used for conversion. checkresult: CheckResult to be converted. token: Security token. Yields: Resulting ExportedCheckResult. Empty list is a valid result and means that conversion wasn't possible.
def Convert(self, metadata, checkresult, token=None): if checkresult.HasField("anomaly"): for anomaly in checkresult.anomaly: exported_anomaly = ExportedAnomaly( type=anomaly.type, severity=anomaly.severity, confidence=anomaly.confidence) if anomaly.symptom: exported_anomaly.symptom = anomaly.symptom if anomaly.explanation: exported_anomaly.explanation = anomaly.explanation if anomaly.generated_by: exported_anomaly.generated_by = anomaly.generated_by if anomaly.anomaly_reference_id: exported_anomaly.anomaly_reference_id = "\n".join( anomaly.anomaly_reference_id) if anomaly.finding: exported_anomaly.finding = "\n".join(anomaly.finding) yield ExportedCheckResult( metadata=metadata, check_id=checkresult.check_id, anomaly=exported_anomaly) else: yield ExportedCheckResult( metadata=metadata, check_id=checkresult.check_id)
130,527
Creates a dynamic RDF proto struct class for a given osquery table. The fields of the proto will correspond to the columns of the table. Args: table: An osquery table for which the class is about to be generated. Returns: A class object corresponding to the given table.
def _RDFClass(cls, table): rdf_cls_name = "OsqueryTable{}".format(hash(table.query)) try: return cls._rdf_cls_cache[rdf_cls_name] except KeyError: pass rdf_cls = compatibility.MakeType(rdf_cls_name, (rdf_structs.RDFProtoStruct,), {}) rdf_cls.AddDescriptor( rdf_structs.ProtoEmbedded( name="metadata", field_number=1, nested=ExportedMetadata)) rdf_cls.AddDescriptor( rdf_structs.ProtoString(name="__query__", field_number=2)) for idx, column in enumerate(table.header.columns): # It is possible that RDF column is named "metadata". To avoid name clash # we must rename it to `__metadata__`. if column.name == "metadata": name = "__metadata__" else: name = column.name descriptor = rdf_structs.ProtoString(name=name, field_number=idx + 3) rdf_cls.AddDescriptor(descriptor) cls._rdf_cls_cache[rdf_cls_name] = rdf_cls return rdf_cls
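The same build-once-and-cache pattern can be sketched with plain `type` instead of RDFProtoStruct descriptors. The function and field names below are hypothetical; the sketch only illustrates the per-query cache key and the `metadata` -> `__metadata__` renaming.

_cls_cache = {}

def _TableClass(query, column_names):
  # Cache key mirrors the real code: one class per distinct query.
  name = "OsqueryTable{}".format(hash(query))
  try:
    return _cls_cache[name]
  except KeyError:
    pass
  # Rename a clashing column, as the converter does for "metadata".
  fields = ["__metadata__" if c == "metadata" else c for c in column_names]
  cls = type(name, (object,), {"FIELDS": tuple(fields)})
  _cls_cache[name] = cls
  return cls

print(_TableClass("SELECT pid FROM processes;", ["pid", "metadata"]).FIELDS)
# ('pid', '__metadata__')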
130,536
Returns the path from a client action response as a string. Args: response: A client action response. pathspec_attribute: Specifies the field which stores the pathspec. Returns: The path as a string or None if no path is found.
def _ExtractPath(response, pathspec_attribute=None): path_specification = response if pathspec_attribute is not None: if response.HasField(pathspec_attribute): path_specification = response.Get(pathspec_attribute) if path_specification.HasField("pathspec"): path_specification = path_specification.pathspec if path_specification.HasField("path"): path_specification = path_specification.path if isinstance(path_specification, Text): return path_specification return None
130,538
Returns client-activity metrics for a given statistic. Args: statistic: The name of the statistic, which should also be a column in the 'clients' table. day_buckets: A set of n-day-active buckets. cursor: MySQL cursor for executing queries.
def _CountClientStatisticByLabel(self, statistic, day_buckets, cursor): day_buckets = sorted(day_buckets) sum_clauses = [] ping_cast_clauses = [] timestamp_buckets = [] now = rdfvalue.RDFDatetime.Now() for day_bucket in day_buckets: column_name = "days_active_{}".format(day_bucket) sum_clauses.append( "CAST(SUM({0}) AS UNSIGNED) AS {0}".format(column_name)) ping_cast_clauses.append( "CAST(c.last_ping > FROM_UNIXTIME(%s) AS UNSIGNED) AS {}".format( column_name)) timestamp_bucket = now - rdfvalue.Duration.FromDays(day_bucket) timestamp_buckets.append( mysql_utils.RDFDatetimeToTimestamp(timestamp_bucket)) query = .format( statistic=statistic, sum_clauses=", ".join(sum_clauses), ping_cast_clauses=", ".join(ping_cast_clauses)) cursor.execute(query, timestamp_buckets) counts = {} for response_row in cursor.fetchall(): statistic_value, client_label = response_row[:2] for i, num_actives in enumerate(response_row[2:]): if num_actives <= 0: continue stats_key = (statistic_value, client_label, day_buckets[i]) counts[stats_key] = num_actives return counts
130,580
Parses a string path component into a `PathComponent` instance. Args: item: A path component string to be parsed. opts: A `PathOpts` object. Returns: A `PathComponent` instance corresponding to the given path fragment. Raises: ValueError: If the path item contains a recursive component fragment but cannot be parsed as such.
def ParsePathItem(item, opts=None): if item == os.path.curdir: return CurrentComponent() if item == os.path.pardir: return ParentComponent() recursion = PATH_RECURSION_REGEX.search(item) if recursion is None: return GlobComponent(item, opts) start, end = recursion.span() if not (start == 0 and end == len(item)): raise ValueError("malformed recursive component") if recursion.group("max_depth"): max_depth = int(recursion.group("max_depth")) else: max_depth = None return RecursiveComponent(max_depth=max_depth, opts=opts)
130,581
Parses a given path into a stream of `PathComponent` instances. Args: path: A path to be parsed. opts: A `PathOpts` object. Yields: `PathComponent` instances corresponding to the components of the given path. Raises: ValueError: If the path contains more than one recursive component.
def ParsePath(path, opts = None): precondition.AssertType(path, Text) rcount = 0 # Split the path at all forward slashes and if running under Windows, also # backward slashes. This allows ParsePath to handle native paths and also # normalized VFS paths like /HKEY_LOCAL_MACHINE/SAM. normalized_path = path.replace(os.path.sep, "/") for item in normalized_path.split("/"): component = ParsePathItem(item, opts=opts) if isinstance(component, RecursiveComponent): rcount += 1 if rcount > 1: raise ValueError("path cannot have more than one recursive component") yield component
130,582
Applies all expansion mechanisms to the given path. Args: path: A path to expand. opts: A `PathOpts` object. Yields: All paths that can be obtained from the given path by performing expansions.
def ExpandPath(path, opts=None): precondition.AssertType(path, Text) for grouped_path in ExpandGroups(path): for globbed_path in ExpandGlobs(grouped_path, opts): yield globbed_path
130,583
Performs group expansion on a given path. For example, given path `foo/{bar,baz}/{quux,norf}` this method will yield `foo/bar/quux`, `foo/bar/norf`, `foo/baz/quux`, `foo/baz/norf`. Args: path: A path to expand. Yields: Paths that can be obtained from given path by expanding groups.
def ExpandGroups(path): precondition.AssertType(path, Text) chunks = [] offset = 0 for match in PATH_GROUP_REGEX.finditer(path): chunks.append([path[offset:match.start()]]) chunks.append(match.group("alts").split(",")) offset = match.end() chunks.append([path[offset:]]) for prod in itertools.product(*chunks): yield "".join(prod)
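A self-contained version of the chunk-and-product technique used above. PATH_GROUP_REGEX is assumed here to be a simple `{a,b,c}` matcher, which may differ from the real definition.

import itertools
import re

# Assumed, simplified stand-in for PATH_GROUP_REGEX.
PATH_GROUP_REGEX = re.compile(r"\{(?P<alts>[^}]+)\}")

def _ExpandGroups(path):
  chunks = []
  offset = 0
  for match in PATH_GROUP_REGEX.finditer(path):
    chunks.append([path[offset:match.start()]])    # Literal text chunk.
    chunks.append(match.group("alts").split(","))  # Alternatives chunk.
    offset = match.end()
  chunks.append([path[offset:]])                   # Trailing literal.
  for prod in itertools.product(*chunks):
    yield "".join(prod)

print(list(_ExpandGroups("foo/{bar,baz}/{quux,norf}")))
# ['foo/bar/quux', 'foo/bar/norf', 'foo/baz/quux', 'foo/baz/norf']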
130,584
Performs glob expansion on a given path. The path can contain regular glob elements (such as `**`, `*`, `?`, `[a-z]`). For example, given files `foo`, `bar` and `baz`, glob expansion of `ba?` will yield `bar` and `baz`. Args: path: A path to expand. opts: A `PathOpts` object. Returns: A generator over all possible glob expansions of the given path. Raises: ValueError: If the given path is empty or relative.
def ExpandGlobs(path, opts = None): precondition.AssertType(path, Text) if not path: raise ValueError("Path is empty") if not _IsAbsolutePath(path, opts): raise ValueError("Path '%s' is not absolute" % path) if opts is not None and opts.pathtype == rdf_paths.PathSpec.PathType.REGISTRY: # Handle HKLM\Foo and /HKLM/Foo identically. root_dir, tail = path.replace("\\", "/").lstrip("/").split("/", 1) components = list(ParsePath(tail, opts=opts)) else: drive, tail = os.path.splitdrive(path) root_dir = os.path.join(drive, os.path.sep).upper() components = list(ParsePath(tail[1:], opts=opts)) return _ExpandComponents(root_dir, components)
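For the non-registry branch, the drive/tail split comes straight from os.path.splitdrive; a small illustration whose output depends on the platform the snippet runs on:

import os.path

# On Windows: ('C:', '\\Windows\\System32'); on POSIX splitdrive never
# detects a drive and returns ('', <path unchanged>).
print(os.path.splitdrive("C:\\Windows\\System32"))
print(os.path.splitdrive("/usr/lib"))  # ('', '/usr/lib') on both platforms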
130,585
Returns children of a given directory. This function is intended to be used by the `PathComponent` subclasses to get initial list of potential children that then need to be filtered according to the rules of a specific component. Args: dirpath: A path to the directory. pathtype: The pathtype to use. Raises: ValueError: in case of unsupported path types.
def _ListDir(dirpath, pathtype): pathspec = rdf_paths.PathSpec(path=dirpath, pathtype=pathtype) childpaths = [] try: file_obj = vfs.VFSOpen(pathspec) for path in file_obj.ListNames(): # For Windows registry, ignore the empty string which corresponds to the # default value in the current key. Otherwise, globbing a key will yield # the key itself, because joining the name of the default value u"" with # a key name yields the key name again. if pathtype != rdf_paths.PathSpec.PathType.REGISTRY or path: childpaths.append(path) except IOError: pass return childpaths
130,588
Instantiates a new GlobComponent from a given path glob. Args: glob: A string with potential glob elements (e.g. `foo*`). opts: An optional PathOpts instance.
def __init__(self, glob, opts = None): super(GlobComponent, self).__init__() self._glob = glob self.regex = re.compile(fnmatch.translate(glob), re.I) self.opts = opts or PathOpts()
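The matcher built in this constructor is plain fnmatch.translate compiled case-insensitively; a quick standalone check of what that produces:

import fnmatch
import re

regex = re.compile(fnmatch.translate("ba?"), re.I)
for name in ["foo", "bar", "Baz"]:
  # `?` matches exactly one character; re.I makes the match case-insensitive.
  print(name, bool(regex.match(name)))
# foo False
# bar True
# Baz True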
130,594
Gather open network connection stats. Args: args: An `rdf_client_action.ListNetworkConnectionArgs` instance. Yields: `rdf_client_network.NetworkConnection` instances.
def ListNetworkConnectionsFromClient(args): for proc in psutil.process_iter(): try: connections = proc.connections() except (psutil.NoSuchProcess, psutil.AccessDenied): continue for conn in connections: if args.listening_only and conn.status != "LISTEN": continue res = rdf_client_network.NetworkConnection() res.pid = proc.pid res.process_name = proc.name() res.family = conn.family res.type = conn.type try: if conn.status: res.state = conn.status except ValueError: logging.warning("Encountered unknown connection status (%s).", conn.status) res.local_address.ip, res.local_address.port = conn.laddr if conn.raddr: res.remote_address.ip, res.remote_address.port = conn.raddr yield res
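Stripped of the RDF wrapping, the enumeration above is a straightforward psutil loop. A minimal standalone sketch that prints listening TCP endpoints (processes we lack permissions for are skipped):

import psutil

for proc in psutil.process_iter():
  try:
    connections = proc.connections()
  except (psutil.NoSuchProcess, psutil.AccessDenied):
    continue
  for conn in connections:
    if conn.status != psutil.CONN_LISTEN:
      continue
    print(proc.pid, proc.name(), conn.laddr)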
130,597
Iterates over contents of the intrusive linked list of `ifaddrs`. Args: ifaddrs: A pointer to the first node of `ifaddrs` linked list. Can be NULL. Yields: Instances of `Ifaddr`.
def IterIfaddrs(ifaddrs): precondition.AssertOptionalType(ifaddrs, ctypes.POINTER(Ifaddrs)) while ifaddrs: yield ifaddrs.contents ifaddrs = ifaddrs.contents.ifa_next
130,598
Parses contents of the intrusive linked list of `ifaddrs`. Args: ifaddrs: A pointer to the first node of `ifaddrs` linked list. Can be NULL. Returns: An iterator over instances of `rdf_client_network.Interface`.
def ParseIfaddrs(ifaddrs): precondition.AssertOptionalType(ifaddrs, ctypes.POINTER(Ifaddrs)) ifaces = {} for ifaddr in IterIfaddrs(ifaddrs): ifname = ctypes.string_at(ifaddr.ifa_name).decode("utf-8") iface = ifaces.setdefault(ifname, rdf_client_network.Interface()) iface.ifname = ifname if not ifaddr.ifa_addr: continue sockaddr = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddr)) iffamily = sockaddr.contents.sa_family if iffamily == AF_INET: sockaddrin = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddrin)) address = rdf_client_network.NetworkAddress() address.address_type = rdf_client_network.NetworkAddress.Family.INET address.packed_bytes = struct.pack("=L", sockaddrin.contents.sin_addr) iface.addresses.append(address) elif iffamily == AF_INET6: sockaddrin = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddrin6)) address = rdf_client_network.NetworkAddress() address.address_type = rdf_client_network.NetworkAddress.Family.INET6 address.packed_bytes = bytes(list(sockaddrin.contents.sin6_addr)) iface.addresses.append(address) elif iffamily == AF_LINK: sockaddrdl = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddrdl)) nlen = sockaddrdl.contents.sdl_nlen alen = sockaddrdl.contents.sdl_alen iface.mac_address = bytes(sockaddrdl.contents.sdl_data[nlen:nlen + alen]) else: raise ValueError("Unexpected socket address family: %s" % iffamily) return itervalues(ifaces)
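The IPv4 branch stores sin_addr packed with "=L"; a tiny standalone check that this round-trips back to a dotted quad:

import socket
import struct

# inet_aton gives 4 bytes in network order; unpacking and repacking with the
# same "=L" format preserves them byte-for-byte.
sin_addr = struct.unpack("=L", socket.inet_aton("192.0.2.1"))[0]
packed_bytes = struct.pack("=L", sin_addr)
print(socket.inet_ntoa(packed_bytes))  # 192.0.2.1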
130,599
Create the Service protobuf. Args: job: Launchd job dict from the ServiceManagement framework. Returns: A sysinfo_pb2.OSXServiceInformation proto.
def CreateServiceProto(job): service = rdf_client.OSXServiceInformation( label=job.get("Label"), program=job.get("Program"), sessiontype=job.get("LimitLoadToSessionType"), lastexitstatus=int(job["LastExitStatus"]), timeout=int(job["TimeOut"]), ondemand=bool(job["OnDemand"])) for arg in job.get("ProgramArguments", "", stringify=False): # Returns CFArray of CFStrings service.args.Append(str(arg)) mach_dict = job.get("MachServices", {}, stringify=False) for key, value in iteritems(mach_dict): service.machservice.Append("%s:%s" % (key, value)) job_mach_dict = job.get("PerJobMachServices", {}, stringify=False) for key, value in iteritems(job_mach_dict): service.perjobmachservice.Append("%s:%s" % (key, value)) if "PID" in job: service.pid = job["PID"].value return service
130,602
Get running launchd jobs. Args: args: Unused. Yields: `rdf_client.OSXServiceInformation` instances. Raises: UnsupportedOSVersionError: for OS X earlier than 10.6.
def OSXEnumerateRunningServicesFromClient(args): del args # Unused. osx_version = client_utils_osx.OSXVersion() version_array = osx_version.VersionAsMajorMinor() if version_array[:2] < [10, 6]: raise UnsupportedOSVersionError( "ServiceManagement API unsupported on < 10.6. This client is %s" % osx_version.VersionString()) launchd_list = GetRunningLaunchDaemons() parser = osx_launchd.OSXLaunchdJobDict(launchd_list) for job in parser.Parse(): response = CreateServiceProto(job) yield response
130,603
Create flow throttler object. Args: daily_req_limit: Number of flows allowed per user per client. Integer. dup_interval: rdfvalue.Duration time during which duplicate flows will be blocked.
def __init__(self, daily_req_limit=None, dup_interval=None): self.daily_req_limit = daily_req_limit self.dup_interval = dup_interval
130,623
Yields all flows for the given client_id and time range. Args: client_id: client URN min_create_time: minimum creation time (inclusive) token: acl token Yields: flow_objects.Flow objects
def _LoadFlows(self, client_id, min_create_time, token): if data_store.RelationalDBEnabled(): if isinstance(client_id, rdfvalue.RDFURN): client_id = client_id.Basename() flow_list = data_store.REL_DB.ReadAllFlowObjects( client_id=client_id, min_create_time=min_create_time, include_child_flows=False) for flow_obj in flow_list: yield flow_obj else: now = rdfvalue.RDFDatetime.Now() client_id_urn = rdf_client.ClientURN(client_id) flows_dir = aff4.FACTORY.Open(client_id_urn.Add("flows"), token=token) # Save DB roundtrips by checking both conditions at once. flow_list = flows_dir.ListChildren( age=(min_create_time.AsMicrosecondsSinceEpoch(), now.AsMicrosecondsSinceEpoch())) for flow_obj in aff4.FACTORY.MultiOpen(flow_list, token=token): yield rdf_flow_objects.Flow( args=flow_obj.args, flow_class_name=flow_obj.runner_args.flow_name, flow_id=flow_obj.urn.Basename(), create_time=flow_obj.context.create_time, creator=flow_obj.creator, )
130,624
Builds a stat entry object from a given path. Args: path: A path (string value) to stat. pathspec: A `PathSpec` corresponding to the `path`. ext_attrs: Whether to include extended file attributes in the result. Returns: `StatEntry` object.
def StatEntryFromPath(path, pathspec, ext_attrs=True): try: stat = filesystem.Stat.FromPath(path) except (IOError, OSError) as error: logging.error("Failed to obtain stat for '%s': %s", pathspec, error) return rdf_client_fs.StatEntry(pathspec=pathspec) return StatEntryFromStat(stat, pathspec, ext_attrs=ext_attrs)
130,626
Build a stat entry object from a given stat object. Args: stat: A `Stat` object. pathspec: A `PathSpec` from which `stat` was obtained. ext_attrs: Whether to include extended file attributes in the result. Returns: `StatEntry` object.
def StatEntryFromStat(stat, pathspec, ext_attrs = True): result = rdf_client_fs.StatEntry(pathspec=pathspec) for attr in _STAT_ATTRS: value = getattr(stat.GetRaw(), attr, None) if value is None: continue # TODO(hanuszczak): Why are we doing this? value = int(value) if value < 0: value &= 0xFFFFFFFF setattr(result, attr, value) result.st_flags_linux = stat.GetLinuxFlags() result.st_flags_osx = stat.GetOsxFlags() if ext_attrs: # TODO(hanuszczak): Can we somehow incorporate extended attribute getter to # the `Stat` class? That would make the code a lot prettier but would force # `utils` to depend on `xattrs`. result.ext_attrs = list(GetExtAttrs(stat.GetPath())) return result
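The int cast and masking above appear to normalize stat fields that some platforms report as negative signed 32-bit values (the TODO in the code itself questions the reason); purely as arithmetic, the masking looks like this:

value = -2
print(value & 0xFFFFFFFF)  # 4294967294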
130,627
Returns an `os.stat_result` with most information from `StatEntry`. This is a lossy conversion: only the first 10 stat_result fields are populated, because the os.stat_result constructor is inflexible. Args: stat_entry: An instance of rdf_client_fs.StatEntry. Returns: An instance of `os.stat_result` with basic fields populated.
def StatResultFromStatEntry( stat_entry): values = [] for attr in _STAT_ATTRS[:10]: values.append(stat_entry.Get(attr)) return os.stat_result(values)
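The os.stat_result constructor used above accepts a sequence of at least 10 values in the order mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime; a quick standalone illustration:

import os

st = os.stat_result((0o100644, 0, 0, 1, 0, 0, 1234, 0, 0, 0))
print(oct(st.st_mode), st.st_size)  # 0o100644 1234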
130,629
Initializes this object from an existing notification. Args: notification: A rdfvalues.flows.Notification object. is_pending: Indicates whether the user has already seen this notification or not. Returns: The current instance.
def InitFromNotification(self, notification, is_pending=False): self.timestamp = notification.timestamp self.message = notification.message self.subject = str(notification.subject) self.is_pending = is_pending reference_type_enum = ApiNotificationReference.Type # Please see the comments to aff4_objects.GRRUser.Notify implementation # for the details of notification.type format. Short summary: # notification.type may be one of legacy values (i.e. "ViewObject") or # have a format of "[legacy value]:[new-style notification type]", i.e. # "ViewObject:TYPE_CLIENT_INTERROGATED". legacy_type = None if ":" in notification.type: legacy_type, new_type = notification.type.split(":", 2) self.notification_type = new_type else: legacy_type = notification.type # TODO(user): refactor notifications, so that we send a meaningful # notification from the start, so that we don't have to do the # bridging/conversion/guessing here. components = self._GetUrnComponents(notification) if legacy_type == "Discovery": self.reference.type = reference_type_enum.CLIENT self.reference.client = ApiNotificationClientReference( client_id=components[0]) elif legacy_type == "ViewObject": if len(components) >= 2 and components[0] == "hunts": self.reference.type = reference_type_enum.HUNT self.reference.hunt.hunt_id = components[1] elif len(components) >= 2 and components[0] == "cron": self.reference.type = reference_type_enum.CRON self.reference.cron.cron_job_id = components[1] elif len(components) >= 3 and components[1] == "flows": self.reference.type = reference_type_enum.FLOW self.reference.flow.flow_id = components[2] self.reference.flow.client_id = components[0] elif len(components) == 1 and rdf_client.ClientURN.Validate( components[0]): self.reference.type = reference_type_enum.CLIENT self.reference.client.client_id = components[0] else: if notification.subject: path = notification.subject.Path() for prefix in itervalues(rdf_paths.PathSpec.AFF4_PREFIXES): part = "/%s%s" % (components[0], prefix) if path.startswith(part): self.reference.type = reference_type_enum.VFS self.reference.vfs.client_id = components[0] self.reference.vfs.vfs_path = (prefix + path[len(part):]).lstrip("/") break if self.reference.type != reference_type_enum.VFS: self.reference.type = reference_type_enum.UNKNOWN self.reference.unknown.subject_urn = notification.subject elif legacy_type == "FlowStatus": if not components or not rdf_client.ClientURN.Validate(components[0]): self.reference.type = reference_type_enum.UNKNOWN self.reference.unknown.subject_urn = notification.subject else: self.reference.type = reference_type_enum.FLOW self.reference.flow.flow_id = notification.source.Basename() self.reference.flow.client_id = components[0] # TODO(user): refactor GrantAccess notification so that we don't have # to infer approval type from the URN. 
elif legacy_type == "GrantAccess": if rdf_client.ClientURN.Validate(components[1]): self.reference.type = reference_type_enum.CLIENT_APPROVAL self.reference.client_approval.client_id = components[1] self.reference.client_approval.approval_id = components[-1] self.reference.client_approval.username = components[-2] elif components[1] == "hunts": self.reference.type = reference_type_enum.HUNT_APPROVAL self.reference.hunt_approval.hunt_id = components[2] self.reference.hunt_approval.approval_id = components[-1] self.reference.hunt_approval.username = components[-2] elif components[1] == "cron": self.reference.type = reference_type_enum.CRON_JOB_APPROVAL self.reference.cron_job_approval.cron_job_id = components[2] self.reference.cron_job_approval.approval_id = components[-1] self.reference.cron_job_approval.username = components[-2] else: self.reference.type = reference_type_enum.UNKNOWN self.reference.unknown.subject_urn = notification.subject self.reference.unknown.source_urn = notification.source return self
130,635
Gets all approvals for a given user and approval type. Args: approval_type: The type of approvals to get. offset: The starting index within the collection. count: The number of items to return. filter_func: A predicate function, returning True if a specific approval should be included in the result and False otherwise. token: The token identifying the user. Returns: A tuple of a list of approvals of the given approval type and a dict mapping approval subject URNs to the opened subject objects.
def _GetApprovals(self, approval_type, offset, count, filter_func=None, token=None): approvals_base_urn = aff4.ROOT_URN.Add("users").Add( token.username).Add("approvals").Add(approval_type) all_children = aff4.FACTORY.RecursiveMultiListChildren([approvals_base_urn]) approvals_urns = [] for subject, children in all_children: # We only want to process leaf nodes. if children: continue approvals_urns.append(subject) approvals_urns.sort(key=lambda x: x.age, reverse=True) approvals = list( aff4.FACTORY.MultiOpen( approvals_urns, mode="r", aff4_type=aff4_security.Approval, age=aff4.ALL_TIMES, token=token)) approvals_by_urn = {} for approval in approvals: approvals_by_urn[approval.symlink_urn or approval.urn] = approval cur_offset = 0 sorted_approvals = [] for approval_urn in approvals_urns: try: approval = approvals_by_urn[approval_urn] except KeyError: continue if filter_func is not None and not filter_func(approval): continue cur_offset += 1 if cur_offset <= offset: continue if count and len(sorted_approvals) >= count: break sorted_approvals.append(approval) subjects_urns = [a.Get(a.Schema.SUBJECT) for a in approvals] subjects_by_urn = {} for subject in aff4.FACTORY.MultiOpen(subjects_urns, mode="r", token=token): subjects_by_urn[subject.urn] = subject return sorted_approvals, subjects_by_urn
130,657
Tries to lock and run cron jobs. Args: names: List of cron jobs to run. If unset, run them all. token: security token. Raises: OneOrMoreCronJobsFailedError: if one or more individual cron jobs fail. Note: a failure of a single cron job doesn't preclude other cron jobs from running.
def RunOnce(self, names=None, token=None): del token leased_jobs = data_store.REL_DB.LeaseCronJobs( cronjob_ids=names, lease_time=rdfvalue.Duration("10m")) logging.info("Leased %d cron jobs for processing.", len(leased_jobs)) if not leased_jobs: return errors = {} processed_count = 0 for job in sorted(leased_jobs, key=lambda j: j.cron_job_id): if self.TerminateStuckRunIfNeeded(job): continue if not self.JobDueToRun(job): continue try: if self.RunJob(job): processed_count += 1 else: logging.info( "Can't schedule cron job %s on a thread pool " "(all threads are busy or CPU load is high)", job.cron_job_id) break except Exception as e: # pylint: disable=broad-except logging.exception("Cron job %s has failed: %s", job.cron_job_id, e) errors[job.cron_job_id] = e logging.info("Processed %d cron jobs.", processed_count) data_store.REL_DB.ReturnLeasedCronJobs(leased_jobs) if errors: raise OneOrMoreCronJobsFailedError(errors)
130,702
Does the actual work of the Cron, if the job is due to run. Args: job: The cronjob rdfvalue that should be run. Must be leased. Returns: A boolean indicating if this cron job was started or not. False may be returned when the threadpool is already full. Raises: LockError: if the object is not locked. ValueError: If the job argument is invalid.
def RunJob(self, job): if not job.leased_until: raise LockError("CronJob must be leased for Run() to be called.") if job.leased_until < rdfvalue.RDFDatetime.Now(): raise LockError("CronJob lease expired for %s." % job.cron_job_id) logging.info("Starting cron job: %s", job.cron_job_id) if job.args.action_type == job.args.ActionType.SYSTEM_CRON_ACTION: cls_name = job.args.system_cron_action.job_class_name job_cls = registry.SystemCronJobRegistry.CronJobClassByName(cls_name) name = "%s runner" % cls_name elif job.args.action_type == job.args.ActionType.HUNT_CRON_ACTION: job_cls = registry.CronJobRegistry.CronJobClassByName("RunHunt") name = "Hunt runner" else: raise ValueError( "CronJob %s doesn't have a valid args type set." % job.cron_job_id) run_state = rdf_cronjobs.CronJobRun( cron_job_id=job.cron_job_id, status="RUNNING") run_state.GenerateRunId() run_obj = job_cls(run_state, job) wait_for_start_event, signal_event, wait_for_write_event = ( threading.Event(), threading.Event(), threading.Event()) try: self._GetThreadPool().AddTask( target=run_obj.StartRun, args=(wait_for_start_event, signal_event, wait_for_write_event), name=name, blocking=False, inline=False) if not wait_for_start_event.wait(TASK_STARTUP_WAIT): logging.error("Cron job run task for %s is too slow to start.", job.cron_job_id) # Most likely the thread pool is full and the task is sitting on the # queue. Make sure we don't put more things on the queue by returning # False. return False # We know that the cron job task has started, unblock it by setting # the signal event. If signal_event is not set (this happens if the # task sits on a ThreadPool's queue doing nothing, see the # if-statement above) the task will just be a no-op when ThreadPool # finally gets to it. This way we can ensure that we can safely return # the lease and let another worker schedule the same job. signal_event.set() wait_for_write_event.wait(TASK_STARTUP_WAIT) return True except threadpool.Full: return False
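A self-contained sketch of the three-event handshake between RunJob and the queued task, with generic names; in the real code the events are passed into StartRun through the thread pool.

import threading

def _Run(started, proceed, done):
  started.set()                  # "The pool has actually picked me up."
  if not proceed.wait(timeout=5):
    return                       # Never unblocked: behave as a no-op.
  # ... the actual cron run and result write would happen here ...
  done.set()

started, proceed, done = threading.Event(), threading.Event(), threading.Event()
worker = threading.Thread(target=_Run, args=(started, proceed, done))
worker.start()

if started.wait(timeout=1):      # Task left the queue and started running.
  proceed.set()                  # Give it the go-ahead.
  done.wait(timeout=5)           # Wait for its critical write to finish.
worker.join()
print("completed:", done.is_set())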
130,705
Determines if the given job is due for another run. Args: job: The cron job rdfvalue object. Returns: True if it is time to run based on the specified frequency.
def JobDueToRun(self, job): if not job.enabled: return False if job.forced_run_requested: return True now = rdfvalue.RDFDatetime.Now() if (job.last_run_time is not None and job.last_run_time + job.frequency > now): return False # No currently executing job - lets go. if not job.current_run_id: return True # There is a job executing but we allow overruns. if job.allow_overruns: return True return False
130,706
Evaluates rules held in the rule set. Args: client_obj: Either an aff4 client object or a client_info dict as returned by ReadClientFullInfo if the relational db is used for reading. Returns: A bool value of the evaluation. Raises: ValueError: If the match mode has an unknown value.
def Evaluate(self, client_obj): if self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ALL: quantifier = all elif self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ANY: quantifier = any else: raise ValueError("Unexpected match mode value: %s" % self.match_mode) return quantifier(rule.Evaluate(client_obj) for rule in self.rules)
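The match-mode dispatch reduces to picking `all` or `any` as the quantifier over per-rule results; a standalone sketch with hypothetical rules and a plain dict standing in for the client object:

rules = [lambda c: c["os"] == "Linux",
         lambda c: "prod" in c["labels"]]
client = {"os": "Linux", "labels": ["prod", "web"]}

print(all(rule(client) for rule in rules))  # True  -> MATCH_ALL succeeds.
print(any(rule(client) for rule in rules))  # True  -> MATCH_ANY succeeds.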
130,762
Renders the default value of a given class. Args: value_cls: The class whose default value will be rendered. This class has to be (or be a subclass of) self.value_class (i.e. a class that this renderer is capable of rendering). Returns: An initialized default value. Raises: DefaultValueError: if the default value can't be created.
def BuildDefaultValue(self, value_cls): try: return value_cls() except Exception as e: # pylint: disable=broad-except logging.exception(e) raise DefaultValueError( "Can't create default for value %s: %s" % (value_cls.__name__, e))
130,790