docstring: string, lengths 52 to 499
function: string, lengths 67 to 35.2k
__index_level_0__: int64, values 52.6k to 1.16M
Stats the given file or returns a cached result if available. Args: path: A path to the file to perform `stat` on. follow_symlink: True if `stat` of a symlink should be returned instead of a file that it points to. For non-symlinks this setting has no effect. Returns: `Stat` object corresponding to the given path.
def Get(self, path, follow_symlink=True):
  key = self._Key(path=path, follow_symlink=follow_symlink)
  try:
    return self._cache[key]
  except KeyError:
    value = Stat.FromPath(path, follow_symlink=follow_symlink)
    self._cache[key] = value

    # If we are not following symlinks and the file is not a symlink then
    # the stat result for this file stays the same even if we want to follow
    # symlinks.
    if not follow_symlink and not value.IsSymlink():
      self._cache[self._Key(path=path, follow_symlink=True)] = value

    return value
133,142
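The cached lookup above can be mimicked with the standard library alone. Below is a minimal sketch of the same idea, assuming plain os.stat/os.lstat in place of GRR's Stat.FromPath; the class name and key layout are illustrative only.

import os
import stat


class StatCache(object):
  """Caches (path, follow_symlink) -> stat result, mirroring the logic above."""

  def __init__(self):
    self._cache = {}

  def Get(self, path, follow_symlink=True):
    key = (path, follow_symlink)
    try:
      return self._cache[key]
    except KeyError:
      value = os.stat(path) if follow_symlink else os.lstat(path)
      self._cache[key] = value
      # A non-symlink yields the same result either way, so fill both slots.
      if not follow_symlink and not stat.S_ISLNK(value.st_mode):
        self._cache[(path, True)] = value
      return value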
Generate RDFs for the fully expanded configs. Args: stats: A list of RDF StatEntries corresponding to the file_objects. file_objects: A list of file handles. Returns: A tuple of a list of RDFValue PamConfigEntries found & a list of strings which are the external config references found.
def EnumerateAllConfigs(self, stats, file_objects): # Convert the stats & file_objects into a cache of a # simple path keyed dict of file contents. cache = {} for stat_obj, file_obj in zip(stats, file_objects): cache[stat_obj.pathspec.path] = utils.ReadFileBytesAsUnicode(file_obj) result = [] external = [] # Check to see if we have the old pam config file laying around. if self.OLD_PAMCONF_FILENAME in cache: # The PAM documentation says if it contains config data, then # it takes precedence over the rest of the config. # If it doesn't, the rest of the PAMDIR config counts. result, external = self.EnumerateConfig(None, self.OLD_PAMCONF_FILENAME, cache) if result: return result, external # If we made it here, there isn't a old-style pam.conf file worth # speaking of, so process everything! for path in cache: # PAM uses the basename as the 'service' id. service = os.path.basename(path) r, e = self.EnumerateConfig(service, path, cache) result.extend(r) external.extend(e) return result, external
133,153
Returns an invariant key for an AFF4 object. The object will be cached based on this key. This function is specifically extracted to ensure that we encapsulate all security critical aspects of the AFF4 object so that objects do not leak across security boundaries. Args: urn: The urn of the object. age: The age policy used to build this object. Should be one of ALL_TIMES, NEWEST_TIME or a range. Returns: A key into the cache.
def _MakeCacheInvariant(self, urn, age):
  precondition.AssertType(urn, Text)
  return "%s:%s" % (urn, self.ParseAgeSpecification(age))
133,175
Returns metadata about all urns. Currently the metadata includes type and last update time. Args: urns: The urns of the objects to open. Yields: A dict of metadata. Raises: ValueError: A string was passed instead of an iterable.
def Stat(self, urns):
  if isinstance(urns, string_types):
    raise ValueError("Expected an iterable, not string.")

  for subject, values in data_store.DB.MultiResolvePrefix(
      urns, ["aff4:type", "metadata:last"]):
    res = dict(urn=rdfvalue.RDFURN(subject))
    for v in values:
      if v[0] == "aff4:type":
        res["type"] = v
      elif v[0] == "metadata:last":
        res["last"] = rdfvalue.RDFDatetime(v[1])
    yield res
133,185
Drop all the information about given objects. DANGEROUS! This recursively deletes all objects contained within the specified URN. Args: urns: Urns of objects to remove. token: The Security Token to use for opening this item. Raises: ValueError: If one of the urns is too short. This is a safety check to ensure the root is not removed.
def MultiDelete(self, urns, token=None): urns = [rdfvalue.RDFURN(urn) for urn in urns] if token is None: token = data_store.default_token for urn in urns: if urn.Path() == "/": raise ValueError("Can't delete root URN. Please enter a valid URN") deletion_pool = DeletionPool(token=token) deletion_pool.MultiMarkForDeletion(urns) marked_root_urns = deletion_pool.root_urns_for_deletion marked_urns = deletion_pool.urns_for_deletion logging.debug(u"Found %d objects to remove when removing %s", len(marked_urns), urns) logging.debug(u"Removing %d root objects when removing %s: %s", len(marked_root_urns), urns, marked_root_urns) pool = data_store.DB.GetMutationPool() for root in marked_root_urns: # Only the index of the parent object should be updated. Everything # below the target object (along with indexes) is going to be # deleted. self._DeleteChildFromIndex(root, mutation_pool=pool) for urn_to_delete in marked_urns: try: self.intermediate_cache.ExpireObject(urn_to_delete.Path()) except KeyError: pass pool.DeleteSubjects(marked_urns) pool.Flush() # Ensure this is removed from the cache as well. self.Flush() logging.debug("Removed %d objects", len(marked_urns))
133,187
Lists a bunch of directories efficiently. Args: urns: List of urns to list children for. limit: Max number of children to list (NOTE: this is per urn). age: The age of the items to retrieve. Should be one of ALL_TIMES, NEWEST_TIME or a range. Yields: Tuples of subjects and a list of children urns of a given subject.
def MultiListChildren(self, urns, limit=None, age=NEWEST_TIME):
  checked_subjects = set()

  for subject, values in data_store.DB.AFF4MultiFetchChildren(
      urns, timestamp=Factory.ParseAgeSpecification(age), limit=limit):
    checked_subjects.add(subject)

    subject_result = []
    for child, timestamp in values:
      urn = rdfvalue.RDFURN(subject).Add(child)
      urn.age = rdfvalue.RDFDatetime(timestamp)
      subject_result.append(urn)

    yield subject, subject_result

  for subject in set(urns) - checked_subjects:
    yield subject, []
133,188
Lists a bunch of directories efficiently. Args: urn: Urn to list children for. limit: Max number of children to list. age: The age of the items to retrieve. Should be one of ALL_TIMES, NEWEST_TIME or a range. Returns: RDFURN instances of each child.
def ListChildren(self, urn, limit=None, age=NEWEST_TIME):
  _, children_urns = list(
      self.MultiListChildren([urn], limit=limit, age=age))[0]
  return children_urns
133,189
Gets all the subfields indicated by field_names. This resolves specifications like "Users.special_folders.app_data" where for each entry in the Users protobuf the corresponding app_data folder entry should be returned. Args: fd: The base RDFValue or Array. field_names: A list of strings indicating which subfields to get. Yields: All the subfields matching the field_names specification.
def GetSubFields(self, fd, field_names):
  if isinstance(fd, rdf_protodict.RDFValueArray):
    for value in fd:
      for res in self._GetSubField(value, field_names):
        yield res
  else:
    for res in self._GetSubField(fd, field_names):
      yield res
133,198
Given a serialized value, decode the attribute. Only attributes which have been previously defined are permitted. Args: attribute_name: The string name of the attribute. value: The serialized attribute value. ts: The timestamp of this attribute.
def DecodeValueFromAttribute(self, attribute_name, value, ts):
  try:
    # Get the Attribute object from our schema.
    attribute = Attribute.PREDICATES[attribute_name]
    cls = attribute.attribute_type
    self._AddAttributeToCache(attribute, LazyDecoder(cls, value, ts),
                              self.synced_attributes)
  except KeyError:
    pass
  except (ValueError, rdfvalue.DecodeError):
    logging.debug("%s: %s invalid encoding. Skipping.", self.urn,
                  attribute_name)
133,205
Check that the value is of the expected type. Args: attribute: An instance of Attribute(). value: An instance of RDFValue. Raises: ValueError: when the value is not of the expected type. AttributeError: When the attribute is not of type Attribute().
def _CheckAttribute(self, attribute, value):
  if not isinstance(attribute, Attribute):
    raise AttributeError("Attribute %s must be of type aff4.Attribute()" %
                         attribute)

  if not isinstance(value, attribute.attribute_type):
    raise ValueError("Value for attribute %s must be of type %s()" %
                     (attribute, attribute.attribute_type.__name__))
133,213
Add an additional attribute to this object. If value is None, attribute is expected to be already initialized with a value. For example: fd.AddAttribute(fd.Schema.CONTAINS("some data")) Args: attribute: The attribute name or an RDFValue derived from the attribute. value: The value the attribute will be set to. age: Age (timestamp) of the attribute. If None, current time is used. Raises: IOError: If this object is read only.
def AddAttribute(self, attribute, value=None, age=None): if "w" not in self.mode: raise IOError("Writing attribute %s to read only object." % attribute) if value is None: value = attribute attribute = value.attribute_instance # Check if this object should be locked in order to add the attribute. # NOTE: We don't care about locking when doing blind writes. if self.mode != "w" and attribute.lock_protected and not self.transaction: raise IOError("Object must be locked to write attribute %s." % attribute) self._CheckAttribute(attribute, value) # Does this represent a new version? if attribute.versioned: if attribute.creates_new_object_version: self._new_version = True # Update the time of this new attribute. if age: value.age = age else: value.age = rdfvalue.RDFDatetime.Now() # Non-versioned attributes always replace previous versions and get written # at the earliest timestamp (so they appear in all objects). else: self._to_delete.add(attribute) self.synced_attributes.pop(attribute, None) self.new_attributes.pop(attribute, None) value.age = 0 self._AddAttributeToCache(attribute, value, self.new_attributes) self._dirty = True
133,215
Yields RDFURNs of all the children of this object. Args: limit: Total number of items we will attempt to retrieve. age: The age of the items to retrieve. Should be one of ALL_TIMES, NEWEST_TIME or a range in microseconds. Yields: RDFURN instances of each child.
def ListChildren(self, limit=None, age=NEWEST_TIME):
  # Just grab all the children from the index.
  for predicate, timestamp in data_store.DB.AFF4FetchChildren(
      self.urn, timestamp=Factory.ParseAgeSpecification(age), limit=limit):
    urn = self.urn.Add(predicate)
    urn.age = rdfvalue.RDFDatetime(timestamp)
    yield urn
133,229
Directly overwrite the current contents. Replaces the data currently in the stream with compressed_data, and closes the object. Makes it possible to avoid recompressing the data. Args: compressed_data: The data to write, must be zlib compressed. size: The uncompressed size of the data.
def OverwriteAndClose(self, compressed_data, size):
  self.Set(self.Schema.CONTENT(compressed_data))
  self.Set(self.Schema.SIZE(size))
  super(AFF4MemoryStreamBase, self).Close()
133,241
Does basic token validation. Args: token: User's credentials as access_control.ACLToken. targets: List of targets that were meant to be accessed by the token. This is used for logging purposes only. Returns: True if token is valid. Raises: access_control.UnauthorizedAccess: if token is not valid. ValueError: if targets list is empty.
def ValidateToken(token, targets):

  def GetSubjectForError():
    if len(targets) == 1:
      return list(targets)[0]
    else:
      return None

  # All accesses need a token.
  if not token:
    raise access_control.UnauthorizedAccess(
        "Must give an authorization token for %s" % targets,
        subject=GetSubjectForError())

  # Token must not be expired here.
  token.CheckExpiry()

  # Token must have identity.
  if not token.username:
    raise access_control.UnauthorizedAccess(
        "Must specify a username for access to %s." % targets,
        subject=GetSubjectForError())

  return True
133,266
Does basic requested access validation. Args: requested_access: String consisting of 'r', 'w' and 'q' characters. subjects: A list of subjects that are about to be accessed with a given requested_access. Used for logging purposes only. Returns: True if requested_access is valid. Raises: access_control.UnauthorizedAccess: if requested_access is not valid. ValueError: if subjects list is empty.
def ValidateAccessAndSubjects(requested_access, subjects):
  if not requested_access:
    raise access_control.UnauthorizedAccess(
        "Must specify requested access type for %s" % subjects)

  for s in requested_access:
    if s not in "rwq":
      raise ValueError(
          "Invalid access requested for %s: %s" % (subjects, requested_access))

  if "q" in requested_access and "r" not in requested_access:
    raise access_control.UnauthorizedAccess(
        "Invalid access request: query permissions require read permissions "
        "for %s" % subjects,
        requested_access=requested_access)

  return True
133,267
Checks if flow can be started on a particular client. Only flows with a category can be started. Having a category means that the flow will be accessible from the UI. Args: flow_name: Name of the flow to check access for. Returns: True if flow is externally accessible. Raises: access_control.UnauthorizedAccess: if flow is not externally accessible.
def CheckFlowCanBeStartedOnClient(flow_name):
  flow_cls = flow.GRRFlow.GetPlugin(flow_name)

  if flow_cls.category:
    return True
  else:
    raise access_control.UnauthorizedAccess(
        "Flow %s can't be started on a client by non-suid users." % flow_name)
133,269
Streams chunks of a given file starting at given offset. Args: filedesc: A `file` object to stream. offset: An integer offset at which the file stream should start. amount: An upper bound on number of bytes to read. Returns: Generator over `Chunk` instances.
def StreamFile(self, filedesc, offset=0, amount=None):
  reader = FileReader(filedesc, offset=offset)
  return self.Stream(reader, amount=amount)
133,286
Streams chunks of a file located at given path starting at given offset. Args: filepath: A path to the file to stream. offset: An integer offset at which the file stream should start. amount: An upper bound on number of bytes to read. Yields: `Chunk` instances.
def StreamFilePath(self, filepath, offset=0, amount=None):
  with open(filepath, "rb") as filedesc:
    for chunk in self.StreamFile(filedesc, offset=offset, amount=amount):
      yield chunk
133,287
Streams chunks of memory of a given process starting at given offset. Args: process: A platform-specific `Process` instance. offset: An integer offset at which the memory stream should start. amount: An upper bound on number of bytes to read. Returns: Generator over `Chunk` instances.
def StreamMemory(self, process, offset=0, amount=None):
  reader = MemoryReader(process, offset=offset)
  return self.Stream(reader, amount=amount)
133,288
Streams chunks of a given file starting at given offset. Args: reader: A `Reader` instance. amount: An upper bound on number of bytes to read. Yields: `Chunk` instances.
def Stream(self, reader, amount=None):
  if amount is None:
    amount = float("inf")

  data = reader.Read(min(self.chunk_size, amount))
  if not data:
    return

  amount -= len(data)
  offset = reader.offset - len(data)
  yield Chunk(offset=offset, data=data)

  while amount > 0:
    # We need `len(data)` here because overlap size can be 0.
    overlap = data[len(data) - self.overlap_size:]
    new = reader.Read(min(self.chunk_size - self.overlap_size, amount))
    if not new:
      return

    data = overlap + new
    amount -= len(new)
    offset = reader.offset - len(data)
    yield Chunk(offset=offset, data=data, overlap=len(overlap))
133,289
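The overlap bookkeeping above is easier to see on a plain file object. A minimal sketch under the assumption of fixed, illustrative chunk and overlap sizes (not GRR's Reader/Chunk types):

def stream_overlapping_chunks(filedesc, chunk_size=1024, overlap_size=64):
  """Yields (offset, data) pairs where consecutive chunks share an overlap."""
  data = filedesc.read(chunk_size)
  if not data:
    return
  offset = 0
  yield offset, data
  while True:
    # Keep the tail of the previous chunk so matches crossing the boundary
    # are still visible in the next chunk.
    overlap = data[len(data) - overlap_size:] if overlap_size else b""
    new = filedesc.read(chunk_size - overlap_size)
    if not new:
      return
    offset += len(data) - len(overlap)
    data = overlap + new
    yield offset, data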
Yields spans of occurrences of a given pattern within the chunk. Only matches that span over regular (non-overlapped) chunk bytes are returned. Matches lying completely within the overlapped zone ought to be returned by the previous chunk. Args: matcher: A `Matcher` instance corresponding to the searched pattern. Yields: `Matcher.Span` objects corresponding to the positions of the pattern.
def Scan(self, matcher):
  position = 0
  while True:
    span = matcher.Match(self.data, position)
    if span is None:
      return

    # We are not interested in hits within overlap-only zone. We continue the
    # search just after the previous match starts because of situations where
    # there is a match beginning before the end of the overlap-only zone match
    # and ending after the overlap zone.
    if span.end <= self.overlap:
      position = span.begin + 1
      continue

    # Since we do not care about overlapping matches we resume our search
    # at the end of the previous match.
    position = span.end
    yield span
133,291
Updates underlying hashers with a file on a given path. Args: path: A path to the file that is going to be fed to the hashers. byte_count: A maximum number of bytes that are going to be processed.
def HashFilePath(self, path, byte_count):
  with open(path, "rb") as fd:
    self.HashFile(fd, byte_count)
133,302
Updates underlying hashers with a given file. Args: fd: A file object that is going to be fed to the hashers. byte_count: A maximum number of bytes that are going to be processed.
def HashFile(self, fd, byte_count):
  while byte_count > 0:
    buf_size = min(byte_count, constants.CLIENT_MAX_BUFFER_SIZE)
    buf = fd.read(buf_size)
    if not buf:
      break

    self.HashBuffer(buf)
    byte_count -= buf_size
133,303
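For reference, the bounded chunked-read pattern above looks like this with a single hashlib digest; the buffer size and function name are illustrative, not the client's constants:

import hashlib

def hash_file_prefix(fd, byte_count, buffer_size=64 * 1024):
  """Hashes at most byte_count bytes of an open binary file object."""
  digest = hashlib.sha256()
  while byte_count > 0:
    buf = fd.read(min(byte_count, buffer_size))
    if not buf:
      break
    digest.update(buf)
    byte_count -= len(buf)
  return digest.hexdigest()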
Updates underlying hashers with a given buffer. Args: buf: A byte buffer (string object) that is going to be fed to the hashers.
def HashBuffer(self, buf):
  for hasher in itervalues(self._hashers):
    hasher.update(buf)

  if self._progress:
    self._progress()

  self._bytes_read += len(buf)
133,304
Get the set of all label names applied to all clients. Args: token: token to use when opening the index. include_catchall: If true, we include ALL_CLIENTS_LABEL in the results. Returns: set of label name strings, including the catchall "All"
def GetAllClientLabels(token, include_catchall=False):
  labels_index = aff4.FACTORY.Create(
      standard.LabelSet.CLIENT_LABELS_URN,
      standard.LabelSet,
      mode="r",
      token=token)
  labels = set(labels_index.ListLabels())
  if include_catchall:
    labels.add(ALL_CLIENTS_LABEL)
  return labels
133,306
Run all the actions specified in the rule. Args: rule: Rule whose actions are to be executed. client_id: Id of a client where rule's actions are to be executed. Returns: Number of actions started.
def _RunActions(self, rule, client_id): actions_count = 0 for action in rule.actions: try: # Say this flow came from the foreman. token = self.token.Copy() token.username = "Foreman" if action.HasField("hunt_id"): if self._CheckIfHuntTaskWasAssigned(client_id, action.hunt_id): logging.info( "Foreman: ignoring hunt %s on client %s: was started " "here before", client_id, action.hunt_id) else: logging.info("Foreman: Starting hunt %s on client %s.", action.hunt_id, client_id) flow_cls = registry.AFF4FlowRegistry.FlowClassByName( action.hunt_name) flow_cls.StartClients(action.hunt_id, [client_id]) actions_count += 1 else: flow.StartAFF4Flow( client_id=client_id, flow_name=action.flow_name, token=token, **action.argv.ToDict()) actions_count += 1 # There could be all kinds of errors we don't know about when starting the # flow/hunt so we catch everything here. except Exception as e: # pylint: disable=broad-except logging.exception("Failure running foreman action on client %s: %s", action.hunt_id, e) return actions_count
133,315
Examines our rules and starts up flows based on the client. Args: client_id: Client id of the client for tasks to be assigned. Returns: Number of assigned tasks.
def AssignTasksToClient(self, client_id): rules = self.Get(self.Schema.RULES) if not rules: return 0 if data_store.RelationalDBEnabled(): last_foreman_run = self._GetLastForemanRunTimeRelational(client_id) else: last_foreman_run = self._GetLastForemanRunTime(client_id) latest_rule = max(rule.created for rule in rules) if latest_rule <= last_foreman_run: return 0 # Update the latest checked rule on the client. if data_store.RelationalDBEnabled(): try: self._SetLastForemanRunTimeRelational(client_id, latest_rule) except db.UnknownClientError: pass # If the relational db is used for reads, we don't have to update the # aff4 object. if not data_store.RelationalDBEnabled(): self._SetLastForemanRunTime(client_id, latest_rule) relevant_rules = [] expired_rules = False now = time.time() * 1e6 for rule in rules: if rule.expires < now: expired_rules = True continue if rule.created <= int(last_foreman_run): continue relevant_rules.append(rule) if data_store.RelationalDBEnabled(): client_data = data_store.REL_DB.ReadClientFullInfo(client_id) if client_data is None: return else: client_data = aff4.FACTORY.Open(client_id, mode="rw", token=self.token) actions_count = 0 for rule in relevant_rules: if self._EvaluateRules(rule, client_data): actions_count += self._RunActions(rule, client_id) if expired_rules: self.ExpireRules() return actions_count
133,319
Create new blob hashes and append to BlobImage. We don't support writing at arbitrary file offsets, but this method provides a convenient way to add blobs for a new file, or append content to an existing one. Args: src_fd: source file handle open for read Raises: IOError: if blob has already been finalized.
def AppendContent(self, src_fd):
  while 1:
    blob = src_fd.read(self.chunksize)
    if not blob:
      break

    blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(blob)
    self.AddBlob(blob_id, len(blob))

  self.Flush()
133,331
Add another blob to this image using its hash. Once a blob is added that is smaller than the chunksize we finalize the file, since handling adding more blobs makes the code much more complex. Args: blob_id: rdf_objects.BlobID object. length: int length of blob Raises: IOError: if blob has been finalized.
def AddBlob(self, blob_id, length):
  if self.finalized and length > 0:
    raise IOError("Can't add blobs to finalized BlobImage")

  self.content_dirty = True
  self.index.seek(0, 2)
  self.index.write(blob_id.AsBytes())
  self.size += length

  if length < self.chunksize:
    self.finalized = True
133,332
Initializes the artifact parser factory. Args: artifact_name: A name of the artifact this factory is supposed to provide parser instances for.
def __init__(self, artifact_name):
  precondition.AssertType(artifact_name, Text)
  self._artifact_name = artifact_name
133,342
Initializes the substitution environment. Args: var_config: A configuration (concrete values) of pattern variables. scope_config: A configuration (concrete values) of pattern scopes.
def __init__(self, var_config, scope_config):
  self._substs = {}
  self._var_config = var_config
  self._scope_config = scope_config

  for var_id, var_value in iteritems(var_config):
    key = "%%{var}%%".format(var=var_id)
    self._substs[key] = str(var_value)

  for scope_id, var_config in iteritems(scope_config):
    for var_id, var_value in iteritems(var_config):
      key = "%%{scope}.{var}%%".format(scope=scope_id, var=var_id)
      self._substs[key] = str(var_value)
133,343
Formats given pattern with this substitution environment. A pattern can contain placeholders for variables (`%%foo%%`) and scopes (`%%bar.baz%%`) that are replaced with concrete values in this substitution environment (specified in the constructor). Args: pattern: A pattern with placeholders to substitute. Returns: A pattern with placeholders substituted with concrete values.
def Substitute(self, pattern): if isinstance(pattern, bytes): substs = [re.escape(subst.encode("ascii")) for subst in self._substs] regex = re.compile(b"|".join(substs)) def Replacement(match): key = match.group(0).decode("ascii") return self._substs[key].encode("utf-8") elif isinstance(pattern, Text): substs = [re.escape(subst) for subst in self._substs] regex = re.compile("|".join(substs)) def Replacement(match): key = match.group(0) return self._substs[key] else: raise TypeError("Unexpected pattern type '{}'".format(type(pattern))) if not substs: return pattern else: return regex.sub(Replacement, pattern)
133,344
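A hypothetical usage of the two methods above; the class name Substitution and the exact call pattern are assumptions, but the %%var%% / %%scope.var%% placeholder convention follows the constructor shown earlier:

subst = Substitution(
    var_config={"os": "linux"},
    scope_config={"user": {"home": "/home/alice"}})
subst.Substitute("log at %%user.home%%/%%os%%.log")
# -> "log at /home/alice/linux.log"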
Initializes the interpolator. Args: pattern: A string (either of unicode or byte characters) with placeholders to format.
def __init__(self, pattern): super(Interpolator, self).__init__() self._pattern = pattern if isinstance(pattern, bytes): var_regex = re.compile(self._VAR_PLACEHOLDER_PATTERN.encode("ascii")) scope_regex = re.compile(self._SCOPE_PLACEHOLDER_PATTERN.encode("ascii")) decoder = lambda _: _.decode("ascii") elif isinstance(pattern, Text): var_regex = re.compile(self._VAR_PLACEHOLDER_PATTERN) scope_regex = re.compile(self._SCOPE_PLACEHOLDER_PATTERN) decoder = lambda _: _ else: raise TypeError("Unexpected pattern type '{}'".format(type(pattern))) self._vars = set() for matches in var_regex.finditer(pattern): var = matches.group("var") self._vars.add(decoder(var)) self._scopes = dict() for matches in scope_regex.finditer(pattern): scope = matches.group("scope") var = matches.group("var") self._scopes.setdefault(decoder(scope), set()).add(decoder(var)) self._var_bindings = collections.defaultdict(lambda: []) self._scope_bindings = collections.defaultdict(lambda: [])
133,345
Associates a value with given variable. This can be called multiple times to associate multiple values. Args: var_id: A variable id to bind the values to. value: A value to bind to the specified variable. Raises: KeyError: If given variable is not specified in the pattern.
def BindVar(self, var_id, value):
  if var_id not in self._vars:
    raise KeyError(var_id)

  self._var_bindings[var_id].append(value)
133,346
Associates given values with given scope. This can be called multiple times to associate multiple values. Args: scope_id: A scope id to bind the values to. values: A mapping from scope variable ids to values to bind in scope. Raises: KeyError: If given scope or scope variable is not specified in the pattern.
def BindScope(self, scope_id, values):
  if scope_id not in self._scopes:
    raise KeyError(scope_id)

  keys = set(iterkeys(values))
  if keys != self._scopes[scope_id]:
    raise KeyError(keys ^ self._scopes[scope_id])

  self._scope_bindings[scope_id].append(values)
133,347
Generates archive from a given collection. Iterates the collection and generates an archive by yielding contents of every referenced AFF4Stream. Args: items: Iterable of rdf_client_fs.StatEntry objects token: User's ACLToken. Yields: Binary chunks comprising the generated archive.
def Generate(self, items, token=None): del token # unused, to be removed with AFF4 code client_ids = set() for item_batch in collection.Batch(items, self.BATCH_SIZE): client_paths = set() for item in item_batch: try: client_path = flow_export.CollectionItemToClientPath( item, self.client_id) except flow_export.ItemNotExportableError: continue if not self.predicate(client_path): self.ignored_files.add(client_path) self.processed_files.add(client_path) continue client_ids.add(client_path.client_id) client_paths.add(client_path) for chunk in file_store.StreamFilesChunks(client_paths): self.processed_files.add(chunk.client_path) for output in self._WriteFileChunk(chunk=chunk): yield output self.processed_files |= client_paths - ( self.ignored_files | self.archived_files) if client_ids: for client_id, client_info in iteritems( data_store.REL_DB.MultiReadClientFullInfo(client_ids)): client = api_client.ApiClient().InitFromClientInfo(client_info) for chunk in self._GenerateClientInfo(client_id, client): yield chunk for chunk in self._GenerateDescription(): yield chunk yield self.archive_generator.Close()
133,371
Yields binary chunks, respecting archive file headers and footers. Args: chunk: the StreamedFileChunk to be written
def _WriteFileChunk(self, chunk):
  if chunk.chunk_index == 0:
    # Make sure size of the original file is passed. It's required
    # when output_writer is StreamingTarWriter.
    st = os.stat_result((0o644, 0, 0, 0, 0, 0, chunk.total_size, 0, 0, 0))
    target_path = _ClientPathToString(chunk.client_path, prefix=self.prefix)
    yield self.archive_generator.WriteFileHeader(target_path, st=st)

  yield self.archive_generator.WriteFileChunk(chunk.data)

  if chunk.chunk_index == chunk.total_chunks - 1:
    yield self.archive_generator.WriteFileFooter()
    self.archived_files.add(chunk.client_path)
133,372
Escape backslashes found inside a string quote. Backslashes followed by anything other than [\'"rnbt] will raise an Error. Args: string: The string that matched. match: The match object (m.group(1) is the escaped code) Raises: ParseError: For strings other than those used to define a regexp, raise an error if the escaped string is not one of [\'"rnbt].
def StringEscape(self, string, match, **_):
  precondition.AssertType(string, Text)

  # Allow unfiltered strings for regexp operations so that escaped special
  # characters (e.g. \*) or special sequences (e.g. \w) can be used in
  # objectfilter.
  if self.current_expression.operator == "regexp":
    self.string += compatibility.UnescapeString(string)
  elif match.group(1) in "\\'\"rnbt":
    self.string += compatibility.UnescapeString(string)
  else:
    raise ParseError("Invalid escape character %s." % string)
133,399
Initializes the action plugin. Args: grr_worker: The grr client worker object which may be used to e.g. send new actions on.
def __init__(self, grr_worker=None):
  self.grr_worker = grr_worker
  self.response_id = INITIAL_RESPONSE_ID
  self.cpu_used = None
  self.nanny_controller = None
  self.status = rdf_flows.GrrStatus(
      status=rdf_flows.GrrStatus.ReturnedStatus.OK)
  self._last_gc_run = rdfvalue.RDFDatetime.Now()
  self._gc_frequency = config.CONFIG["Client.gc_frequency"]
  self.proc = psutil.Process()
  self.cpu_start = self.proc.cpu_times()
  self.cpu_limit = rdf_flows.GrrMessage().cpu_limit
133,427
This function parses the RDFValue from the server. The Run method will be called with the specified RDFValue. Args: message: The GrrMessage that we are called to process. Returns: Upon return a callback will be called on the server to register the end of the function and pass back exceptions. Raises: RuntimeError: The arguments from the server do not match the expected rdf type.
def Execute(self, message): self.message = message if message: self.require_fastpoll = message.require_fastpoll args = None try: if self.message.args_rdf_name: if not self.in_rdfvalue: raise RuntimeError("Did not expect arguments, got %s." % self.message.args_rdf_name) if self.in_rdfvalue.__name__ != self.message.args_rdf_name: raise RuntimeError( "Unexpected arg type %s != %s." % (self.message.args_rdf_name, self.in_rdfvalue.__name__)) args = self.message.payload # Only allow authenticated messages in the client if self._authentication_required and ( self.message.auth_state != rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED): raise RuntimeError("Message for %s was not Authenticated." % self.message.name) self.cpu_start = self.proc.cpu_times() self.cpu_limit = self.message.cpu_limit if getattr(flags.FLAGS, "debug_client_actions", False): pdb.set_trace() try: self.Run(args) # Ensure we always add CPU usage even if an exception occurred. finally: used = self.proc.cpu_times() self.cpu_used = (used.user - self.cpu_start.user, used.system - self.cpu_start.system) except NetworkBytesExceededError as e: self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.NETWORK_LIMIT_EXCEEDED, "%r: %s" % (e, e), traceback.format_exc()) # We want to report back all errors and map Python exceptions to # Grr Errors. except Exception as e: # pylint: disable=broad-except self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR, "%r: %s" % (e, e), traceback.format_exc()) if flags.FLAGS.pdb_post_mortem: self.DisableNanny() pdb.post_mortem() if self.status.status != rdf_flows.GrrStatus.ReturnedStatus.OK: logging.info("Job Error (%s): %s", self.__class__.__name__, self.status.error_message) if self.status.backtrace: logging.debug(self.status.backtrace) if self.cpu_used: self.status.cpu_time_used.user_cpu_time = self.cpu_used[0] self.status.cpu_time_used.system_cpu_time = self.cpu_used[1] # This returns the error status of the Actions to the flow. self.SendReply(self.status, message_type=rdf_flows.GrrMessage.Type.STATUS) self._RunGC()
133,428
Parse responses with applicable parsers. Args: parser_factory: A parser factory for specific artifact. responses: A list of responses from the client. flow_obj: An artifact collection flow. Returns: A list of (possibly parsed) responses.
def ApplyParsersToResponses(parser_factory, responses, flow_obj): # We have some processors to run. knowledge_base = flow_obj.state.knowledge_base parsed_responses = [] if parser_factory.HasSingleResponseParsers(): for response in responses: for parser in parser_factory.SingleResponseParsers(): parsed_responses.extend( parser.ParseResponse(knowledge_base, response, flow_obj.args.path_type)) for parser in parser_factory.MultiResponseParsers(): parsed_responses.extend(parser.ParseResponses(knowledge_base, responses)) has_single_file_parsers = parser_factory.HasSingleFileParsers() has_multi_file_parsers = parser_factory.HasMultiFileParsers() if has_single_file_parsers or has_multi_file_parsers: precondition.AssertIterableType(responses, rdf_client_fs.StatEntry) pathspecs = [response.pathspec for response in responses] if data_store.RelationalDBEnabled(): # TODO(amoser): This is not super efficient, AFF4 provided an api to open # all pathspecs at the same time, investigate if optimizing this is worth # it. filedescs = [] for pathspec in pathspecs: client_path = db.ClientPath.FromPathSpec(flow_obj.client_id, pathspec) filedescs.append(file_store.OpenFile(client_path)) else: filedescs = MultiOpenAff4File(flow_obj, pathspecs) if has_single_file_parsers: for response, filedesc in zip(responses, filedescs): for parser in parser_factory.SingleFileParsers(): parsed_responses.extend( parser.ParseFile(knowledge_base, response.pathspec, filedesc)) if has_multi_file_parsers: for parser in parser_factory.MultiFileParsers(): parsed_responses.extend( parser.ParseFiles(knowledge_base, pathspecs, filedescs)) return parsed_responses or responses
133,442
Escapes wildcard characters for strings intended to be used with `LIKE`. Databases don't automatically escape wildcard characters ('%', '_'), so any non-literal string that is passed to `LIKE` and is expected to match literally has to be manually escaped. Args: string: A string to escape. Returns: An escaped string.
def EscapeWildcards(string):
  precondition.AssertType(string, Text)
  return string.replace("%", r"\%").replace("_", r"\_")
133,447
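Illustrative usage, assuming the escaped fragment is then embedded in a LIKE pattern built elsewhere:

fragment = EscapeWildcards("100%_done")  # value: 100\%\_done
pattern = "%" + fragment + "%"           # matches the fragment literally anywhere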
Returns client-activity metrics for a particular statistic. Args: day_buckets: A set of n-day-active buckets. extract_statistic_fn: A function that extracts the statistic's value from a ClientFullInfo object.
def _CountClientStatisticByLabel(self, day_buckets, extract_statistic_fn):
  counts = collections.defaultdict(int)
  now = rdfvalue.RDFDatetime.Now()

  for info in self.IterateAllClientsFullInfo(batch_size=db.MAX_COUNT):
    if not info.metadata.ping:
      continue

    statistic_value = extract_statistic_fn(info)
    for client_label in info.GetLabelsNames(owner="GRR"):
      for day_bucket in day_buckets:
        time_boundary = now - rdfvalue.Duration.FromDays(day_bucket)
        if info.metadata.ping > time_boundary:
          # Count the client if it has been active in the last 'day_bucket'
          # days.
          counts[(statistic_value, client_label, day_bucket)] += 1

  return dict(counts)
133,478
Processes the results of the ListDirectory client action. Args: responses: a flow Responses object.
def ProcessListDirectory(self, responses):
  if not responses.success:
    raise flow.FlowError("Unable to list directory.")

  with data_store.DB.GetMutationPool() as pool:
    for response in responses:
      stat_entry = rdf_client_fs.StatEntry(response)
      filesystem.CreateAFF4Object(
          stat_entry, self.client_urn, pool, token=self.token)
      self.SendReply(stat_entry)
133,498
Initialise the parser, presenting file contents to parse. Args: contents: file contents that are to be parsed.
def __init__(self, contents):
  precondition.AssertOptionalType(contents, Text)
  self.contents = contents
133,499
Verifies the certificate using the given key. Args: public_key: The public key to use. Returns: True: Everything went well. Raises: VerificationError: The certificate did not verify.
def Verify(self, public_key):
  # TODO(amoser): We have to do this manually for now since cryptography does
  # not yet support cert verification. There is PR 2460:
  # https://github.com/pyca/cryptography/pull/2460/files
  # that will add it, once it's in we should switch to using this.

  # Note that all times here are in UTC.
  now = rdfvalue.RDFDatetime.Now().AsDatetime()
  if now > self._value.not_valid_after:
    raise VerificationError("Certificate expired!")
  if now < self._value.not_valid_before:
    raise VerificationError("Certificate not yet valid!")

  public_key.Verify(
      self._value.tbs_certificate_bytes,
      self._value.signature,
      hash_algorithm=self._value.signature_hash_algorithm)
  return True
133,512
Creates a new cert for the given common name. Args: csr: A CertificateSigningRequest. Returns: The signed cert.
def ClientCertFromCSR(cls, csr): builder = x509.CertificateBuilder() # Use the client CN for a cert serial_id. This will ensure we do # not have clashing cert id. common_name = csr.GetCN() serial = int(common_name.split(".")[1], 16) builder = builder.serial_number(serial) builder = builder.subject_name( x509.Name( [x509.NameAttribute(oid.NameOID.COMMON_NAME, str(common_name))])) now = rdfvalue.RDFDatetime.Now() now_plus_year = now + rdfvalue.Duration("52w") builder = builder.not_valid_after(now_plus_year.AsDatetime()) now_minus_ten = now - rdfvalue.Duration("10s") builder = builder.not_valid_before(now_minus_ten.AsDatetime()) # TODO(user): dependency loop with # grr/core/grr_response_core/config/client.py. # pylint: disable=protected-access ca_cert = config_lib._CONFIG["CA.certificate"] # pylint: enable=protected-access builder = builder.issuer_name(ca_cert.GetIssuer()) builder = builder.public_key(csr.GetPublicKey().GetRawPublicKey()) # TODO(user): dependency loop with # grr/core/grr_response_core/config/client.py. # pylint: disable=protected-access ca_key = config_lib._CONFIG["PrivateKeys.ca_key"] # pylint: enable=protected-access return RDFX509Cert( builder.sign( private_key=ca_key.GetRawPrivateKey(), algorithm=hashes.SHA256(), backend=openssl.backend))
133,513
Verify the data in this blob. Args: public_key: The public key to use for verification. Returns: True when verification succeeds. Raises: rdfvalue.DecodeError if the data cannot be verified.
def Verify(self, public_key):
  if self.digest_type != self.HashType.SHA256:
    raise rdfvalue.DecodeError("Unsupported digest.")
  if self.signature_type not in [
      self.SignatureType.RSA_PKCS1v15, self.SignatureType.RSA_PSS
  ]:
    raise rdfvalue.DecodeError("Unsupported signature type.")

  try:
    public_key.Verify(self.data, self.signature)
  except InvalidSignature as e:
    raise rdfvalue.DecodeError("Could not verify blob. Error: %s" % e)

  return True
133,529
Use the data to sign this blob. Args: data: String containing the blob data. signing_key: The key to sign with. verify_key: Key to verify with. If None we assume the signing key also contains the public key. Returns: self for call chaining.
def Sign(self, data, signing_key, verify_key=None):
  if signing_key.KeyLen() < 2048:
    logging.warning("signing key is too short.")

  self.signature = signing_key.Sign(data)
  self.signature_type = self.SignatureType.RSA_PKCS1v15

  self.digest = hashlib.sha256(data).digest()
  self.digest_type = self.HashType.SHA256
  self.data = data

  # Test we can verify before we send it off.
  if verify_key is None:
    verify_key = signing_key.GetPublicKey()

  # Verify our own data.
  self.Verify(verify_key)

  return self
133,530
Init. Args: key: The key, a rdf_crypto.EncryptionKey instance. iv: The iv, a rdf_crypto.EncryptionKey instance.
def __init__(self, key, iv):
  self.key = key.RawBytes()
  self.iv = iv.RawBytes()
133,535
Generates archive from a given collection. Iterates the collection and generates an archive by yielding contents of every referenced AFF4Stream. Args: items: Iterable with items that point to aff4 paths. token: User's ACLToken. Yields: Binary chunks comprising the generated archive.
def Generate(self, items, token=None): clients = set() for fd_urn_batch in collection.Batch( self._ItemsToUrns(items), self.BATCH_SIZE): self.total_files += len(fd_urn_batch) fds_to_write = {} for fd in aff4.FACTORY.MultiOpen(fd_urn_batch, token=token): # Derive a ClientPath from AFF4 URN to make new and old # archive_generator predicate input consistent. # TODO(user): This code is clearly hacky and intended to be removed. urn_components = fd.urn.Split() if urn_components[1:3] != ["fs", "os"]: raise AssertionError("URN components are expected to start with " "client, 'fs', 'os'. Got %r" % (urn_components,)) client_path = db.ClientPath.OS( client_id=urn_components[0], components=urn_components[3:]) if not self.predicate(client_path): self.ignored_files.add(utils.SmartUnicode(fd.urn)) continue # Any file-like object with data in AFF4 should inherit AFF4Stream. if isinstance(fd, aff4.AFF4Stream): urn_components = fd.urn.Split() clients.add(rdf_client.ClientURN(urn_components[0])) content_path = os.path.join(self.prefix, *urn_components) # Make sure size of the original file is passed. It's required # when output_writer is StreamingTarWriter. st = os.stat_result((0o644, 0, 0, 0, 0, 0, fd.size, 0, 0, 0)) fds_to_write[fd] = (content_path, st) if fds_to_write: prev_fd = None for fd, chunk, exception in aff4.AFF4Stream.MultiStream(fds_to_write): if exception: logging.exception(exception) try: self.archived_files.remove(utils.SmartUnicode(fd.urn)) except KeyError: pass # Failing is fine, since removal should be idempotent. self.failed_files.add(utils.SmartUnicode(fd.urn)) continue if prev_fd != fd: if prev_fd: yield self.archive_generator.WriteFileFooter() prev_fd = fd content_path, st = fds_to_write[fd] yield self.archive_generator.WriteFileHeader(content_path, st=st) yield self.archive_generator.WriteFileChunk(chunk) self.archived_files.add(utils.SmartUnicode(fd.urn)) if self.archive_generator.is_file_write_in_progress: yield self.archive_generator.WriteFileFooter() if clients: for client_urn_batch in collection.Batch(clients, self.BATCH_SIZE): for fd in aff4.FACTORY.MultiOpen( client_urn_batch, aff4_type=aff4_grr.VFSGRRClient, token=token): for chunk in self._GenerateClientInfo(fd): yield chunk for chunk in self._GenerateDescription(): yield chunk yield self.archive_generator.Close()
133,555
Returns the AFF4 URN this pathspec will be stored under. Args: client_urn: A ClientURN. Returns: A urn that corresponds to this pathspec. Raises: ValueError: If pathspec is not of the correct type.
def AFF4Path(self, client_urn): # If the first level is OS and the second level is TSK its probably a mount # point resolution. We map it into the tsk branch. For example if we get: # path: \\\\.\\Volume{1234}\\ # pathtype: OS # mount_point: /c:/ # nested_path { # path: /windows/ # pathtype: TSK # } # We map this to aff4://client_id/fs/tsk/\\\\.\\Volume{1234}\\/windows/ if not self.HasField("pathtype"): raise ValueError("Can't determine AFF4 path without a valid pathtype.") first_component = self[0] dev = first_component.path if first_component.HasField("offset"): # We divide here just to get prettier numbers in the GUI dev += ":{}".format(first_component.offset // 512) if (len(self) > 1 and first_component.pathtype == PathSpec.PathType.OS and self[1].pathtype == PathSpec.PathType.TSK): result = [self.AFF4_PREFIXES[PathSpec.PathType.TSK], dev] # Skip the top level pathspec. start = 1 else: # For now just map the top level prefix based on the first pathtype result = [self.AFF4_PREFIXES[first_component.pathtype]] start = 0 for p in self[start]: component = p.path # The following encode different pathspec properties into the AFF4 path in # such a way that unique files on the client are mapped to unique URNs in # the AFF4 space. Note that this transformation does not need to be # reversible since we always use the PathSpec when accessing files on the # client. if p.HasField("offset"): component += ":{}".format(p.offset // 512) # Support ADS names. if p.HasField("stream_name"): component += ":" + p.stream_name result.append(component) return client_urn.Add("/".join(result))
133,569
Encodes an object. Args: object_ (object): Object to encode. Returns: object: Encoding of the object.
def encode(self, object_):
    if self.enforce_reversible:
        self.enforce_reversible = False
        if self.decode(self.encode(object_)) != object_:
            raise ValueError('Encoding is not reversible for "%s"' % object_)
        self.enforce_reversible = True

    return object_
133,851
Decodes an object. Args: object_ (object): Encoded object. Returns: object: Object decoded.
def decode(self, encoded):
    if self.enforce_reversible:
        self.enforce_reversible = False
        if self.encode(self.decode(encoded)) != encoded:
            raise ValueError('Decoding is not reversible for "%s"' % encoded)
        self.enforce_reversible = True

    return encoded
133,853
Inverse of _escape_token(). Args: escaped_token: a unicode string Returns: token: a unicode string
def _unescape_token(escaped_token):

  def match(m):
    if m.group(1) is None:
      return u"_" if m.group(0) == u"\\u" else u"\\"

    try:
      return six.unichr(int(m.group(1)))
    except (ValueError, OverflowError):
      return ""

  trimmed = escaped_token[:-1] if escaped_token.endswith("_") else escaped_token
  return _UNESCAPE_REGEX.sub(match, trimmed)
133,865
Converts a list of tokens to a list of subtokens. Args: tokens: a list of strings. Returns: a list of subtoken strings.
def _tokens_to_subtoken(self, tokens):
  ret = []
  for token in tokens:
    ret.extend(
        self._escaped_token_to_subtoken_strings(
            _escape_token(token, self._alphabet)))
  return ret
133,866
Converts a list of subtokens to a list of tokens. Args: subtokens: a list of subtoken strings. Returns: a list of strings.
def _subtoken_to_tokens(self, subtokens):
  concatenated = "".join(subtokens)
  split = concatenated.split("_")
  return [_unescape_token(t + "_") for t in split if t]
133,867
Converts an escaped token string to a list of subtoken strings. Args: escaped_token: An escaped token as a unicode string. Returns: A list of subtokens as unicode strings.
def _escaped_token_to_subtoken_strings(self, escaped_token):
  # NOTE: This algorithm is greedy; it won't necessarily produce the "best"
  # list of subtokens.
  ret = []
  start = 0
  token_len = len(escaped_token)
  while start < token_len:
    for end in xrange(min(token_len, start + self._max_subtoken_len), start, -1):
      subtoken = escaped_token[start:end]
      if subtoken in self._all_subtoken_strings:
        ret.append(subtoken)
        start = end
        break
    else:  # Did not break
      # If there is no possible encoding of the escaped token then one of the
      # characters in the token is not in the alphabet. This should be
      # impossible and would be indicative of a bug.
      assert False, "Token substring not found in subtoken vocabulary."

  return ret
133,868
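The greedy longest-match segmentation above can be sketched standalone, with the vocabulary passed in explicitly (names and error handling here are illustrative, not the class API):

def greedy_segment(token, vocab, max_subtoken_len):
  ret = []
  start = 0
  while start < len(token):
    # Try the longest candidate first, then shrink until a vocabulary hit.
    for end in range(min(len(token), start + max_subtoken_len), start, -1):
      piece = token[start:end]
      if piece in vocab:
        ret.append(piece)
        start = end
        break
    else:
      raise ValueError("No encoding for %r" % token[start])
  return ret

# greedy_segment("hello_", {"h", "e", "l", "o", "_", "hell", "lo_"}, 4)
# -> ["hell", "o", "_"]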
Train a SubwordTextTokenizer based on a dictionary of word counts. Args: token_counts: a dictionary of Unicode strings to int. min_count: an integer - discard subtokens with lower counts. num_iterations: an integer; how many iterations of refinement.
def build_from_token_counts(self, token_counts, min_count, num_iterations=4): self._init_alphabet_from_tokens(six.iterkeys(token_counts)) # Bootstrap the initial list of subtokens with the characters from the # alphabet plus the escaping characters. self._init_subtokens_from_list(list(self._alphabet)) # We build iteratively. On each iteration, we segment all the words, # then count the resulting potential subtokens, keeping the ones # with high enough counts for our new vocabulary. if min_count < 1: min_count = 1 for i in xrange(num_iterations): # Collect all substrings of the encoded token that break along current # subtoken boundaries. subtoken_counts = collections.defaultdict(int) for token, count in six.iteritems(token_counts): escaped_token = _escape_token(token, self._alphabet) subtokens = self._escaped_token_to_subtoken_strings(escaped_token) start = 0 for subtoken in subtokens: for end in xrange(start + 1, len(escaped_token) + 1): new_subtoken = escaped_token[start:end] subtoken_counts[new_subtoken] += count start += len(subtoken) # Array of sets of candidate subtoken strings, by length. len_to_subtoken_strings = [] for subtoken_string, count in six.iteritems(subtoken_counts): lsub = len(subtoken_string) if count >= min_count: while len(len_to_subtoken_strings) <= lsub: len_to_subtoken_strings.append(set()) len_to_subtoken_strings[lsub].add(subtoken_string) # Consider the candidates longest to shortest, so that if we accept # a longer subtoken string, we can decrement the counts of its # prefixes. new_subtoken_strings = [] for lsub in xrange(len(len_to_subtoken_strings) - 1, 0, -1): subtoken_strings = len_to_subtoken_strings[lsub] for subtoken_string in subtoken_strings: count = subtoken_counts[subtoken_string] if count >= min_count: # Exclude alphabet tokens here, as they must be included later, # explicitly, regardless of count. if subtoken_string not in self._alphabet: new_subtoken_strings.append((count, subtoken_string)) for l in xrange(1, lsub): subtoken_counts[subtoken_string[:l]] -= count # Include the alphabet explicitly to guarantee all strings are # encodable. new_subtoken_strings.extend((subtoken_counts.get(a, 0), a) for a in self._alphabet) new_subtoken_strings.sort(reverse=True) # Reinitialize to the candidate vocabulary. self._init_subtokens_from_list([subtoken for _, subtoken in new_subtoken_strings])
133,873
Pad a ``tensor`` to ``length`` with ``padding_index``. Args: tensor (torch.Tensor [n, ...]): Tensor to pad. length (int): Pad the ``tensor`` up to ``length``. padding_index (int, optional): Index to pad tensor with. Returns (torch.Tensor [length, ...]) Padded Tensor.
def pad_tensor(tensor, length, padding_index=DEFAULT_PADDING_INDEX):
    n_padding = length - tensor.shape[0]
    assert n_padding >= 0
    if n_padding == 0:
        return tensor
    padding = tensor.new(n_padding, *tensor.shape[1:]).fill_(padding_index)
    return torch.cat((tensor, padding), dim=0)
133,891
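Illustrative usage, assuming a padding index of 0:

import torch

padded = pad_tensor(torch.tensor([1, 2, 3]), length=5, padding_index=0)
# padded -> tensor([1, 2, 3, 0, 0])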
Pad a :class:`list` of ``tensors`` (``batch``) with ``padding_index``. Args: batch (:class:`list` of :class:`torch.Tensor`): Batch of tensors to pad. padding_index (int, optional): Index to pad tensors with. dim (int, optional): Dimension on to which to concatenate the batch of tensors. Returns torch.Tensor, torch.Tensor: Padded tensors and original lengths of tensors.
def stack_and_pad_tensors(batch, padding_index=DEFAULT_PADDING_INDEX, dim=0):
    lengths = [tensor.shape[0] for tensor in batch]
    max_len = max(lengths)
    padded = [pad_tensor(tensor, max_len, padding_index) for tensor in batch]
    lengths = torch.tensor(lengths)
    padded = torch.stack(padded, dim=dim).contiguous()
    for _ in range(dim):
        lengths = lengths.unsqueeze(0)
    return padded, lengths
133,892
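Illustrative usage, batching two variable-length sequences with a padding index of 0:

import torch

batch = [torch.tensor([1, 2, 3]), torch.tensor([4, 5])]
padded, lengths = stack_and_pad_tensors(batch, padding_index=0)
# padded  -> tensor([[1, 2, 3], [4, 5, 0]])
# lengths -> tensor([3, 2])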
Encodes a ``sequence``. Args: sequence (str): String ``sequence`` to encode. Returns: torch.Tensor: Encoding of the ``sequence``.
def encode(self, sequence):
    sequence = super().encode(sequence)
    sequence = self.tokenize(sequence)
    vector = [self.stoi.get(token, self.unknown_index) for token in sequence]
    if self.append_eos:
        vector.append(self.eos_index)
    return torch.tensor(vector)
133,896
Decodes a tensor into a sequence. Args: encoded (torch.Tensor): Encoded sequence. Returns: str: Sequence decoded from ``encoded``.
def decode(self, encoded):
    encoded = super().decode(encoded)
    tokens = [self.itos[index] for index in encoded]
    return self.detokenize(tokens)
133,897
Get a column or row from the dataset. Args: key (str or int): String referencing a column or integer referencing a row Returns: :class:`list` or :class:`dict`: List of column values or a dict representing a row
def __getitem__(self, key):
    # Given a column string, return a list of column values.
    if isinstance(key, str):
        if key not in self.columns:
            raise AttributeError('Key not in columns.')
        return [row[key] if key in row else None for row in self.rows]
    # Given a row integer or slice, return the row values.
    elif isinstance(key, (int, slice)):
        return self.rows[key]
    else:
        raise TypeError('Invalid argument type.')
133,899
Set a column or row for a dataset. Args: key (str or int): String referencing a column or integer referencing a row item (list or dict): Column or rows to set in the dataset.
def __setitem__(self, key, item): if isinstance(key, str): column = item self.columns.add(key) if len(column) > len(self.rows): for i, value in enumerate(column): if i < len(self.rows): self.rows[i][key] = value else: self.rows.append({key: value}) else: for i, row in enumerate(self.rows): if i < len(column): self.rows[i][key] = column[i] else: self.rows[i][key] = None elif isinstance(key, slice): rows = item for row in rows: if not isinstance(row, dict): raise ValueError('Row must be a dict.') self.columns.update(row.keys()) self.rows[key] = rows elif isinstance(key, int): row = item if not isinstance(row, dict): raise ValueError('Row must be a dict.') self.columns.update(row.keys()) self.rows[key] = row else: raise TypeError('Invalid argument type.')
133,900
``reporthook`` to use with ``urllib.request`` that prints the progress of the download. Uses ``tqdm`` for the progress bar. **Reference:** https://github.com/tqdm/tqdm Args: t (tqdm.tqdm) Progress bar. Example: >>> with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t: # doctest: +SKIP ... urllib.request.urlretrieve(file_url, filename=full_path, reporthook=reporthook(t))
def _reporthook(t):
    last_b = [0]

    def inner(b=1, bsize=1, tsize=None):
        if tsize is not None:
            t.total = tsize
        t.update((b - last_b[0]) * bsize)
        last_b[0] = b

    return inner
133,902
Download filename from google drive unless it's already in directory. Args: filename (str): Name of the file to download to (do nothing if it already exists). url (str): URL to download from.
def _download_file_from_drive(filename, url): # pragma: no cover confirm_token = None # Since the file is big, drive will scan it for virus and take it to a # warning page. We find the confirm token on this page and append it to the # URL to start the download process. confirm_token = None session = requests.Session() response = session.get(url, stream=True) for k, v in response.cookies.items(): if k.startswith("download_warning"): confirm_token = v if confirm_token: url = url + "&confirm=" + confirm_token logger.info("Downloading %s to %s" % (url, filename)) response = session.get(url, stream=True) # Now begin the download. chunk_size = 16 * 1024 with open(filename, "wb") as f: for chunk in response.iter_content(chunk_size): if chunk: f.write(chunk) # Print newline to clear the carriage return from the download progress statinfo = os.stat(filename) logger.info("Successfully downloaded %s, %s bytes." % (filename, statinfo.st_size))
133,903
Extract a compressed file to ``directory``. Args: compressed_filename (str): Compressed file. directory (str): Extract to directory. extension (str, optional): Extension of the file; Otherwise, attempts to extract extension from the filename.
def _maybe_extract(compressed_filename, directory, extension=None):
    logger.info('Extracting {}'.format(compressed_filename))

    if extension is None:
        basename = os.path.basename(compressed_filename)
        extension = basename.split('.', 1)[1]

    if 'zip' in extension:
        with zipfile.ZipFile(compressed_filename, "r") as zip_:
            zip_.extractall(directory)
    elif 'tar' in extension or 'tgz' in extension:
        with tarfile.open(compressed_filename, mode='r') as tar:
            tar.extractall(path=directory)

    logger.info('Extracted {}'.format(compressed_filename))
133,904
Return a filename from a URL Args: url (str): URL to extract filename from Returns: (str): Filename in URL
def _get_filename_from_url(url):
    parse = urlparse(url)
    return os.path.basename(parse.path)
133,905
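Illustrative usage; query strings are ignored and only the final path component is returned:

_get_filename_from_url('https://example.com/data/train.csv?raw=true')
# -> 'train.csv'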
Download the files at ``urls`` to ``directory``. Extract to ``directory`` if tar or zip. Args: urls (str): Url of files. directory (str): Directory to download to. check_files (list of str): Check if these files exist, ensuring the download succeeded. If these files exist before the download, the download is skipped. Raises: ValueError: Error if one of the ``check_files`` are not found following the download.
def download_files_maybe_extract(urls, directory, check_files=[]):
    check_files = [os.path.join(directory, f) for f in check_files]
    if _check_download(*check_files):
        return

    for url in urls:
        download_file_maybe_extract(url=url, directory=directory)

    if not _check_download(*check_files):
        raise ValueError('[DOWNLOAD FAILED] `*check_files` not found')
133,907
Get all tensors associated with ``object_`` Args: object_ (any): Any object to look for tensors. Returns: (list of torch.tensor): List of tensors that are associated with ``object_``.
def get_tensors(object_):
    if torch.is_tensor(object_):
        return [object_]
    elif isinstance(object_, (str, float, int)):
        return []

    tensors = set()

    if isinstance(object_, collections.abc.Mapping):
        for value in object_.values():
            tensors.update(get_tensors(value))
    elif isinstance(object_, collections.abc.Iterable):
        for value in object_:
            tensors.update(get_tensors(value))
    else:
        members = [
            value for key, value in inspect.getmembers(object_)
            if not isinstance(value, (collections.abc.Callable, type(None)))
        ]
        tensors.update(get_tensors(members))

    return tensors
133,917
Given a batch sampler or sampler, returns examples instead of indices. Args: dataset (torch.utils.data.Dataset): Dataset to sample from. sampler (torch.utils.data.sampler.Sampler): Sampler over the dataset. Returns: generator over dataset examples
def sampler_to_iterator(dataset, sampler): for sample in sampler: if isinstance(sample, (list, tuple)): # yield a batch yield [dataset[i] for i in sample] else: # yield a single example yield dataset[sample]
133,918
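A sketch pairing the helper with a torch ``BatchSampler``; the list of strings is a stand-in for a ``torch.utils.data.Dataset``.

from torch.utils.data.sampler import BatchSampler, SequentialSampler

dataset = ['a', 'b', 'c', 'd', 'e']
batch_sampler = BatchSampler(SequentialSampler(dataset), batch_size=2, drop_last=False)

# Batched indices become batches of examples; a plain sampler would yield single examples.
print(list(sampler_to_iterator(dataset, batch_sampler)))
# [['a', 'b'], ['c', 'd'], ['e']]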
Compute ``torch.equal`` with the optional mask parameter.

    Args:
        tensor (torch.Tensor): First tensor to compare.
        tensor_other (torch.Tensor): Second tensor to compare.
        ignore_index (int, optional): Specifies a ``tensor`` index that is ignored.

    Returns:
        (bool) Returns ``True`` if target and prediction are equal.
def torch_equals_ignore_index(tensor, tensor_other, ignore_index=None):
    if ignore_index is not None:
        assert tensor.size() == tensor_other.size()
        mask_arr = tensor.ne(ignore_index)
        tensor = tensor.masked_select(mask_arr)
        tensor_other = tensor_other.masked_select(mask_arr)
    return torch.equal(tensor, tensor_other)
133,920
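A short sketch showing the effect of the mask: positions equal to ``ignore_index`` in the first tensor (0 here, standing in for a padding index) are dropped before the comparison.

import torch

a = torch.tensor([5, 7, 0, 0])
b = torch.tensor([5, 7, 9, 9])
print(torch.equal(a, b))                                # False
print(torch_equals_ignore_index(a, b, ignore_index=0))  # True -- padded positions ignored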
Decodes a tensor into a sequence.

    Args:
        encoded (torch.Tensor): Encoded sequence.

    Returns:
        str: Sequence decoded from ``encoded``.
def decode(self, encoded):
    encoded = super().decode(encoded)
    return self.tokenizer.decode([self.itos[index] for index in encoded])
133,932
Encodes a ``label``.

    Args:
        label (object): Label to encode.

    Returns:
        torch.Tensor: Encoding of the label.
def encode(self, label):
    label = super().encode(label)
    return torch.tensor(self.stoi.get(label, self.unknown_index))
133,934
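A standalone illustration of the ``stoi`` / ``unknown_index`` lookup that ``encode`` performs. The attribute names mirror the encoder above, but this is a plain-dict sketch, not the library's API, and the label vocabulary is invented.

import torch

stoi = {'<unk>': 0, 'positive': 1, 'negative': 2}
unknown_index = stoi['<unk>']

def encode_label(label):
    # Unseen labels fall back to the unknown index.
    return torch.tensor(stoi.get(label, unknown_index))

print(encode_label('positive'))  # tensor(1)
print(encode_label('neutral'))   # tensor(0) -- unknown label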
Decodes ``encoded`` label.

    Args:
        encoded (torch.Tensor): Encoded label.

    Returns:
        object: Label decoded from ``encoded``.
def decode(self, encoded):
    encoded = super().decode(encoded)
    if encoded.numel() > 1:
        raise ValueError(
            '``decode`` decodes one label at a time, use ``batch_decode`` instead.')
    return self.itos[encoded.squeeze().item()]
133,936
Computes the Gibbs transition models from a Bayesian Network.
    'Probabilistic Graphical Models: Principles and Techniques', Koller and
    Friedman, Section 12.3.3 pp 512-513.

    Parameters:
    -----------
    model: BayesianModel
        The model from which probabilities will be computed.
def _get_kernel_from_bayesian_model(self, model):
    self.variables = np.array(model.nodes())
    self.cardinalities = {var: model.get_cpds(var).variable_card for var in self.variables}

    for var in self.variables:
        other_vars = [v for v in self.variables if var != v]
        other_cards = [self.cardinalities[v] for v in other_vars]
        cpds = [cpd for cpd in model.cpds if var in cpd.scope()]
        prod_cpd = factor_product(*cpds)
        kernel = {}
        scope = set(prod_cpd.scope())
        for tup in itertools.product(*[range(card) for card in other_cards]):
            states = [State(v, s) for v, s in zip(other_vars, tup) if v in scope]
            prod_cpd_reduced = prod_cpd.reduce(states, inplace=False)
            kernel[tup] = prod_cpd_reduced.values / sum(prod_cpd_reduced.values)
        self.transition_models[var] = kernel
136,750
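A hand-built sketch of the kernel structure this method produces: for each variable, a dict mapping an assignment of the *other* variables to a normalized conditional distribution. NumPy only; the joint table and variable names are made up for illustration.

import itertools
import numpy as np

# Joint P(A, B) over two binary variables (rows: A, columns: B).
joint = np.array([[0.3, 0.1],
                  [0.2, 0.4]])

cardinalities = {'A': 2, 'B': 2}
kernel_for_A = {}
for (b,) in itertools.product(range(cardinalities['B'])):
    column = joint[:, b]                        # unnormalized slice for B = b
    kernel_for_A[(b,)] = column / column.sum()  # conditional P(A | B = b)

print(kernel_for_A[(0,)])  # [0.6 0.4]
print(kernel_for_A[(1,)])  # [0.2 0.8]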
Computes the Gibbs transition models from a Markov Network.
    'Probabilistic Graphical Models: Principles and Techniques', Koller and
    Friedman, Section 12.3.3 pp 512-513.

    Parameters:
    -----------
    model: MarkovModel
        The model from which probabilities will be computed.
def _get_kernel_from_markov_model(self, model):
    self.variables = np.array(model.nodes())
    factors_dict = {var: [] for var in self.variables}
    for factor in model.get_factors():
        for var in factor.scope():
            factors_dict[var].append(factor)

    # Take factor product
    factors_dict = {var: factor_product(*factors) if len(factors) > 1 else factors[0]
                    for var, factors in factors_dict.items()}
    self.cardinalities = {var: factors_dict[var].get_cardinality([var])[var] for var in self.variables}

    for var in self.variables:
        other_vars = [v for v in self.variables if var != v]
        other_cards = [self.cardinalities[v] for v in other_vars]
        kernel = {}
        factor = factors_dict[var]
        scope = set(factor.scope())
        for tup in itertools.product(*[range(card) for card in other_cards]):
            states = [State(first_var, s) for first_var, s in zip(other_vars, tup) if first_var in scope]
            reduced_factor = factor.reduce(states, inplace=False)
            kernel[tup] = reduced_factor.values / sum(reduced_factor.values)
        self.transition_models[var] = kernel
136,751
Extracting the clique from the junction tree that contains all of the given nodes
    (i.e. the given nodes form a subset of the clique).

    Parameters:
    ----------
    junction_tree: JunctionTree
        Junction tree from which the clique is to be extracted.
    nodes: iterable container
        A container of nodes (list, dict, set, etc.).
def _get_clique(self, junction_tree, nodes):
    return [clique for clique in junction_tree.nodes() if set(nodes).issubset(clique)][0]
137,009
Getting the evidence belonging to a particular timeslice.

    Parameters:
    ----------
    evidence_dict: dict
        a dict key, value pair as {var: state_of_var_observed};
        None if no evidence
    time_slice: int
        the time slice whose evidence is to be returned
    shift: int
        shifting the evidence corresponding to the given time slice.
def _get_evidence(self, evidence_dict, time_slice, shift):
    if evidence_dict:
        return {(node[0], shift): evidence_dict[node] for node in evidence_dict if node[1] == time_slice}
137,010
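A plain-dict sketch of the filtering and shifting above: DBN nodes are (variable, time_slice) tuples, and evidence from one slice is re-keyed to another. The evidence values are hypothetical.

evidence = {('X', 0): 1, ('Y', 0): 0, ('X', 1): 2}

def get_evidence(evidence_dict, time_slice, shift):
    # Same comprehension as the method above, written as a free function.
    if evidence_dict:
        return {(node[0], shift): evidence_dict[node]
                for node in evidence_dict if node[1] == time_slice}

print(get_evidence(evidence, time_slice=1, shift=0))  # {('X', 0): 2}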
Marginalizing the factor selectively for a set of variables.

    Parameters:
    ----------
    nodes: list, array-like
        A container of nodes (list, dict, set, etc.).
    factor: factor
        factor which is to be marginalized.
def _marginalize_factor(self, nodes, factor):
    marginalizing_nodes = list(set(factor.scope()).difference(nodes))
    return factor.marginalize(marginalizing_nodes, inplace=False)
137,011
Method for updating the belief.

    Parameters:
    ----------
    belief_prop: Belief Propagation
        Belief Propagation which needs to be updated.
    clique: clique
        The clique whose factor needs to be updated.
    clique_potential: factor
        Multiplying factor which will be multiplied to the factor corresponding to the clique.
    message: factor, optional
        Incoming message used instead of the clique potential when updating, if given.
def _update_belief(self, belief_prop, clique, clique_potential, message=None):
    old_factor = belief_prop.junction_tree.get_factors(clique)
    belief_prop.junction_tree.remove_factors(old_factor)
    if message:
        if message.scope() and clique_potential.scope():
            new_factor = old_factor * message
            new_factor = new_factor / clique_potential
        else:
            new_factor = old_factor
    else:
        new_factor = old_factor * clique_potential
    belief_prop.junction_tree.add_factors(new_factor)
    belief_prop.calibrate()
137,012
Extracts the required factor from the junction tree.

    Parameters:
    ----------
    belief_prop: Belief Propagation
        Belief Propagation which needs to be updated.
    evidence: dict
        a dict key, value pair as {var: state_of_var_observed}
def _get_factor(self, belief_prop, evidence):
    final_factor = factor_product(*belief_prop.junction_tree.get_factors())
    if evidence:
        for var in evidence:
            if var in final_factor.scope():
                final_factor.reduce([(var, evidence[var])])
    return final_factor
137,013
Shifting the factor to a certain required time slice.

    Parameters:
    ----------
    factor: DiscreteFactor
        The factor which needs to be shifted.
    shift: int
        The new timeslice to which the factor should belong to.
def _shift_factor(self, factor, shift):
    new_scope = self._shift_nodes(factor.scope(), shift)
    return DiscreteFactor(new_scope, factor.cardinality, factor.values)
137,014
Add a variable to the model.

    Parameters:
    -----------
    variable: any hashable python object

    card: int
        Representing the cardinality of the variable to be added.

    Examples:
    ---------
    >>> from pgmpy.models import MarkovChain as MC
    >>> model = MC()
    >>> model.add_variable('x', 4)
def add_variable(self, variable, card=0):
    if variable not in self.variables:
        self.variables.append(variable)
    else:
        warn('Variable {var} already exists.'.format(var=variable))
    self.cardinalities[variable] = card
    self.transition_models[variable] = {}
137,140
Add several variables to the model at once.

    Parameters:
    -----------
    variables: array-like iterable object
        List of variables to be added.

    cards: array-like iterable object
        List of cardinalities of the variables to be added.

    Examples:
    ---------
    >>> from pgmpy.models import MarkovChain as MC
    >>> model = MC()
    >>> model.add_variables_from(['x', 'y'], [3, 4])
def add_variables_from(self, variables, cards):
    for var, card in zip(variables, cards):
        self.add_variable(var, card)
137,141
Predicts class label for the entire image.

    Parameters:
    -----------
    X: array, shape = [n_samples, n_pixels_y, n_pixels_x, n_bands]
        Array of images to be predicted.

    Returns:
    --------
    predictions: array, shape = [n_samples, n_pixels_y, n_pixels_x]
        Predicted class label per pixel.
def image_predict(self, X):
    self._check_image(X)

    new_shape = (X.shape[0] * X.shape[1] * X.shape[2],)

    if len(X.shape) == 4:
        new_shape += (X.shape[3],)

    pixels = X.reshape(new_shape)

    predictions = self.classifier.predict(self._transform_input(pixels))

    return predictions.reshape(X.shape[0], X.shape[1], X.shape[2])
137,567
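A NumPy-only sketch of the reshape round-trip used here: every pixel becomes one row, a per-pixel prediction is made, and the image shape is restored. The thresholding "classifier" below is a stand-in, not the class's actual classifier.

import numpy as np

X = np.random.rand(2, 4, 5, 3)                  # [n_samples, height, width, n_bands]
pixels = X.reshape(-1, X.shape[3])              # [2*4*5, 3] -- one row per pixel
predictions = (pixels[:, 0] > 0.5).astype(int)  # stand-in for classifier.predict
print(predictions.reshape(X.shape[0], X.shape[1], X.shape[2]).shape)  # (2, 4, 5)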
Predicts class probabilities for the entire image.

    Parameters:
    -----------
    X: array, shape = [n_samples, n_pixels_y, n_pixels_x, n_bands]
        Array of images to be predicted.

    Returns:
    --------
    probabilities: array, shape = [n_samples, n_pixels_y, n_pixels_x, n_classes]
        Predicted class probabilities per pixel.
def image_predict_proba(self, X):
    self._check_image(X)

    new_shape = (X.shape[0] * X.shape[1] * X.shape[2],)

    if len(X.shape) == 4:
        new_shape += (X.shape[3],)

    pixels = X.reshape(new_shape)

    probabilities = self.classifier.predict_proba(self._transform_input(pixels))

    return probabilities.reshape(X.shape[0], X.shape[1], X.shape[2], probabilities.shape[1])
137,568
Predicts class label for the entire image.

    Parameters:
    -----------
    X: array, shape = [n_samples, n_pixels_y, n_pixels_x, n_bands]
        Array of images to be predicted.

    Returns:
    --------
    predictions: array, shape = [n_samples, n_pixels_y, n_pixels_x]
        Predicted class label per pixel, with each patch of the receptive field
        filled with its patch-level prediction.
def image_predict(self, X):
    self._check_image(X)

    patches, patches_shape = self._to_patches(X)

    predictions = self.classifier.predict(self._transform_input(patches))

    image_predictions = predictions.reshape(patches_shape[0:3])

    image_results = np.zeros((self._samples,) + self._image_size)

    nx, ny = self.receptive_field
    row_steps = self._image_size[0] // nx
    col_steps = self._image_size[1] // ny

    # how can this be optimised?
    for i, j, k in itertools.product(range(row_steps), range(col_steps), range(self._samples)):
        image_results[k, nx * i:nx * (i + 1), ny * j:ny * (j + 1)] = image_predictions[k, i, j]

    return image_results
137,570
Predicts class probabilities for the entire image.

    Parameters:
    -----------
    X: array, shape = [n_samples, n_pixels_y, n_pixels_x, n_bands]
        Array of images to be predicted.

    Returns:
    --------
    probabilities: array, shape = [n_samples, n_pixels_y, n_pixels_x, n_classes]
        Predicted class probabilities per pixel, with each patch of the receptive
        field filled with its patch-level probabilities.
def image_predict_proba(self, X):
    self._check_image(X)

    patches, patches_shape = self._to_patches(X)

    probabilities = self.classifier.predict_proba(self._transform_input(patches))

    image_probabilities = probabilities.reshape(patches_shape[0:3] + (probabilities.shape[1],))

    image_results = np.zeros((self._samples,) + self._image_size + (probabilities.shape[1],))

    nx, ny = self.receptive_field
    row_steps = self._image_size[0] // nx
    col_steps = self._image_size[1] // ny

    # how can this be optimised?
    for i, j, k in itertools.product(range(row_steps), range(col_steps), range(self._samples)):
        image_results[k, nx * i:nx * (i + 1), ny * j:ny * (j + 1), :] = image_probabilities[k, i, j, :]

    return image_results
137,571
Predicts class label for the entire image.

    Parameters:
    -----------
    X: array, shape = [n_samples, n_pixels_y, n_pixels_x, n_bands]
        Array of images to be predicted.

    Returns:
    --------
    predictions: array, shape = [n_samples, n_pixels_y, n_pixels_x]
        Predicted class label per pixel, aggregated patch by patch.
def image_predict(self, X):
    self._check_image(X)

    if self.mode == 'majority_class':
        predictions = self.pixel_classifier.image_predict(X)

    elif self.mode == 'mean_prob':
        probabilities = self.image_predict_proba(X)
        predictions = (probabilities[..., self.target] > self.target_threshold).astype(int)

    patches, _ = self._to_patches(predictions)

    row_steps = self._image_size[0] // self.patch_size[0]
    col_steps = self._image_size[1] // self.patch_size[1]

    # how can this be optimised?
    for i, j, k in itertools.product(range(row_steps), range(col_steps), range(self._samples)):
        patches[k, i, j] = self._target(patches[k, i, j])

    return predictions
137,575
Predicts class probabilities for the entire image.

    Parameters:
    -----------
    X: array, shape = [n_samples, n_pixels_y, n_pixels_x, n_bands]
        Array of images to be predicted.

    Returns:
    --------
    probabilities: array, shape = [n_samples, n_pixels_y, n_pixels_x, n_classes]
        Predicted class probabilities, averaged over each patch.
def image_predict_proba(self, X):
    self._check_image(X)

    probabilities = self.pixel_classifier.image_predict_proba(X)

    patches, _ = self._to_patches(probabilities)

    row_steps = self._image_size[0] // self.patch_size[0]
    col_steps = self._image_size[1] // self.patch_size[1]

    ps = self.patch_size[0] * self.patch_size[1]

    # how can this be optimised?
    for i, j, k in itertools.product(range(row_steps), range(col_steps), range(self._samples)):
        patches[k, i, j, 0] = np.sum(patches[k, i, j, 0]) / ps
        patches[k, i, j, 1] = np.sum(patches[k, i, j, 1]) / ps

    return probabilities
137,576
Creates the TensorFlow operations for calculating the L2 loss between predicted
    state values and actual rewards.

    Args:
        states: Dict of state tensors.
        internals: List of prior internal state tensors.
        reward: Reward tensor.
        update: Boolean tensor indicating whether this call happens during an update.
        reference: Optional reference tensor(s), in case of a comparative loss.

    Returns:
        Loss tensor
def tf_loss(self, states, internals, reward, update, reference=None):
    prediction = self.predict(states=states, internals=internals, update=update)
    return tf.nn.l2_loss(t=(prediction - reward))
137,843
Creates a new conjugate gradient solver instance.

    Args:
        max_iterations: Maximum number of iterations before termination.
        damping: Damping factor.
        unroll_loop: Unrolls the TensorFlow while loop if true.
def __init__(self, max_iterations, damping, unroll_loop=False):
    assert damping >= 0.0
    self.damping = damping

    super(ConjugateGradient, self).__init__(max_iterations=max_iterations, unroll_loop=unroll_loop)
137,864
Iteratively solves the system of linear equations $A x = b$.

    Args:
        fn_x: A callable returning the left-hand side $A x$ of the system of linear equations.
        x_init: Initial solution guess $x_0$, zero vector if None.
        b: The right-hand side $b$ of the system of linear equations.

    Returns:
        A solution $x$ to the problem as given by the solver.
def tf_solve(self, fn_x, x_init, b):
    return super(ConjugateGradient, self).tf_solve(fn_x, x_init, b)
137,865
Initialization step preparing the arguments for the first iteration of the loop body:
    $x_0, 0, p_0, r_0, r_0^2$.

    Args:
        x_init: Initial solution guess $x_0$, zero vector if None.
        b: The right-hand side $b$ of the system of linear equations.

    Returns:
        Initial arguments for tf_step.
def tf_initialize(self, x_init, b):
    if x_init is None:
        # Initial guess is zero vector if not given.
        x_init = [tf.zeros(shape=util.shape(t)) for t in b]

    initial_args = super(ConjugateGradient, self).tf_initialize(x_init)

    # r_0 := b - A * x_0
    # c_0 := r_0
    conjugate = residual = [t - fx for t, fx in zip(b, self.fn_x(x_init))]

    # r_0^2 := r^T * r
    squared_residual = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(res * res)) for res in residual])

    return initial_args + (conjugate, residual, squared_residual)
137,866
Iteration loop body of the conjugate gradient algorithm.

    Args:
        x: Current solution estimate $x_t$.
        iteration: Current iteration counter $t$.
        conjugate: Current conjugate $c_t$.
        residual: Current residual $r_t$.
        squared_residual: Current squared residual $r_t^2$.

    Returns:
        Updated arguments for next iteration.
def tf_step(self, x, iteration, conjugate, residual, squared_residual):
    x, next_iteration, conjugate, residual, squared_residual = super(ConjugateGradient, self).tf_step(
        x, iteration, conjugate, residual, squared_residual
    )

    # Ac := A * c_t
    A_conjugate = self.fn_x(conjugate)

    # TODO: reference?
    if self.damping > 0.0:
        A_conjugate = [A_conj + self.damping * conj for A_conj, conj in zip(A_conjugate, conjugate)]

    # cAc := c_t^T * Ac
    conjugate_A_conjugate = tf.add_n(
        inputs=[tf.reduce_sum(input_tensor=(conj * A_conj)) for conj, A_conj in zip(conjugate, A_conjugate)]
    )

    # \alpha := r_t^2 / cAc
    alpha = squared_residual / tf.maximum(x=conjugate_A_conjugate, y=util.epsilon)

    # x_{t+1} := x_t + \alpha * c_t
    next_x = [t + alpha * conj for t, conj in zip(x, conjugate)]

    # r_{t+1} := r_t - \alpha * Ac
    next_residual = [res - alpha * A_conj for res, A_conj in zip(residual, A_conjugate)]

    # r_{t+1}^2 := r_{t+1}^T * r_{t+1}
    next_squared_residual = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(res * res)) for res in next_residual])

    # \beta := r_{t+1}^2 / r_t^2
    beta = next_squared_residual / tf.maximum(x=squared_residual, y=util.epsilon)

    # c_{t+1} := r_{t+1} + \beta * c_t
    next_conjugate = [res + beta * conj for res, conj in zip(next_residual, conjugate)]

    return next_x, next_iteration, next_conjugate, next_residual, next_squared_residual
137,867
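A plain NumPy reference of the same conjugate gradient update rules, useful for sanity-checking the TensorFlow loop on a small symmetric positive-definite system. This is a sketch: the epsilon guards mirror util.epsilon, the damping term is omitted, and the test matrix is made up.

import numpy as np

def conjugate_gradient(A, b, max_iterations=10, eps=1e-6):
    x = np.zeros_like(b)
    residual = b - A @ x          # r_0 := b - A * x_0
    conjugate = residual.copy()   # c_0 := r_0
    squared_residual = residual @ residual
    for _ in range(max_iterations):
        if squared_residual < eps:
            break
        A_conjugate = A @ conjugate
        alpha = squared_residual / max(conjugate @ A_conjugate, eps)  # r_t^2 / cAc
        x = x + alpha * conjugate                                     # x_{t+1}
        residual = residual - alpha * A_conjugate                     # r_{t+1}
        next_squared_residual = residual @ residual
        beta = next_squared_residual / max(squared_residual, eps)     # r_{t+1}^2 / r_t^2
        conjugate = residual + beta * conjugate                       # c_{t+1}
        squared_residual = next_squared_residual
    return x

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
print(conjugate_gradient(A, b))  # ~[0.0909, 0.6364]
print(np.linalg.solve(A, b))     # same solution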
Termination condition: max number of iterations, or residual sufficiently small.

    Args:
        x: Current solution estimate $x_t$.
        iteration: Current iteration counter $t$.
        conjugate: Current conjugate $c_t$.
        residual: Current residual $r_t$.
        squared_residual: Current squared residual $r_t^2$.

    Returns:
        True if another iteration should be performed.
def tf_next_step(self, x, iteration, conjugate, residual, squared_residual):
    next_step = super(ConjugateGradient, self).tf_next_step(x, iteration, conjugate, residual, squared_residual)
    return tf.logical_and(x=next_step, y=(squared_residual >= util.epsilon))
137,868
Creates a new clipped-step meta optimizer instance.

    Args:
        optimizer: The optimizer which is modified by this meta optimizer.
        clipping_value: Clip deltas at this value.
def __init__(self, optimizer, clipping_value, scope='clipped-step', summary_labels=()):
    assert isinstance(clipping_value, float) and clipping_value > 0.0
    self.clipping_value = clipping_value

    super(ClippedStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)
137,877
Creates the TensorFlow operations for performing an optimization step.

    Args:
        time: Time tensor.
        variables: List of variables to optimize.
        **kwargs: Additional arguments passed on to the internal optimizer.

    Returns:
        List of delta tensors corresponding to the updates for each optimized variable.
def tf_step(self, time, variables, **kwargs):
    deltas = self.optimizer.step(time=time, variables=variables, **kwargs)

    with tf.control_dependencies(control_inputs=deltas):
        clipped_deltas = list()
        exceeding_deltas = list()
        for delta in deltas:
            clipped_delta = tf.clip_by_value(
                t=delta,
                clip_value_min=-self.clipping_value,
                clip_value_max=self.clipping_value
            )
            clipped_deltas.append(clipped_delta)
            exceeding_deltas.append(clipped_delta - delta)

    applied = self.apply_step(variables=variables, deltas=exceeding_deltas)

    with tf.control_dependencies(control_inputs=(applied,)):
        return [delta + 0.0 for delta in clipped_deltas]
137,878
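A NumPy sketch of the clipping bookkeeping above: the inner optimizer has already applied the full delta, so applying the difference (clipped - delta) on top leaves exactly the clipped step in effect. The numbers are invented for illustration.

import numpy as np

clipping_value = 0.5
variable = np.array([1.0, 1.0, 1.0])
delta = np.array([0.2, 0.9, -1.4])      # step proposed (and applied) by the inner optimizer

clipped_delta = np.clip(delta, -clipping_value, clipping_value)
exceeding_delta = clipped_delta - delta  # correction applied on top

after_inner = variable + delta
after_correction = after_inner + exceeding_delta
print(after_correction)                  # [1.2 1.5 0.5] == variable + clipped_delta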