docstring | function | __index_level_0__
---|---|---|
Process a file line by line.
Args:
file_obj: The file to parse.
line_parser: The parser method used to process and store line content.
Raises:
parser.ParseError if the parser is unable to process the line. | def _ParseFile(self, file_obj, line_parser):
lines = [
l.strip() for l in utils.ReadFileBytesAsUnicode(file_obj).splitlines()
]
try:
for index, line in enumerate(lines):
if line:
line_parser(line)
except (IndexError, KeyError) as e:
raise parser.ParseError("Invalid file at line %d: %s" % (index + 1, e)) | 132,522 |
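A minimal standalone sketch of the same line-parser callback pattern (names such as _parse_passwd_line and records are invented for illustration and are not GRR APIs):
import io

def _parse_passwd_line(line, records):
  fields = line.split(":")
  if len(fields) < 3:
    raise ValueError("malformed entry: %r" % line)
  records[fields[0]] = fields[2]  # map username -> uid

def parse_file(file_obj, line_parser):
  records = {}
  lines = [l.strip() for l in file_obj.read().splitlines()]
  for index, line in enumerate(lines):
    if line:
      try:
        line_parser(line, records)
      except (IndexError, KeyError, ValueError) as e:
        raise ValueError("Invalid file at line %d: %s" % (index + 1, e))
  return records

print(parse_file(io.StringIO(u"root:x:0:0::/root:/bin/bash\n"),
                 _parse_passwd_line))  # {'root': '0'}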
Verify that entries that claim to use shadow files have a shadow entry.
If the entries of the non-shadowed file indicate that a shadow file is used,
check that there is actually an entry for that file in shadow.
Args:
store_type: The type of password store that should be used (e.g.
/etc/shadow or /etc/gshadow) | def ReconcileShadow(self, store_type):
for k, v in iteritems(self.entry):
if v.pw_entry.store == store_type:
shadow_entry = self.shadow.get(k)
if shadow_entry is not None:
v.pw_entry = shadow_entry
else:
v.pw_entry.store = "UNKNOWN" | 132,523 |
Extract the members of each group from /etc/gshadow.
Identifies the groups in /etc/gshadow and several attributes of the group,
including how the password is crypted (if set).
gshadow files have the format group_name:passwd:admins:members.
Admins are group members who can also manage passwords and memberships.
Args:
line: An entry in gshadow. | def ParseGshadowEntry(self, line):
fields = ("name", "passwd", "administrators", "members")
if line:
rslt = dict(zip(fields, line.split(":")))
# Add the shadow state to the internal store.
name = rslt["name"]
pw_entry = self.shadow.setdefault(name, rdf_client.PwEntry())
pw_entry.store = self.shadow_store
pw_entry.hash_type = self.GetHashType(rslt["passwd"])
# Add the members to the internal store.
members = self.gshadow_members.setdefault(name, set())
for accts in rslt["administrators"], rslt["members"]:
if accts:
members.update(accts.split(",")) | 132,528 |
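The core trick above is zipping a fixed field tuple against the colon-split line; a minimal standalone sketch with an invented sample entry:
# Stand-alone illustration of dict(zip(...)) over a gshadow-style entry.
fields = ("name", "passwd", "administrators", "members")
line = "wheel:!::alice,bob"  # hypothetical gshadow entry

rslt = dict(zip(fields, line.split(":")))
members = set()
for accts in (rslt["administrators"], rslt["members"]):
  if accts:
    members.update(accts.split(","))

print(rslt["name"], sorted(members))  # wheel ['alice', 'bob']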
Extract the user accounts in /etc/shadow.
Identifies the users in /etc/shadow and several attributes of their account,
including how their password is crypted and password aging characteristics.
Args:
line: An entry of the shadow file. | def ParseShadowEntry(self, line):
fields = ("login", "passwd", "last_change", "min_age", "max_age",
"warn_time", "inactivity", "expire", "reserved")
if line:
rslt = dict(zip(fields, line.split(":")))
pw_entry = self.shadow.setdefault(rslt["login"], rdf_client.PwEntry())
pw_entry.store = self.shadow_store
pw_entry.hash_type = self.GetHashType(rslt["passwd"])
# Tread carefully here in case these values aren't set.
last_change = rslt.get("last_change")
if last_change:
pw_entry.age = int(last_change)
max_age = rslt.get("max_age")
if max_age:
pw_entry.max_age = int(max_age) | 132,534 |
Extract env_var and path values from sh-derivative shells.
Iterates over each line, word by word, searching for statements that set the
path. These are either variables, or conditions that would allow a variable
to be set later in the line (e.g. export).
Args:
lines: A list of lines, each of which is a list of space separated words.
Returns:
a dictionary of path names and values. | def _ParseShVariables(self, lines):
paths = {}
for line in lines:
for entry in line:
if "=" in entry:
# Pad out the list so that it's always 2 elements, even if the split
# failed.
target, vals = (entry.split("=", 1) + [""])[:2]
if vals:
path_vals = vals.split(":")
else:
path_vals = []
self._ExpandPath(target, path_vals, paths)
elif entry not in self._SH_CONTINUATION:
# Stop processing the line unless the entry might allow paths to still
# be set, e.g.
# reserved words: "export"
# conditions: { PATH=VAL } && PATH=:$PATH || PATH=.
break
return paths | 132,544 |
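A standalone sketch of the word-by-word scan above; the continuation set and the expansion step are simplified, assumed stand-ins rather than GRR's _SH_CONTINUATION and _ExpandPath:
SH_CONTINUATION = frozenset(["export", "&&", "||", ";"])  # assumed set

def parse_sh_variables(lines):
  paths = {}
  for line in lines:
    for entry in line:
      if "=" in entry:
        target, vals = (entry.split("=", 1) + [""])[:2]
        paths[target] = vals.split(":") if vals else []
      elif entry not in SH_CONTINUATION:
        break  # nothing later in this line can set a path
  return paths

print(parse_sh_variables([["export", "PATH=/usr/bin:/bin"], ["echo", "PATH=x"]]))
# {'PATH': ['/usr/bin', '/bin']}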
Constructor.
Args:
base_fd: A handler to the predecessor handler.
handlers: A mapping from rdf_paths.PathSpec.PathType to classes
implementing VFSHandler.
pathspec: The pathspec to open.
progress_callback: A callback to indicate that the open call is still
working but needs more time.
Raises:
IOError: if this handler can not be instantiated over the
requested path. | def __init__(self, base_fd, handlers, pathspec=None, progress_callback=None):
del pathspec # Unused.
self.base_fd = base_fd
self.progress_callback = progress_callback
self._handlers = handlers
if base_fd is None:
self.pathspec = rdf_paths.PathSpec()
else:
# Make a copy of the base pathspec.
self.pathspec = base_fd.pathspec.Copy()
self.metadata = {} | 132,547 |
Returns the component name that best matches our base listing.
In order to do the best case-insensitive matching we list the files in the
base handler and return the best match for this component.
Args:
component: A component name which should be present in this directory.
Returns:
the best component name. | def MatchBestComponentName(self, component):
fd = self.OpenAsContainer()
# Adjust the component casing
file_listing = set(fd.ListNames())
# First try an exact match
if component not in file_listing:
# Now try to match lower case
lower_component = component.lower()
for x in file_listing:
if lower_component == x.lower():
component = x
break
if fd.supported_pathtype != self.pathspec.pathtype:
new_pathspec = rdf_paths.PathSpec(
path=component, pathtype=fd.supported_pathtype)
else:
new_pathspec = self.pathspec.last.Copy()
new_pathspec.path = component
return new_pathspec | 132,550 |
Constructor for the Hunt Runner.
Args:
hunt_obj: The hunt object this runner will run states for.
runner_args: A HuntRunnerArgs() instance containing initial values. If not
specified, we use the runner_args from the hunt_obj.
token: An instance of access_control.ACLToken security token. | def __init__(self, hunt_obj, runner_args=None, token=None):
self.token = token or hunt_obj.token
self.queue_manager = queue_manager.QueueManager(token=self.token)
self.outbound_lock = threading.Lock()
self.hunt_obj = hunt_obj
# Initialize from a new runner args proto.
if runner_args is not None:
self.runner_args = runner_args
self.session_id = self.GetNewSessionID()
self.hunt_obj.urn = self.session_id
# Create a context.
self.context = self.InitializeContext(runner_args)
self.hunt_obj.context = self.context
self.context.session_id = self.session_id
else:
# Retrieve args from the hunt object's context. The hunt object is
# responsible for storing our context, although it does not generally
# access it directly.
self.context = self.hunt_obj.context
self.runner_args = self.hunt_obj.runner_args
# Populate the hunt object's urn with the session id.
self.hunt_obj.urn = self.session_id = self.context.session_id | 132,560 |
Go through the list of requests and process the completed ones.
We take a snapshot in time of all requests and responses for this hunt. We
then process as many completed requests as possible. If the responses are not
all here yet, we leave them for next time.
Args:
notification: The notification object that triggered this processing.
thread_pool: The thread pool to process the responses on. | def ProcessCompletedRequests(self, notification, thread_pool):
# First ensure that client messages are all removed. NOTE: We make a new
# queue manager here because we want only the client messages to be removed
# ASAP. This must happen before we actually run the hunt to ensure the
# client requests are removed from the client queues.
with queue_manager.QueueManager(token=self.token) as manager:
for request, _ in manager.FetchCompletedRequests(
self.session_id, timestamp=(0, notification.timestamp)):
# Requests which are not destined to clients have no embedded request
# message.
if request.HasField("request"):
manager.DeQueueClientRequest(request.request)
processing = []
while True:
try:
# Here we only care about completed requests - i.e. those requests with
# responses followed by a status message.
for request, responses in self.queue_manager.FetchCompletedResponses(
self.session_id, timestamp=(0, notification.timestamp)):
if request.id == 0 or not responses:
continue
# Do we have all the responses here? This can happen if some of the
# responses were lost.
if len(responses) != responses[-1].response_id:
# If we can retransmit do so. Note, this is different from the
# automatic retransmission facilitated by the task scheduler (the
# Task.task_ttl field) which would happen regardless of these.
if request.transmission_count < 5:
stats_collector_instance.Get().IncrementCounter(
"grr_request_retransmission_count")
request.transmission_count += 1
self.QueueRequest(request)
break
# If we get here it's all good - run the hunt.
self.hunt_obj.HeartBeat()
self._Process(
request, responses, thread_pool=thread_pool, events=processing)
# At this point we have processed this request - we can remove it and
# its responses from the queue.
self.queue_manager.DeleteRequest(request)
self.context.next_processed_request += 1
# We are done here.
return
except queue_manager.MoreDataException:
# Join any threads.
for event in processing:
event.wait()
# We did not read all the requests/responses in this run in order to
# keep a low memory footprint and have to make another pass.
self.FlushMessages()
self.hunt_obj.Flush()
continue
finally:
# Join any threads.
for event in processing:
event.wait() | 132,561 |
Completes the request by calling the state method.
Args:
method_name: The name of the state method to call.
request: A RequestState protobuf.
responses: A list of GrrMessages responding to the request.
event: A threading.Event() instance to signal completion of this request.
direct_response: A flow.Responses() object can be provided to avoid
creation of one. | def RunStateMethod(self,
method_name,
request=None,
responses=None,
event=None,
direct_response=None):
client_id = None
try:
self.context.current_state = method_name
if request and responses:
client_id = request.client_id or self.runner_args.client_id
logging.debug("%s Running %s with %d responses from %s",
self.session_id, method_name, len(responses), client_id)
else:
logging.debug("%s Running state method %s", self.session_id,
method_name)
# Extend our lease if needed.
self.hunt_obj.HeartBeat()
try:
method = getattr(self.hunt_obj, method_name)
except AttributeError:
raise flow_runner.FlowRunnerError(
"Flow %s has no state method %s" %
(self.hunt_obj.__class__.__name__, method_name))
if direct_response:
method(direct_response)
elif method_name == "Start":
method()
else:
# Prepare a responses object for the state method to use:
responses = flow_responses.Responses.FromLegacyResponses(
request=request, responses=responses)
if responses.status:
self.SaveResourceUsage(request.client_id, responses.status)
stats_collector_instance.Get().IncrementCounter("grr_worker_states_run")
method(responses)
# We don't know here what exceptions can be thrown in the flow but we have
# to continue. Thus, we catch everything.
except Exception as e: # pylint: disable=broad-except
# TODO(user): Deprecate in favor of 'flow_errors'.
stats_collector_instance.Get().IncrementCounter("grr_flow_errors")
stats_collector_instance.Get().IncrementCounter(
"flow_errors", fields=[self.hunt_obj.Name()])
logging.exception("Hunt %s raised %s.", self.session_id, e)
self.Error(traceback.format_exc(), client_id=client_id)
finally:
if event:
event.set() | 132,562 |
Logs the message using the hunt's standard logging.
Args:
format_str: Format string
*args: arguments to the format string
Raises:
RuntimeError: on parent missing logs_collection | def Log(self, format_str, *args):
format_str = utils.SmartUnicode(format_str)
status = format_str
if args:
try:
# The status message is always in unicode
status = format_str % args
except TypeError:
logging.error(
"Tried to log a format string with the wrong number "
"of arguments: %s", format_str)
logging.info("%s: %s", self.session_id, status)
self.context.status = utils.SmartUnicode(status)
log_entry = rdf_flows.FlowLog(
client_id=None,
urn=self.session_id,
flow_name=self.hunt_obj.__class__.__name__,
log_message=status)
logs_collection_urn = self.hunt_obj.logs_collection_urn
with data_store.DB.GetMutationPool() as pool:
grr_collections.LogCollection.StaticAdd(
logs_collection_urn, log_entry, mutation_pool=pool) | 132,573 |
This method is called by the foreman for each client it discovers.
Note that this function is performance sensitive since it is called by the
foreman for every client which needs to be scheduled.
Args:
hunt_id: The hunt to schedule.
client_ids: List of clients that should be added to the hunt.
token: An optional access token to use. | def StartClients(cls, hunt_id, client_ids, token=None):
token = token or access_control.ACLToken(username="Hunt", reason="hunting")
with queue_manager.QueueManager(token=token) as flow_manager:
for client_id in client_ids:
# Now we construct a special response which will be sent to the hunt
# flow. Randomize the request_id so we do not overwrite other messages
# in the queue.
state = rdf_flow_runner.RequestState(
id=random.UInt32(),
session_id=hunt_id,
client_id=client_id,
next_state="AddClient")
# Queue the new request.
flow_manager.QueueRequest(state)
# Send a response.
msg = rdf_flows.GrrMessage(
session_id=hunt_id,
request_id=state.id,
response_id=1,
auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
type=rdf_flows.GrrMessage.Type.STATUS,
payload=rdf_flows.GrrStatus())
flow_manager.QueueResponse(msg)
# And notify the worker about it.
flow_manager.QueueNotification(session_id=hunt_id) | 132,596 |
Process status message from a client and update the stats.
Args:
client_id: Client id.
status: The status object returned from the client. | def ProcessClientResourcesStats(self, client_id, status):
if hasattr(status, "child_session_id"):
flow_path = status.child_session_id
else:
flow_path = "aff4:/%s/flows/%s" % (status.client_id, status.flow_id)
resources = rdf_client_stats.ClientResources()
resources.client_id = client_id
resources.session_id = flow_path
resources.cpu_usage.user_cpu_time = status.cpu_time_used.user_cpu_time
resources.cpu_usage.system_cpu_time = status.cpu_time_used.system_cpu_time
resources.network_bytes_sent = status.network_bytes_sent
self.context.usage_stats.RegisterResources(resources) | 132,605 |
Verifies the HMAC.
This method raises a DecryptionError if the received HMAC does not
verify. If the HMAC verifies correctly, True is returned.
Args:
comms: The comms RdfValue to verify.
Raises:
DecryptionError: The HMAC did not verify.
Returns:
True | def _VerifyHMAC(self, comms=None):
# Check the encrypted message integrity using HMAC.
if self.hmac_type == "SIMPLE_HMAC":
msg = comms.encrypted
digest = comms.hmac
elif self.hmac_type == "FULL_HMAC":
msg = b"".join([
comms.encrypted, comms.encrypted_cipher,
comms.encrypted_cipher_metadata,
comms.packet_iv.SerializeToString(),
struct.pack("<I", comms.api_version)
])
digest = comms.full_hmac
else:
raise DecryptionError("HMAC type not supported.")
try:
rdf_crypto.HMAC(self.cipher.hmac_key).Verify(msg, digest)
except rdf_crypto.VerificationError as e:
raise DecryptionError("HMAC verification failed: %s" % e)
return True | 132,637 |
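A rough standalone equivalent of the FULL_HMAC concatenate-then-verify step using the standard-library hmac module rather than GRR's rdf_crypto wrapper; the sha256 digest and the comms_fields dict are assumptions for illustration:
import hashlib
import hmac
import struct

def verify_full_hmac(hmac_key, comms_fields, received_digest):
  # comms_fields is a dict standing in for the ClientCommunication fields.
  msg = b"".join([
      comms_fields["encrypted"],
      comms_fields["encrypted_cipher"],
      comms_fields["encrypted_cipher_metadata"],
      comms_fields["packet_iv"],
      struct.pack("<I", comms_fields["api_version"]),
  ])
  expected = hmac.new(hmac_key, msg, hashlib.sha256).digest()
  # Constant-time comparison to avoid timing side channels.
  if not hmac.compare_digest(expected, received_digest):
    raise ValueError("HMAC verification failed")
  return True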
Verifies the signature on the encrypted cipher block.
This method returns True if the signature verifies correctly with
the key given.
Args:
remote_public_key: The remote public key.
Returns:
True if the signature was verified, None otherwise.
Raises:
rdf_crypto.VerificationError: A signature and a key were both given but
verification fails. | def VerifyCipherSignature(self, remote_public_key):
if self.cipher_metadata.signature and remote_public_key:
stats_collector_instance.Get().IncrementCounter("grr_rsa_operations")
remote_public_key.Verify(self.serialized_cipher,
self.cipher_metadata.signature)
return True | 132,638 |
Creates a communicator.
Args:
certificate: Our own certificate.
private_key: Our own private key. | def __init__(self, certificate=None, private_key=None):
self.private_key = private_key
self.certificate = certificate
self._ClearServerCipherCache()
# A cache for encrypted ciphers
self.encrypted_cipher_cache = utils.FastStore(max_size=50000) | 132,639 |
Decrypt the serialized, encrypted string.
Args:
encrypted_response: A serialized and encrypted string.
Returns:
a Packed_Message_List rdfvalue | def DecryptMessage(self, encrypted_response):
try:
response_comms = rdf_flows.ClientCommunication.FromSerializedString(
encrypted_response)
return self.DecodeMessages(response_comms)
except (rdfvalue.DecodeError, type_info.TypeValueError, ValueError,
AttributeError) as e:
raise DecodingError("Error while decrypting messages: %s" % e) | 132,643 |
Decompress the message data from packed_message_list.
Args:
packed_message_list: A PackedMessageList rdfvalue with some data in it.
Returns:
a MessageList rdfvalue.
Raises:
DecodingError: If decompression fails. | def DecompressMessageList(cls, packed_message_list):
compression = packed_message_list.compression
if compression == rdf_flows.PackedMessageList.CompressionType.UNCOMPRESSED:
data = packed_message_list.message_list
elif (compression ==
rdf_flows.PackedMessageList.CompressionType.ZCOMPRESSION):
try:
data = zlib.decompress(packed_message_list.message_list)
except zlib.error as e:
raise DecodingError("Failed to decompress: %s" % e)
else:
raise DecodingError("Compression scheme not supported")
try:
result = rdf_flows.MessageList.FromSerializedString(data)
except rdfvalue.DecodeError:
raise DecodingError("RDFValue parsing failed.")
return result | 132,644 |
Extract and verify server message.
Args:
response_comms: A ClientCommunication rdfvalue
Returns:
list of messages and the CN where they came from.
Raises:
DecryptionError: If the message failed to decrypt properly. | def DecodeMessages(self, response_comms):
# Have we seen this cipher before?
cipher_verified = False
try:
cipher = self.encrypted_cipher_cache.Get(response_comms.encrypted_cipher)
stats_collector_instance.Get().IncrementCounter(
"grr_encrypted_cipher_cache", fields=["hits"])
# Even though we have seen this encrypted cipher already, we should still
# make sure that all the other fields are sane and verify the HMAC.
cipher.VerifyReceivedHMAC(response_comms)
cipher_verified = True
# If we have the cipher in the cache, we know the source and
# should have a corresponding public key.
source = cipher.GetSource()
remote_public_key = self._GetRemotePublicKey(source)
except KeyError:
stats_collector_instance.Get().IncrementCounter(
"grr_encrypted_cipher_cache", fields=["misses"])
cipher = ReceivedCipher(response_comms, self.private_key)
source = cipher.GetSource()
try:
remote_public_key = self._GetRemotePublicKey(source)
if cipher.VerifyCipherSignature(remote_public_key):
# At this point we know this cipher is legit, we can cache it.
self.encrypted_cipher_cache.Put(response_comms.encrypted_cipher,
cipher)
cipher_verified = True
except UnknownClientCertError:
# We don't know who we are talking to.
remote_public_key = None
# Decrypt the message with the per packet IV.
plain = cipher.Decrypt(response_comms.encrypted, response_comms.packet_iv)
try:
packed_message_list = rdf_flows.PackedMessageList.FromSerializedString(
plain)
except rdfvalue.DecodeError as e:
raise DecryptionError(e)
message_list = self.DecompressMessageList(packed_message_list)
# Are these messages authenticated?
# pyformat: disable
auth_state = self.VerifyMessageSignature(
response_comms,
packed_message_list,
cipher,
cipher_verified,
response_comms.api_version,
remote_public_key)
# pyformat: enable
# Mark messages as authenticated and where they came from.
for msg in message_list.job:
msg.auth_state = auth_state
msg.source = cipher.cipher_metadata.source
return (message_list.job, cipher.cipher_metadata.source,
packed_message_list.timestamp) | 132,645 |
Parse a human-readable string describing a byte size.
Args:
string: The string to parse.
Raises:
DecodeError: If the string can not be parsed. | def ParseFromHumanReadable(self, string):
if not string:
return None
match = self.REGEX.match(string.strip().lower())
if not match:
raise DecodeError("Unknown specification for ByteSize %s" % string)
multiplier = self.DIVIDERS.get(match.group(2))
if not multiplier:
raise DecodeError("Invalid multiplier %s" % match.group(2))
# The value may be represented as a float, but if not, don't lose accuracy.
value = match.group(1)
if "." in value:
value = float(value)
else:
value = int(value)
self._value = int(value * multiplier) | 132,653 |
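The REGEX and DIVIDERS used above are class attributes not shown in this row; a minimal standalone sketch with assumed patterns and multipliers:
import re

# Assumed equivalents of ByteSize.REGEX and ByteSize.DIVIDERS.
REGEX = re.compile(r"^([0-9.]+)\s*([kmgi]*b?)?$")
DIVIDERS = {
    "": 1, "b": 1,
    "kb": 1000, "kib": 1024,
    "mb": 1000**2, "mib": 1024**2,
    "gb": 1000**3, "gib": 1024**3,
}

def parse_byte_size(string):
  match = REGEX.match(string.strip().lower())
  if not match:
    raise ValueError("Unknown specification for ByteSize %s" % string)
  multiplier = DIVIDERS.get(match.group(2) or "")
  if multiplier is None:
    raise ValueError("Invalid multiplier %s" % match.group(2))
  value = match.group(1)
  value = float(value) if "." in value else int(value)
  return int(value * multiplier)

print(parse_byte_size("1.5 GiB"))  # 1610612736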
Constructor.
Args:
initializer: A string or another RDFURN.
age: The age of this entry.
base: The base namespace this session id lives in.
queue: The queue to use.
flow_name: The name of this flow or its random id.
Raises:
InitializeError: The given URN cannot be converted to a SessionID. | def __init__(self,
initializer=None,
age=None,
base="aff4:/flows",
queue=DEFAULT_FLOW_QUEUE,
flow_name=None):
if initializer is None:
# This SessionID is being constructed from scratch.
if flow_name is None:
flow_name = random.UInt32()
if isinstance(flow_name, int):
initializer = RDFURN(base).Add("%s:%X" % (queue.Basename(), flow_name))
else:
initializer = RDFURN(base).Add("%s:%s" % (queue.Basename(), flow_name))
else:
if isinstance(initializer, RDFURN):
try:
self.ValidateID(initializer.Basename())
except ValueError as e:
raise InitializeError(
"Invalid URN for SessionID: %s, %s" % (initializer, e))
super(SessionID, self).__init__(initializer=initializer, age=age) | 132,654 |
Initialize.
Args:
launchdjobs: NSCFArray of NSCFDictionarys containing launchd job data from
the ServiceManagement framework. | def __init__(self, launchdjobs):
self.launchdjobs = launchdjobs
self.blacklist_regex = [
re.compile(r"^0x[a-z0-9]+\.anonymous\..+$"),
re.compile(r"^0x[a-z0-9]+\.mach_init\.(crash_inspector|Inspector)$"),
] | 132,657 |
Should this job be filtered.
Args:
launchditem: job NSCFDictionary
Returns:
True if the item should be filtered (dropped) | def FilterItem(self, launchditem):
for regex in self.blacklist_regex:
if regex.match(launchditem.get("Label", "")):
return True
return False | 132,658 |
Trims a given list so that it is not longer than the given limit.
Args:
lst: A list to trim.
limit: A maximum number of elements in the list after trimming.
Returns:
A suffix of the input list that was trimmed. | def Trim(lst, limit):
limit = max(0, limit)
clipping = lst[limit:]
del lst[limit:]
return clipping | 132,660 |
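Quick usage of the Trim defined above, showing the in-place behaviour:
lst = [1, 2, 3, 4, 5]
clipped = Trim(lst, 3)
print(lst)            # [1, 2, 3]
print(clipped)        # [4, 5]
print(Trim(lst, -1))  # [1, 2, 3] -- negative limits are clamped to 0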
Groups items by given key function.
Args:
items: An iterable or an iterator of items.
key: A function which given each item will return the key.
Returns:
A dict with keys being each unique key and values being a list of items of
that key. | def Group(items, key):
result = {}
for item in items:
result.setdefault(key(item), []).append(item)
return result | 132,661 |
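Quick usage of the Group defined above:
items = ["apple", "avocado", "banana", "blueberry", "cherry"]
print(Group(items, key=lambda s: s[0]))
# {'a': ['apple', 'avocado'], 'b': ['banana', 'blueberry'], 'c': ['cherry']}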
Divide items into batches of specified size.
In cases where the number of items is not evenly divisible by the batch size,
the last batch is going to be shorter.
Args:
items: An iterable or an iterator of items.
size: A size of the returned batches.
Yields:
Lists of items with specified size. | def Batch(items, size):
batch = []
for item in items:
batch.append(item)
if len(batch) == size:
yield batch
batch = []
if batch:
yield batch | 132,662 |
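Quick usage of the Batch defined above:
print(list(Batch([1, 2, 3, 4, 5], size=2)))
# [[1, 2], [3, 4], [5]]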
Checks whether the items of one iterable are a prefix of another.
Args:
this: An iterable that needs to be checked.
that: An iterable of which items must match the prefix of `this`.
Returns:
`True` if `that` is a prefix of `this`, `False` otherwise. | def StartsWith(this, that):
this_iter = iter(this)
that_iter = iter(that)
while True:
try:
that_value = next(that_iter)
except StopIteration:
return True
try:
this_value = next(this_iter)
except StopIteration:
return False
if this_value != that_value:
return False | 132,663 |
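Quick usage of the StartsWith defined above:
print(StartsWith([1, 2, 3], [1, 2]))  # True
print(StartsWith([1, 2, 3], [1, 3]))  # False
print(StartsWith([1, 2], [1, 2, 3]))  # False -- `that` is longer than `this`
print(StartsWith("abc", ""))          # True -- the empty prefix always matches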
Unzips the specified iterable of pairs into a pair of two iterables.
This function is an inversion of the standard `zip` function and the following
hold:
* ∀ l, r. l, r == unzip(zip(l, r))
* ∀ p. p == zip(unzip(p))
Examples:
>>> Unzip([("foo", 1), ("bar", 2), ("baz", 3)])
(["foo", "bar", "baz"], [1, 2, 3])
Args:
iterable: An iterable of pairs to unzip.
Returns:
A pair of iterables after unzipping. | def Unzip(iterable):
lefts = []
rights = []
for left, right in iterable:
lefts.append(left)
rights.append(right)
return lefts, rights | 132,664 |
r"""A generalized field-based parser.
Handles whitespace, csv etc.
Args:
comments: Line comment patterns (e.g. "#").
cont: Continuation patterns (e.g. "\\").
ml_quote: Boolean flag to allow quoted strings to span lines.
quot: Quotation patterns (e.g. "\\"" or "'").
sep: Field separator patterns (e.g. "[\\s,]").
term: Entry termination patterns (e.g. "\\n").
verbose: Enable verbose mode for the lexer. Useful for debugging. | def __init__(self,
comments=r"#",
cont=r"\\\s*\n",
ml_quote=False,
quot=(r"\"", r"'"),
sep=r"[ \t\f\v]+",
term=r"[\r\n]",
verbose=0):
super(FieldParser, self).__init__()
self.entries = []
self.fields = []
self.field = ""
self.comments = AsIter(comments)
self.cont = AsIter(cont)
self.ml_quote = AsIter(ml_quote)
self.quot = AsIter(quot)
self.sep = AsIter(sep)
self.term = AsIter(term)
self.verbose = verbose
self._GenStates() | 132,669 |
Adds an entry for a configuration setting.
Args:
key: The name of the setting.
val: The value of the setting. | def _ParseEntry(self, key, val):
if key in self._repeated:
setting = self.section.setdefault(key, [])
setting.extend(val)
else:
self.section.setdefault(key, val) | 132,691 |
Create a new configuration section for each match clause.
Each match clause is added to the main config, and the criterion that will
trigger the match is recorded, as is the configuration.
Args:
val: The value following the 'match' keyword. | def _NewMatchSection(self, val):
section = {"criterion": val, "config": {}}
self.matches.append(section)
# Now add configuration items to config section of the match block.
self.section = section["config"]
# Switch to a match-specific processor on a new match_block.
self.processor = self._ParseMatchGrp | 132,693 |
Parse the sshd configuration.
Process each of the lines in the configuration file.
Assembles an sshd_config file into a dictionary with the configuration
keyword as the key, and the configuration settings as value(s).
Args:
stat: unused
file_object: An open configuration file object.
knowledge_base: unused
Yields:
The configuration as an rdfvalue. | def Parse(self, stat, file_object, knowledge_base):
_, _ = stat, knowledge_base
# Clean out any residual state.
self._field_parser.Flush()
lines = [
l.strip()
for l in utils.ReadFileBytesAsUnicode(file_object).splitlines()
]
for line in lines:
# Remove comments (will break if it includes a quoted/escaped #)
line = line.split("#")[0].strip()
if line:
self._field_parser.ParseLine(line)
for result in self._field_parser.GenerateResults():
yield result | 132,696 |
Build a TypeInfo type descriptor.
Args:
name: The name of the parameter that this Type info corresponds to.
default: The default value that should be specified if the parameter was
not set.
description: A string describing this flow argument.
friendly_name: A human readable name which may be provided.
hidden: Should the argument be hidden from the UI. | def __init__(self,
name="",
default=None,
description="",
friendly_name="",
hidden=False):
self.name = name
self.default = default
self.description = description
self.hidden = hidden
if not friendly_name:
friendly_name = name.replace("_", " ").capitalize()
self.friendly_name = friendly_name | 132,719 |
An arg which must be an RDFValue.
Args:
rdfclass: The RDFValue class that this arg must be.
**kwargs: Passthrough to base class. | def __init__(self, rdfclass=None, **kwargs):
super(RDFValueType, self).__init__(**kwargs)
self._type = self.rdfclass = rdfclass | 132,720 |
Validate an RDFValue instance.
Args:
value: An RDFValue instance or something which may be used to instantiate
the correct instance.
Raises:
TypeValueError: If the value is not a valid RDFValue instance or the
required type.
Returns:
A Valid RDFValue instance. | def Validate(self, value):
# Allow None as a default.
if value is None:
return
if not isinstance(value, self.rdfclass):
# Try to coerce the type to the correct rdf_class.
try:
return self.rdfclass(value)
except rdfvalue.InitializeError:
raise TypeValueError("Value for arg %s should be an %s" %
(self.name, self.rdfclass.__name__))
return value | 132,721 |
An arg which must be an RDFStruct.
Args:
rdfclass: The RDFStruct subclass that this arg must be.
**kwargs: Passthrough to base class. | def __init__(self, rdfclass=None, **kwargs):
super(RDFStructDictType, self).__init__(**kwargs)
self._type = self.rdfclass = rdfclass | 132,722 |
Validate the value.
Args:
value: Value is expected to be a dict-like object that a given RDFStruct
can be initialized from.
Raises:
TypeValueError: If the value is not a valid dict-like object that a given
RDFStruct can be initialized from.
Returns:
A valid instance of self.rdfclass or None. | def Validate(self, value):
if value is None:
return None
if not isinstance(value, self.rdfclass):
# Try to coerce the type to the correct rdf_class.
try:
r = self.rdfclass()
r.FromDict(value)
return r
except (AttributeError, TypeError, rdfvalue.InitializeError):
# AttributeError is raised if value contains items that don't
# belong to the given rdfstruct.
# TypeError will be raised if value is not a dict-like object.
raise TypeValueError("Value for arg %s should be an %s" %
(self.name, self.rdfclass.__name__))
return value | 132,723 |
Parse and validate the args.
Note we pop all the args we consume here - so if there are any args we don't
know about, args will not be an empty dict after this. This allows the same
args to be parsed by several TypeDescriptorSets.
Args:
args: A dictionary of arguments that this TypeDescriptorSet might use. If
this dict does not have a required parameter, we still yield its default
value.
Yields:
A (name, value) tuple of the parsed args. | def ParseArgs(self, args):
for descriptor in self:
# Get the value from the kwargs or, if not specified, the default.
value = args.pop(descriptor.name, None)
if value is None:
# No need to validate the default value.
value = descriptor.default
else:
try:
# Validate this value - this should raise if the value provided is not
# acceptable to the type descriptor.
value = descriptor.Validate(value)
except Exception:
logging.error("Invalid value %s for arg %s", value, descriptor.name)
raise
yield descriptor.name, value | 132,729 |
Create a multichoice object and validate choices.
Args:
choices: list of available choices
validator: Validator to use for each of the list *items*; the validator for
the top level is a List.
**kwargs: passed through to parent class. | def __init__(self, choices=None, validator=None, **kwargs):
self.choices = choices
subvalidator = validator or String()
self.validator = List(validator=subvalidator)
# Check the choices match the validator
for choice in self.choices:
subvalidator.Validate(choice)
super(MultiChoice, self).__init__(**kwargs) | 132,742 |
Enumerate all MAC addresses of all NICs.
Args:
args: Unused.
Yields:
`rdf_client_network.Interface` instances. | def EnumerateInterfacesFromClient(args):
del args # Unused.
pythoncom.CoInitialize()
for interface in (wmi.WMI().Win32_NetworkAdapterConfiguration() or []):
addresses = []
for ip_address in interface.IPAddress or []:
addresses.append(
rdf_client_network.NetworkAddress(human_readable_address=ip_address))
response = rdf_client_network.Interface(ifname=interface.Description)
if interface.MACAddress:
response.mac_address = binascii.unhexlify(
interface.MACAddress.replace(":", ""))
if addresses:
response.addresses = addresses
yield response | 132,754 |
Run a WMI query and return a result.
Args:
query: the WMI query to run.
baseobj: the base object for the WMI query.
Yields:
rdf_protodict.Dicts containing key value pairs from the resulting COM
objects. | def RunWMIQuery(query, baseobj=r"winmgmts:\root\cimv2"):
pythoncom.CoInitialize() # Needs to be called if using com from a thread.
wmi_obj = win32com.client.GetObject(baseobj)
# This allows our WMI to do some extra things, in particular
# it gives it access to find the executable path for all processes.
wmi_obj.Security_.Privileges.AddAsString("SeDebugPrivilege")
# Run query
try:
query_results = wmi_obj.ExecQuery(query)
except pythoncom.com_error as e:
raise RuntimeError("Failed to run WMI query \'%s\' err was %s" % (query, e))
# Extract results from the returned COMObject and return dicts.
try:
for result in query_results:
response = rdf_protodict.Dict()
properties = (
list(result.Properties_) +
list(getattr(result, "SystemProperties_", [])))
for prop in properties:
if prop.Name not in IGNORE_PROPS:
# Protodict can handle most of the types we care about, but we may
# get some objects that we don't know how to serialize, so we tell the
# dict to set the value to an error message and keep going
response.SetItem(prop.Name, prop.Value, raise_on_error=False)
yield response
except pythoncom.com_error as e:
raise RuntimeError("WMI query data error on query \'%s\' err was %s" %
(e, query)) | 132,758 |
Filters out duplicates from passed PathInfo objects.
Args:
path_infos: An iterable with PathInfo objects.
Returns:
A list of PathInfo objects with duplicates removed. Duplicates are
removed following this logic: they're sorted by (ctime, mtime, atime,
inode number) in the descending order and then the first one is taken
and the others are dropped. | def _FilterOutPathInfoDuplicates(path_infos):
pi_dict = {}
for pi in path_infos:
path_key = (pi.path_type, pi.GetPathID())
pi_dict.setdefault(path_key, []).append(pi)
def _SortKey(pi):
return (
pi.stat_entry.st_ctime,
pi.stat_entry.st_mtime,
pi.stat_entry.st_atime,
pi.stat_entry.st_ino,
)
for pi_values in pi_dict.values():
if len(pi_values) > 1:
pi_values.sort(key=_SortKey, reverse=True)
return [v[0] for v in pi_dict.values()] | 132,784 |
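The same keep-newest-per-key pattern in a standalone form; the record fields and helper names below are invented for illustration:
def keep_newest_per_key(records, key_fn, sort_fn):
  grouped = {}
  for record in records:
    grouped.setdefault(key_fn(record), []).append(record)
  result = []
  for values in grouped.values():
    if len(values) > 1:
      values.sort(key=sort_fn, reverse=True)  # newest first
    result.append(values[0])
  return result

records = [
    {"path": "/etc/passwd", "mtime": 10},
    {"path": "/etc/passwd", "mtime": 30},
    {"path": "/etc/hosts", "mtime": 20},
]
print(keep_newest_per_key(records, lambda r: r["path"], lambda r: r["mtime"]))
# [{'path': '/etc/passwd', 'mtime': 30}, {'path': '/etc/hosts', 'mtime': 20}]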
Persists information about stat entries.
Args:
stat_entries: A list of `StatEntry` instances.
client_id: An id of a client the stat entries come from.
mutation_pool: A mutation pool used for writing into the AFF4 data store.
token: A token used for writing into the AFF4 data store. | def WriteStatEntries(stat_entries, client_id, mutation_pool, token=None):
for stat_response in stat_entries:
if stat_response.pathspec.last.stream_name:
# This is an ADS (alternate data stream). In that case we always need to
# create a file or we won't be able to access the data. New clients send the
# correct mode already but to make sure, we set this to a regular file anyway.
# Clear all file type bits:
stat_response.st_mode &= ~stat_type_mask
stat_response.st_mode |= stat.S_IFREG
if data_store.AFF4Enabled():
for stat_entry in stat_entries:
CreateAFF4Object(
stat_entry,
client_id_urn=rdf_client.ClientURN(client_id),
mutation_pool=mutation_pool,
token=token)
if data_store.RelationalDBEnabled():
path_infos = [rdf_objects.PathInfo.FromStatEntry(s) for s in stat_entries]
# NOTE: TSK may return duplicate entries. This may be either due to
# a bug in TSK implementation, or due to the fact that TSK is capable
# of returning deleted files information. Our VFS data model only supports
# storing multiple versions of the files when we collect the versions
# ourselves. At the moment we can't store multiple versions of the files
# "as returned by TSK".
#
# Current behaviour is to simply drop excessive versions before the
# WritePathInfo call. This way files returned by TSK will still make it
# into the flow's results, but not into the VFS data.
data_store.REL_DB.WritePathInfos(client_id,
_FilterOutPathInfoDuplicates(path_infos)) | 132,785 |
Find the node in the component_tree from component_path.
Args:
component_path: A list of components which reference a node in the
component tree. This allows us to resume processing in the tree.
Returns:
A node in the component_tree. | def FindNode(self, component_path):
# Find the node that the component path is referring to.
node = self.state.component_tree
for component in component_path:
node = node[component]
return node | 132,792 |
Init.
Args:
file_object: A file like object.
delete_tempfile: If we create a tempfile, should we delete it when
we're done.
journal_mode: If set to "WAL" a "Write-Ahead Log" is created. | def __init__(self, file_object, delete_tempfile=True, journal_mode="DELETE"):
self.file_object = file_object
self.journal_mode = journal_mode
# We want to be able to read from arbitrary file-like objects,
# but the sqlite lib doesn't support this, so we need to write out
# to a tempfile.
if hasattr(self.file_object, "name"):
self.name = self.file_object.name
self._delete_file = False
else:
self._delete_file = delete_tempfile
with tempfile.NamedTemporaryFile(delete=False) as fd:
self.name = fd.name
data = file_object.read(65536)
while data:
fd.write(data)
data = file_object.read(65536) | 132,797 |
Gets all client_ids for a given list of hostnames or FQDNs.
Args:
hostnames: A list of hostnames / FQDNs.
token: An ACL token.
Returns:
A dict with a list of all known GRR client_ids for each hostname. | def GetClientURNsForHostnames(hostnames, token=None):
if data_store.RelationalDBEnabled():
index = ClientIndex()
else:
index = CreateClientIndex(token=token)
keywords = set()
for hostname in hostnames:
if hostname.startswith("host:"):
keywords.add(hostname)
else:
keywords.add("host:%s" % hostname)
results = index.ReadClientPostingLists(keywords)
result = {}
for keyword, hits in iteritems(results):
result[keyword[len("host:"):]] = hits
return result | 132,801 |
Returns a list of client URNs associated with keywords.
Args:
keywords: The list of keywords to search by.
Returns:
A list of client URNs.
Raises:
ValueError: A string (single keyword) was passed instead of an iterable. | def LookupClients(self, keywords):
if isinstance(keywords, string_types):
raise ValueError(
"Keywords should be an iterable, not a string (got %s)." % keywords)
start_time, end_time, filtered_keywords, unversioned_keywords = (
self._AnalyzeKeywords(keywords))
last_seen_map = None
if unversioned_keywords:
last_seen_map = {}
# TODO(user): Make keyword index datetime aware so that
# AsMicrosecondsSinceEpoch is unnecessary.
raw_results = self.Lookup(
list(map(self._NormalizeKeyword, filtered_keywords)),
start_time=start_time.AsMicrosecondsSinceEpoch(),
end_time=end_time.AsMicrosecondsSinceEpoch(),
last_seen_map=last_seen_map)
if not raw_results:
return []
if unversioned_keywords:
universal_last_seen_raw = {}
self.ReadPostingLists(
list(map(self._NormalizeKeyword, raw_results)),
start_time=start_time.AsMicrosecondsSinceEpoch(),
end_time=end_time.AsMicrosecondsSinceEpoch(),
last_seen_map=universal_last_seen_raw)
universal_last_seen = {}
for (_, client_id), ts in iteritems(universal_last_seen_raw):
universal_last_seen[client_id] = ts
old_results = set()
for keyword in unversioned_keywords:
for result in raw_results:
if last_seen_map[(keyword, result)] < universal_last_seen[result]:
old_results.add(result)
raw_results -= old_results
return [rdf_client.ClientURN(result) for result in raw_results] | 132,805 |
Looks up all clients associated with any of the given keywords.
Args:
keywords: A list of keywords we are interested in.
Returns:
A dict mapping each keyword to a list of matching clients. | def ReadClientPostingLists(self, keywords):
start_time, end_time, filtered_keywords, _ = self._AnalyzeKeywords(keywords)
# TODO(user): Make keyword index datetime aware so that
# AsMicrosecondsSinceEpoch is unnecessary.
return self.ReadPostingLists(
filtered_keywords,
start_time=start_time.AsMicrosecondsSinceEpoch(),
end_time=end_time.AsMicrosecondsSinceEpoch()) | 132,806 |
Finds the client_id and keywords for a client.
Args:
client: A VFSGRRClient record to find keywords for.
Returns:
A tuple (client_id, keywords) where client_id is the client identifier and
keywords is a list of keywords related to client. | def AnalyzeClient(self, client):
client_id = self._ClientIdFromURN(client.urn)
# Start with both the client id itself, and a universal keyword, used to
# find all clients.
#
# TODO(user): Remove the universal keyword once we have a better way
# to do this, i.e., once we have a storage library which can list all
# clients directly.
keywords = [self._NormalizeKeyword(client_id), "."]
def TryAppend(prefix, keyword):
precondition.AssertType(prefix, Text)
if keyword:
keyword_string = self._NormalizeKeyword(Text(keyword))
keywords.append(keyword_string)
if prefix:
keywords.append(prefix + ":" + keyword_string)
def TryAppendPrefixes(prefix, keyword, delimiter):
if keyword is None:
return 0
TryAppend(prefix, keyword)
segments = keyword.split(delimiter)
for i in range(1, len(segments)):
TryAppend(prefix, delimiter.join(segments[0:i]))
return len(segments)
def TryAppendIP(ip):
TryAppend("ip", ip)
# IPv4?
if TryAppendPrefixes("ip", str(ip), ".") == 4:
return
# IPv6?
TryAppendPrefixes("ip", str(ip), ":")
def TryAppendMac(mac):
TryAppend("mac", mac)
if len(mac) == 12:
# If it looks like a mac address without ":" symbols, also add the keyword
# with them.
TryAppend("mac", ":".join([mac[i:i + 2] for i in range(0, 12, 2)]))
s = client.Schema
TryAppend("host", client.Get(s.HOSTNAME))
TryAppendPrefixes("host", client.Get(s.HOSTNAME), "-")
TryAppend("host", client.Get(s.FQDN))
TryAppendPrefixes("host", client.Get(s.FQDN), ".")
TryAppend("", client.Get(s.SYSTEM))
TryAppend("", client.Get(s.UNAME))
TryAppend("", client.Get(s.OS_RELEASE))
TryAppend("", client.Get(s.OS_VERSION))
TryAppend("", client.Get(s.KERNEL))
TryAppend("", client.Get(s.ARCH))
kb = client.Get(s.KNOWLEDGE_BASE)
if kb:
for user in kb.users:
TryAppend("user", user.username)
TryAppend("", user.full_name)
if user.full_name:
for name in user.full_name.split():
# full_name often includes nicknames and similar, wrapped in
# punctuation, e.g. "Thomas 'TJ' Jones". We remove the most common
# wrapping characters.
TryAppend("", name.strip("\"'()"))
for username in client.Get(s.USERNAMES, []):
TryAppend("user", username)
for interface in client.Get(s.INTERFACES, []):
if interface.mac_address:
TryAppendMac(interface.mac_address.human_readable_address)
for ip in interface.GetIPAddresses():
TryAppendIP(ip)
# We should have all mac and ip addresses already, but some test data only
# has them attached directly, so we also look there just in case.
if client.Get(s.MAC_ADDRESS):
for mac in str(client.Get(s.MAC_ADDRESS)).split("\n"):
TryAppendMac(mac)
ip_list = client.Get(s.HOST_IPS, "")
for ip in str(ip_list).split("\n"):
TryAppendIP(ip)
client_info = client.Get(s.CLIENT_INFO)
if client_info:
TryAppend("client", client_info.client_name)
TryAppend("client", client_info.client_version)
if client_info.labels:
for label in client_info.labels:
TryAppend("label", label)
for label in client.GetLabelsNames():
TryAppend("label", label)
return client_id, keywords | 132,807 |
Adds a client to the index.
Args:
client: A VFSGRRClient record to add or update. | def AddClient(self, client):
client_id, keywords = self.AnalyzeClient(client)
self.AddKeywordsForName(client_id, keywords) | 132,808 |
Removes all labels for a given client object.
Args:
client: A VFSGRRClient record. | def RemoveClientLabels(self, client):
keywords = []
for label in client.GetLabelsNames():
keyword = self._NormalizeKeyword(utils.SmartStr(label))
# This might actually delete a keyword with the same name as the label (if
# there is one). Usually the client keywords will be rebuilt after the
# deletion of the old labels though, so this can only destroy historic
# index data; normal search functionality will not be affected.
keywords.append(keyword)
keywords.append("label:%s" % keyword)
self.RemoveKeywordsForName(self._ClientIdFromURN(client.urn), keywords) | 132,809 |
Returns a list of client URNs associated with keywords.
Args:
keywords: The list of keywords to search by.
Returns:
A list of client URNs.
Raises:
ValueError: A string (single keyword) was passed instead of an iterable. | def LookupClients(self, keywords):
if isinstance(keywords, string_types):
raise ValueError(
"Keywords should be an iterable, not a string (got %s)." % keywords)
start_time, filtered_keywords = self._AnalyzeKeywords(keywords)
keyword_map = data_store.REL_DB.ListClientsForKeywords(
list(map(self._NormalizeKeyword, filtered_keywords)),
start_time=start_time)
results = itervalues(keyword_map)
relevant_set = set(next(results))
for hits in results:
relevant_set &= set(hits)
if not relevant_set:
return []
return sorted(relevant_set) | 132,811 |
Looks up all clients associated with any of the given keywords.
Args:
keywords: A list of keywords we are interested in.
Returns:
A dict mapping each keyword to a list of matching clients. | def ReadClientPostingLists(self, keywords):
start_time, filtered_keywords = self._AnalyzeKeywords(keywords)
return data_store.REL_DB.ListClientsForKeywords(
filtered_keywords, start_time=start_time) | 132,812 |
Finds the client_id and keywords for a client.
Args:
client: A Client object record to find keywords for.
Returns:
A list of keywords related to client. | def AnalyzeClient(self, client):
# Start with a universal keyword, used to find all clients.
#
# TODO(user): Remove the universal keyword once we have a better way
# to do this, i.e., once we have a storage library which can list all
# clients directly.
keywords = set(["."])
def TryAppend(prefix, keyword):
precondition.AssertType(prefix, Text)
precondition.AssertType(keyword, Text)
if keyword:
keyword_string = self._NormalizeKeyword(keyword)
keywords.add(keyword_string)
if prefix:
keywords.add(prefix + ":" + keyword_string)
def TryAppendPrefixes(prefix, keyword, delimiter):
TryAppend(prefix, keyword)
segments = keyword.split(delimiter)
for i in range(1, len(segments)):
TryAppend(prefix, delimiter.join(segments[0:i]))
return len(segments)
def TryAppendIP(ip):
TryAppend("ip", ip)
# IPv4?
if TryAppendPrefixes("ip", Text(ip), ".") == 4:
return
# IPv6?
TryAppendPrefixes("ip", Text(ip), ":")
def TryAppendMac(mac):
TryAppend("mac", mac)
if len(mac) == 12:
# If it looks like a mac address without ":" symbols, also add the keyword
# with them.
TryAppend("mac", ":".join([mac[i:i + 2] for i in range(0, 12, 2)]))
TryAppend("host", client.knowledge_base.fqdn)
host = client.knowledge_base.fqdn.split(".", 1)[0]
TryAppendPrefixes("host", host, "-")
TryAppendPrefixes("host", client.knowledge_base.fqdn, ".")
TryAppend("", client.knowledge_base.os)
TryAppend("", client.Uname())
TryAppend("", client.os_release)
TryAppend("", client.os_version)
TryAppend("", client.kernel)
TryAppend("", client.arch)
kb = client.knowledge_base
if kb:
for user in kb.users:
TryAppend("user", user.username)
TryAppend("", user.full_name)
if user.full_name:
for name in user.full_name.split():
# full_name often includes nicknames and similar, wrapped in
# punctuation, e.g. "Thomas 'TJ' Jones". We remove the most common
# wrapping characters.
TryAppend("", name.strip("\"'()"))
for ip in client.GetIPAddresses():
TryAppendIP(ip)
for mac in client.GetMacAddresses():
TryAppendMac(mac)
client_info = client.startup_info.client_info
if client_info:
TryAppend("client", client_info.client_name)
TryAppend("client", Text(client_info.client_version))
if client_info.labels:
for label in client_info.labels:
TryAppend("label", label)
return keywords | 132,813 |
Adds a client to the index.
Args:
client: A Client object record. | def AddClient(self, client):
keywords = self.AnalyzeClient(client)
keywords.add(self._NormalizeKeyword(client.client_id))
data_store.REL_DB.AddClientKeywords(client.client_id, keywords) | 132,814 |
Removes all labels for a given client.
Args:
client_id: The client_id. | def RemoveAllClientLabels(self, client_id):
labels_to_remove = set(
[l.name for l in data_store.REL_DB.ReadClientLabels(client_id)])
self.RemoveClientLabels(client_id, labels_to_remove) | 132,816 |
Removes the given labels from a client.
Args:
client_id: The client_id.
labels: A list of labels to remove. | def RemoveClientLabels(self, client_id, labels):
for label in labels:
keyword = self._NormalizeKeyword(label)
# This might actually delete a keyword with the same name as the label (if
# there is one).
data_store.REL_DB.RemoveClientKeyword(client_id, keyword)
data_store.REL_DB.RemoveClientKeyword(client_id, "label:%s" % keyword) | 132,817 |
Enrols a Fleetspeak-enabled client for use with GRR.
Args:
client_id: GRR client-id for the client.
Returns:
True if the client is new, and actually got enrolled. This method
is a no-op if the client already exists (in which case False is returned). | def EnrolFleetspeakClient(self, client_id):
client_urn = rdf_client.ClientURN(client_id)
# If already enrolled, return.
if data_store.RelationalDBEnabled():
try:
data_store.REL_DB.ReadClientMetadata(client_id)
return False
except db.UnknownClientError:
pass
else:
if aff4.FACTORY.ExistsWithType(
client_urn, aff4_type=aff4_grr.VFSGRRClient, token=self.token):
return False
logging.info("Enrolling a new Fleetspeak client: %r", client_id)
if data_store.RelationalDBEnabled():
now = rdfvalue.RDFDatetime.Now()
data_store.REL_DB.WriteClientMetadata(
client_id, first_seen=now, fleetspeak_enabled=True, last_ping=now)
if data_store.AFF4Enabled():
# TODO(fleetspeak-team,grr-team): If aff4 isn't reliable enough, we can
# catch exceptions from it and forward them to Fleetspeak by failing its
# gRPC call. Fleetspeak will then retry with a random, perhaps healthier,
# instance of the GRR frontend.
with aff4.FACTORY.Create(
client_urn,
aff4_type=aff4_grr.VFSGRRClient,
mode="rw",
token=self.token) as client:
client.Set(client.Schema.FLEETSPEAK_ENABLED, rdfvalue.RDFBool(True))
index = client_index.CreateClientIndex(token=self.token)
index.AddClient(client)
if data_store.RelationalDBEnabled():
client_obj = rdf_objects.ClientSnapshot(
client_id=client_urn.Basename())
index = client_index.ClientIndex()
index.AddClient(client_obj)
# Publish the client enrollment message.
events.Events.PublishEvent("ClientEnrollment", client_urn, token=self.token)
return True | 132,827 |
Receives and processes messages for flows stored in the relational db.
Args:
client_id: The client which sent the messages.
messages: A list of GrrMessage RDFValues. | def ReceiveMessagesRelationalFlows(self, client_id, messages):
now = time.time()
unprocessed_msgs = []
message_handler_requests = []
dropped_count = 0
for session_id, msgs in iteritems(
collection.Group(messages, operator.attrgetter("session_id"))):
# Remove and handle messages to WellKnownFlows
leftover_msgs = self.HandleWellKnownFlows(msgs)
for msg in leftover_msgs:
if (msg.auth_state != msg.AuthorizationState.AUTHENTICATED and
msg.session_id != self.unauth_allowed_session_id):
dropped_count += 1
continue
if session_id in queue_manager.session_id_map:
message_handler_requests.append(
rdf_objects.MessageHandlerRequest(
client_id=msg.source.Basename(),
handler_name=queue_manager.session_id_map[session_id],
request_id=msg.response_id,
request=msg.payload))
else:
unprocessed_msgs.append(msg)
if dropped_count:
logging.info("Dropped %d unauthenticated messages for %s", dropped_count,
client_id)
if unprocessed_msgs:
flow_responses = []
for message in unprocessed_msgs:
flow_responses.append(
rdf_flow_objects.FlowResponseForLegacyResponse(message))
data_store.REL_DB.WriteFlowResponses(flow_responses)
for msg in unprocessed_msgs:
if msg.type == rdf_flows.GrrMessage.Type.STATUS:
stat = rdf_flows.GrrStatus(msg.payload)
if stat.status == rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED:
# A client crashed while performing an action, fire an event.
crash_details = rdf_client.ClientCrash(
client_id=client_id,
session_id=msg.session_id,
backtrace=stat.backtrace,
crash_message=stat.error_message,
nanny_status=stat.nanny_status,
timestamp=rdfvalue.RDFDatetime.Now())
events.Events.PublishEvent(
"ClientCrash", crash_details, token=self.token)
if message_handler_requests:
data_store.REL_DB.WriteMessageHandlerRequests(message_handler_requests)
logging.debug("Received %s messages from %s in %s sec", len(messages),
client_id,
time.time() - now) | 132,828 |
Receives and processes the messages from the source.
For each message we update the request object, and place the
response in that request's queue. If the request is complete, we
send a message to the worker.
Args:
client_id: The client which sent the messages.
messages: A list of GrrMessage RDFValues. | def ReceiveMessages(self, client_id, messages):
if data_store.RelationalDBEnabled():
return self.ReceiveMessagesRelationalFlows(client_id, messages)
now = time.time()
with queue_manager.QueueManager(token=self.token) as manager:
for session_id, msgs in iteritems(
collection.Group(messages, operator.attrgetter("session_id"))):
# Remove and handle messages to WellKnownFlows
leftover_msgs = self.HandleWellKnownFlows(msgs)
unprocessed_msgs = []
for msg in leftover_msgs:
if (msg.auth_state == msg.AuthorizationState.AUTHENTICATED or
msg.session_id == self.unauth_allowed_session_id):
unprocessed_msgs.append(msg)
if len(unprocessed_msgs) < len(leftover_msgs):
logging.info("Dropped %d unauthenticated messages for %s",
len(leftover_msgs) - len(unprocessed_msgs), client_id)
if not unprocessed_msgs:
continue
for msg in unprocessed_msgs:
manager.QueueResponse(msg)
for msg in unprocessed_msgs:
# Messages for well known flows should notify even though they don't
# have a status.
if msg.request_id == 0:
manager.QueueNotification(session_id=msg.session_id)
# Those messages are all the same, one notification is enough.
break
elif msg.type == rdf_flows.GrrMessage.Type.STATUS:
# If we receive a status message from the client it means the client
# has finished processing this request. We therefore can de-queue it
# from the client queue. msg.task_id will raise if the task id is
# not set (message originated at the client, there was no request on
# the server), so we have to check .HasTaskID() first.
if msg.HasTaskID():
manager.DeQueueClientRequest(msg)
manager.QueueNotification(
session_id=msg.session_id, last_status=msg.request_id)
stat = rdf_flows.GrrStatus(msg.payload)
if stat.status == rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED:
# A client crashed while performing an action, fire an event.
crash_details = rdf_client.ClientCrash(
client_id=client_id,
session_id=session_id,
backtrace=stat.backtrace,
crash_message=stat.error_message,
nanny_status=stat.nanny_status,
timestamp=rdfvalue.RDFDatetime.Now())
events.Events.PublishEvent(
"ClientCrash", crash_details, token=self.token)
logging.debug("Received %s messages from %s in %s sec", len(messages),
client_id,
time.time() - now) | 132,829 |
Executes one of the predefined commands.
Args:
command: An `ExecuteRequest` object.
Yields:
`rdf_client_action.ExecuteResponse` objects. | def ExecuteCommandFromClient(command):
cmd = command.cmd
args = command.args
time_limit = command.time_limit
res = client_utils_common.Execute(cmd, args, time_limit)
(stdout, stderr, status, time_used) = res
# Limit output to 10MB so our response doesn't get too big.
stdout = stdout[:10 * 1024 * 1024]
stderr = stderr[:10 * 1024 * 1024]
yield rdf_client_action.ExecuteResponse(
request=command,
stdout=stdout,
stderr=stderr,
exit_status=status,
# We have to return microseconds.
time_used=int(1e6 * time_used)) | 132,833 |
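client_utils_common.Execute is GRR-internal; a rough standalone equivalent of the run-with-time-limit-and-truncate pattern using the standard library (Python 3.7+ for capture_output; the 10 MiB cap mirrors the code above):
import subprocess
import time

def execute_command(cmd, args, time_limit):
  start = time.time()
  try:
    proc = subprocess.run([cmd] + list(args),
                          capture_output=True,
                          timeout=time_limit or None)
    stdout, stderr, status = proc.stdout, proc.stderr, proc.returncode
  except subprocess.TimeoutExpired as e:
    stdout, stderr, status = e.stdout or b"", e.stderr or b"", -1
  time_used = time.time() - start
  # Limit output to 10MB so the response doesn't get too big.
  return stdout[:10 * 1024 * 1024], stderr[:10 * 1024 * 1024], status, time_used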
Call os.statvfs for a given list of paths.
Args:
args: An `rdf_client_action.StatFSRequest`.
Yields:
`rdf_client_fs.UnixVolume` instances.
Raises:
RuntimeError: if called on a Windows system. | def StatFSFromClient(args):
if platform.system() == "Windows":
raise RuntimeError("os.statvfs not available on Windows")
for path in args.path_list:
try:
fd = vfs.VFSOpen(rdf_paths.PathSpec(path=path, pathtype=args.pathtype))
st = fd.StatFS()
mount_point = fd.GetMountPoint()
except (IOError, OSError):
continue
unix = rdf_client_fs.UnixVolume(mount_point=mount_point)
# On Linux, pre-2.6 kernels don't have frsize, so we fall back to bsize.
# The actual_available_allocation_units attribute is set to blocks
# available to the unprivileged user, root may have some additional
# reserved space.
yield rdf_client_fs.Volume(
bytes_per_sector=(st.f_frsize or st.f_bsize),
sectors_per_allocation_unit=1,
total_allocation_units=st.f_blocks,
actual_available_allocation_units=st.f_bavail,
unixvolume=unix) | 132,835 |
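A standalone sketch of the underlying os.statvfs arithmetic (Unix only; the path is just an example):
import os

def disk_usage(path="/"):
  st = os.statvfs(path)
  block = st.f_frsize or st.f_bsize     # older kernels may not set frsize
  total = st.f_blocks * block
  free_for_user = st.f_bavail * block   # excludes space reserved for root
  return total, free_for_user

total_bytes, free_bytes = disk_usage("/")
print("%.1f GiB total, %.1f GiB available" %
      (total_bytes / 1024.0**3, free_bytes / 1024.0**3))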
Creates a ZIP archive of the files in the input directory.
Args:
input_dir: the name of the input directory.
output_file: the name of the output ZIP archive without extension. | def MakeZip(self, input_dir, output_file):
logging.info("Generating zip template file at %s", output_file)
basename, _ = os.path.splitext(output_file)
# TODO(user):pytype: incorrect make_archive() definition in typeshed.
# pytype: disable=wrong-arg-types
shutil.make_archive(
basename, "zip", base_dir=".", root_dir=input_dir, verbose=True) | 132,870 |
Validates a Fleetspeak service config.
Checks that the given file is a valid TextFormat representation of
a Fleetspeak service config proto.
Args:
config_path: Path to the config file.
Raises:
BuildError: If the config is not valid. | def _ValidateFleetspeakServiceConfig(self, config_path):
with open(config_path, "rb") as f:
pool = descriptor_pool.DescriptorPool()
pool.AddDescriptor(fs_config_pb2.Config.DESCRIPTOR)
parsed_config = text_format.Parse(
f.read(), fs_system_pb2.ClientServiceConfig(), descriptor_pool=pool)
if parsed_config.factory != "Daemon":
raise BuildError(
"Fleetspeak config does not have the expected factory type.")
daemon_cfg = fs_config_pb2.Config()
parsed_config.config.Unpack(daemon_cfg)
if not daemon_cfg.argv:
raise BuildError(
"Fleetspeak daemon service config does not specify command line "
"args.") | 132,877 |
Repack the installer into the payload.
Args:
payload_data: data payload for zip file
output_path: filename for the zip output
Raises:
RuntimeError: if the ClientBuilder.unzipsfx_stub doesn't require admin.
Returns:
output_path: filename string of zip output file | def MakeSelfExtractingZip(self, payload_data, output_path):
context = self.context + ["Client Context"]
src_zip = zipfile.ZipFile(io.BytesIO(payload_data), mode="r")
zip_data = io.BytesIO()
output_zip = zipfile.ZipFile(
zip_data, mode="w", compression=zipfile.ZIP_DEFLATED)
config_file_name = config.CONFIG.Get(
"ClientBuilder.config_filename", context=context)
# Copy the rest of the files from the package to the new zip.
for template_file in src_zip.namelist():
if template_file != config_file_name:
# Avoid writing the config file twice if we're repacking a binary that
# has already been run through deployment. We write it in the next step,
# so no need to copy over from the original here.
CopyFileInZip(src_zip, template_file, output_zip)
client_config_content = self.GetClientConfig(context)
output_zip.writestr(
config_file_name,
client_config_content.encode("utf-8"),
compress_type=zipfile.ZIP_STORED)
# The zip file comment is used by the self extractor to run the installation
# script. The comment has to be a `bytes` object because the `zipfile` module
# is not smart enough to properly handle `unicode` objects. We use the
# `encode` method instead of `SmartStr` because we expect this option to be a
# `unicode` object and, in case it is not, we want it to blow up.
output_zip.comment = b"$AUTORUN$>%s" % config.CONFIG.Get(
"ClientBuilder.autorun_command_line", context=context).encode("utf-8")
output_zip.close()
utils.EnsureDirExists(os.path.dirname(output_path))
with open(output_path, "wb") as fd:
# First write the installer stub
stub_data = io.BytesIO()
unzipsfx_stub = config.CONFIG.Get(
"ClientBuilder.unzipsfx_stub", context=context)
stub_raw = open(unzipsfx_stub, "rb").read()
# Check stub has been compiled with the requireAdministrator manifest.
if b"level=\"requireAdministrator" not in stub_raw:
raise RuntimeError("Bad unzip binary in use. Not compiled with the"
"requireAdministrator manifest option.")
stub_data.write(stub_raw)
# If in verbose mode, modify the unzip bins PE header to run in console
# mode for easier debugging.
SetPeSubsystem(
stub_data,
console=config.CONFIG.Get("ClientBuilder.console", context=context))
# Now patch up the .rsrc section to contain the payload.
end_of_file = zip_data.tell() + stub_data.tell()
# This is the IMAGE_SECTION_HEADER.Name which is also the start of
# IMAGE_SECTION_HEADER.
offset_to_rsrc = stub_data.getvalue().find(b".rsrc")
# IMAGE_SECTION_HEADER.PointerToRawData is a 32 bit int.
stub_data.seek(offset_to_rsrc + 20)
start_of_rsrc_section = struct.unpack("<I", stub_data.read(4))[0]
# Adjust IMAGE_SECTION_HEADER.SizeOfRawData to span from the old start to
# the end of file.
stub_data.seek(offset_to_rsrc + 16)
stub_data.write(struct.pack("<I", end_of_file - start_of_rsrc_section))
# Concatenate stub and zip file.
out_data = io.BytesIO()
out_data.write(stub_data.getvalue())
out_data.write(zip_data.getvalue())
# Then write the actual output file.
fd.write(out_data.getvalue())
if self.signer:
self.signer.SignFile(output_path)
logging.info("Deployable binary generated at %s", output_path)
return output_path | 132,878 |
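The `.rsrc` patching above relies on the IMAGE_SECTION_HEADER layout: the 8-byte section name starts the header, SizeOfRawData sits at offset 16 and PointerToRawData at offset 20. A standalone sketch of the same offset arithmetic, operating on an in-memory buffer rather than a real unzipsfx stub:

import io
import struct

def patch_rsrc_size(stub, appended_len):
  # Grow the .rsrc section's SizeOfRawData so it covers appended payload bytes.
  buf = io.BytesIO(stub)
  name_offset = stub.find(b".rsrc")  # start of the IMAGE_SECTION_HEADER
  buf.seek(name_offset + 20)  # PointerToRawData (little-endian uint32)
  raw_data_ptr = struct.unpack("<I", buf.read(4))[0]
  new_size = (len(stub) + appended_len) - raw_data_ptr
  buf.seek(name_offset + 16)  # SizeOfRawData (little-endian uint32)
  buf.write(struct.pack("<I", new_size))
  return buf.getvalue()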
Upload a signed blob into the datastore.
Args:
content: File content to upload.
aff4_path: aff4 path to upload to.
client_context: The configuration contexts to use.
limit: The maximum size of the chunk to use.
token: A security token.
Raises:
IOError: On failure to write. | def UploadSignedConfigBlob(content,
aff4_path,
client_context=None,
limit=None,
token=None):
if limit is None:
limit = config.CONFIG["Datastore.maximum_blob_size"]
# Get the values of these parameters which apply to the client running on the
# target platform.
if client_context is None:
# Default to the windows client.
client_context = ["Platform:Windows", "Client Context"]
config.CONFIG.Validate(
parameters="PrivateKeys.executable_signing_private_key")
signing_key = config.CONFIG.Get(
"PrivateKeys.executable_signing_private_key", context=client_context)
verification_key = config.CONFIG.Get(
"Client.executable_signing_public_key", context=client_context)
signed_binary_utils.WriteSignedBinary(
rdfvalue.RDFURN(aff4_path),
content,
signing_key,
public_key=verification_key,
chunk_size=limit,
token=token)
logging.info("Uploaded to %s", aff4_path) | 132,898 |
Initializes the stat collector.
Args:
worker: A `GRRClientWorker` instance that spawned this stat collector. | def __init__(self, worker):
super(ClientStatsCollector, self).__init__()
self.daemon = True
self._worker = worker
self._process = psutil.Process()
self._cpu_samples = []
self._io_samples = []
self._last_send_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0)
self._should_send = False | 132,977 |
Whether the condition applies, modulo host data.
Args:
os_name: An OS string.
cpe: A CPE string.
label: A label string.
Returns:
True if os_name, cpe or labels match. Empty values are ignored. | def Artifacts(self, os_name=None, cpe=None, label=None):
hit = lambda x: x[0] == x[1] or not x[0]
seq = [(self.os_name, os_name), (self.cpe, cpe), (self.label, label)]
return all(map(hit, seq)) | 132,986 |
Whether the condition contains the specified values.
Args:
artifact: A string identifier for the artifact.
os_name: An OS string.
cpe: A CPE string.
label: A label string.
Returns:
True if the values match the non-empty query attributes.
Empty query attributes are ignored in the comparison. | def Search(self, artifact, os_name=None, cpe=None, label=None):
hit = lambda x: x[0] == x[1] or not x[0]
seq = [(artifact, self.artifact), (os_name, self.os_name), (cpe, self.cpe),
(label, self.label)]
return all(map(hit, seq)) | 132,987 |
Add criteria for a check.
Args:
artifact: An artifact name.
target: A tuple of artifact necessary to process the data.
callback: Entities that should be called if the condition matches. | def Add(self, artifact=None, target=None, callback=None):
# Cases where a target field is undefined or empty need special handling.
# Repeated field helper in target yields results, so expand this out into a
# list. If the attribute doesn't exist, default to an empty list.
# Then, in either case, replace the empty list with one containing a single
# None value.
if target is None:
target = Target()
os_name = target.Get("os") or [None]
cpe = target.Get("cpe") or [None]
label = target.Get("label") or [None]
attributes = itertools.product(os_name, cpe, label)
new_conditions = [Condition(artifact, *attr) for attr in attributes]
self.conditions.update(new_conditions)
self._Register(new_conditions, callback) | 132,989 |
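Because `Add` takes the cross product of the target's os/cpe/label values, a target with two OS names and one label registers two conditions for the same artifact. A sketch, assuming `Target` accepts `os`/`cpe`/`label` keyword arguments (mirroring the `Get` calls above) and that the containing class is `Triggers`:

def my_check(*args, **kwargs):
  pass  # placeholder callback

triggers = Triggers()
target = Target(os=["Linux", "Darwin"], label=["corp"])  # kwargs assumed
triggers.Add(artifact="SshdConfig", target=target, callback=my_check)
# itertools.product over (["Linux", "Darwin"], [None], ["corp"]) registers two
# Condition objects, one per OS, both mapped to my_check in the registry.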
Adds existing triggers to this set, optionally rebuilding the registry.
Used to aggregate trigger methods from Probes to Methods to Checks.
Args:
other: Another Triggers object.
callback: Registers all the updated triggers to the specified function. | def Update(self, other, callback):
self.conditions.update(other.conditions)
self._Register(other.conditions, callback) | 132,990 |
Test if host data should trigger a check.
Args:
artifact: An artifact name.
os_name: An OS string.
cpe: A CPE string.
label: A label string.
Returns:
A list of conditions that match. | def Match(self, artifact=None, os_name=None, cpe=None, label=None):
return [
c for c in self.conditions if c.Match(artifact, os_name, cpe, label)
] | 132,991 |
Find the host attributes that trigger data collection.
Args:
artifact: An artifact name.
os_name: An OS string.
cpe: A CPE string.
label: A label string.
Returns:
A list of conditions that contain the specified attributes. | def Search(self, artifact=None, os_name=None, cpe=None, label=None):
return [
c for c in self.conditions if c.Search(artifact, os_name, cpe, label)
] | 132,992 |
Find the artifacts that correspond with other trigger conditions.
Args:
os_name: An OS string.
cpe: A CPE string.
label: A label string.
Returns:
A list of artifacts to be processed. | def Artifacts(self, os_name=None, cpe=None, label=None):
return [
c.artifact for c in self.conditions if c.Artifacts(os_name, cpe, label)
] | 132,993 |
Find the methods that evaluate data that meets this condition.
Args:
conditions: A tuple of (artifact, os_name, cpe, label)
Returns:
A list of methods that evaluate the data. | def Calls(self, conditions=None):
results = set()
if conditions is None:
conditions = [None]
for condition in conditions:
for c in self.Match(*condition):
results.update(self._registry.get(c, []))
return results | 132,994 |
Generate a CA certificate.
Args:
private_key: The private key to use.
common_name: Name for cert.
issuer_cn: Name for issuer.
issuer_c: Country for issuer.
Returns:
The certificate. | def MakeCACert(private_key,
common_name=u"grr",
issuer_cn=u"grr_test",
issuer_c=u"US"):
public_key = private_key.GetPublicKey()
builder = x509.CertificateBuilder()
issuer = x509.Name([
x509.NameAttribute(oid.NameOID.COMMON_NAME, issuer_cn),
x509.NameAttribute(oid.NameOID.COUNTRY_NAME, issuer_c)
])
subject = x509.Name(
[x509.NameAttribute(oid.NameOID.COMMON_NAME, common_name)])
builder = builder.subject_name(subject)
builder = builder.issuer_name(issuer)
valid_from = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("1d")
valid_until = rdfvalue.RDFDatetime.Now() + rdfvalue.Duration("3650d")
builder = builder.not_valid_before(valid_from.AsDatetime())
builder = builder.not_valid_after(valid_until.AsDatetime())
builder = builder.serial_number(1)
builder = builder.public_key(public_key.GetRawPublicKey())
builder = builder.add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True)
builder = builder.add_extension(
x509.SubjectKeyIdentifier.from_public_key(public_key.GetRawPublicKey()),
critical=False)
certificate = builder.sign(
private_key=private_key.GetRawPrivateKey(),
algorithm=hashes.SHA256(),
backend=openssl.backend)
return rdf_crypto.RDFX509Cert(certificate) | 132,997 |
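A hedged usage sketch. The key factory below (`rdf_crypto.RSAPrivateKey.GenerateKey`) and its import path are assumptions about GRR's crypto module, not confirmed by this snippet.

from grr_response_core.lib.rdfvalues import crypto as rdf_crypto  # path assumed

ca_key = rdf_crypto.RSAPrivateKey.GenerateKey(bits=2048)  # factory assumed
ca_cert = MakeCACert(ca_key, common_name=u"grr", issuer_cn=u"grr_test")
# ca_cert wraps an X.509 certificate with CA BasicConstraints and roughly a
# ten-year validity window, as built above.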
Writes graph series for a particular client label to the DB.
Args:
graph_series: A series of rdf_stats.Graphs containing aggregated data for a
particular report-type.
label: Client label by which data in the graph_series was aggregated.
token: ACL token to use for writing to the legacy (non-relational)
datastore.
Raises:
AFF4AttributeTypeError: If, when writing to the legacy DB, an unexpected
report-data type is encountered. | def WriteGraphSeries(graph_series,
label,
token = None):
if data_store.RelationalDBEnabled():
data_store.REL_DB.WriteClientGraphSeries(graph_series, label)
if _ShouldUseLegacyDatastore():
# We need to use the __call__() method of the aff4.Attribute class
# to instantiate Graph and GraphSeries objects, or AFF4Object.AddAttribute()
# won't work.
aff4_attr = _GetAFF4AttributeForReportType(graph_series.report_type)()
if isinstance(aff4_attr, rdf_stats.GraphSeries):
for graph in graph_series.graphs:
aff4_attr.Append(graph)
elif isinstance(aff4_attr, rdf_stats.Graph):
for sample in graph_series.graphs[0]:
aff4_attr.Append(x_value=sample.x_value, y_value=sample.y_value)
else:
raise AFF4AttributeTypeError(aff4_attr.__class__)
with aff4.FACTORY.Create(
GetAFF4ClientReportsURN().Add(label),
aff4_type=aff4_stats.ClientFleetStats,
mode="w",
token=token) as stats_for_label:
stats_for_label.AddAttribute(aff4_attr) | 133,011 |
Adds new files consisting of given blob references.
Args:
client_path_blob_refs: A dictionary mapping `db.ClientPath` instances to
lists of blob references.
use_external_stores: A flag indicating if the files should also be added to
external file stores.
Returns:
A dictionary mapping `db.ClientPath` to hash ids of the file.
Raises:
BlobNotFoundError: If one of the referenced blobs cannot be found. | def AddFilesWithUnknownHashes(
client_path_blob_refs,
use_external_stores = True
):
hash_id_blob_refs = dict()
client_path_hash_id = dict()
metadatas = dict()
all_client_path_blob_refs = list()
for client_path, blob_refs in iteritems(client_path_blob_refs):
# In the special case where there is only one blob, we don't need to go to
# the data store to read said blob and rehash it: blob ids are themselves
# SHA-256 digests of the blob contents, so we already have all the
# information we need. For empty files without blobs, we can just hash the
# empty string instead.
if len(blob_refs) <= 1:
if blob_refs:
hash_id = rdf_objects.SHA256HashID.FromBytes(
blob_refs[0].blob_id.AsBytes())
else:
hash_id = rdf_objects.SHA256HashID.FromData(b"")
client_path_hash_id[client_path] = hash_id
hash_id_blob_refs[hash_id] = blob_refs
metadatas[hash_id] = FileMetadata(
client_path=client_path, blob_refs=blob_refs)
else:
for blob_ref in blob_refs:
all_client_path_blob_refs.append((client_path, blob_ref))
client_path_offset = collections.defaultdict(lambda: 0)
client_path_sha256 = collections.defaultdict(hashlib.sha256)
verified_client_path_blob_refs = collections.defaultdict(list)
client_path_blob_ref_batches = collection.Batch(
items=all_client_path_blob_refs, size=_BLOBS_READ_BATCH_SIZE)
for client_path_blob_ref_batch in client_path_blob_ref_batches:
blob_id_batch = set(
blob_ref.blob_id for _, blob_ref in client_path_blob_ref_batch)
blobs = data_store.BLOBS.ReadBlobs(blob_id_batch)
for client_path, blob_ref in client_path_blob_ref_batch:
blob = blobs[blob_ref.blob_id]
if blob is None:
message = "Could not find one of referenced blobs: {}".format(
blob_ref.blob_id)
raise BlobNotFoundError(message)
offset = client_path_offset[client_path]
if blob_ref.size != len(blob):
raise ValueError(
"Got conflicting size information for blob %s: %d vs %d." %
(blob_ref.blob_id, blob_ref.size, len(blob)))
if blob_ref.offset != offset:
raise ValueError(
"Got conflicting offset information for blob %s: %d vs %d." %
(blob_ref.blob_id, blob_ref.offset, offset))
verified_client_path_blob_refs[client_path].append(blob_ref)
client_path_offset[client_path] = offset + len(blob)
client_path_sha256[client_path].update(blob)
for client_path in iterkeys(client_path_sha256):
sha256 = client_path_sha256[client_path].digest()
hash_id = rdf_objects.SHA256HashID.FromBytes(sha256)
client_path_hash_id[client_path] = hash_id
hash_id_blob_refs[hash_id] = verified_client_path_blob_refs[client_path]
data_store.REL_DB.WriteHashBlobReferences(hash_id_blob_refs)
if use_external_stores:
for client_path in iterkeys(verified_client_path_blob_refs):
metadatas[client_path_hash_id[client_path]] = FileMetadata(
client_path=client_path,
blob_refs=verified_client_path_blob_refs[client_path])
EXTERNAL_FILE_STORE.AddFiles(metadatas)
return client_path_hash_id | 133,023 |
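A hedged usage sketch. It assumes the referenced blob is already in the blob store and that the `WriteBlobWithUnknownHash` helper, the `db.ClientPath.OS` constructor and the `rdf_objects.BlobReference` fields behave as used in the function above.

data = b"example file contents"
blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(data)  # API name assumed
client_path = db.ClientPath.OS("C.1234567890abcdef", ("etc", "hosts"))
blob_ref = rdf_objects.BlobReference(offset=0, size=len(data), blob_id=blob_id)
hash_ids = AddFilesWithUnknownHashes({client_path: [blob_ref]})
print(hash_ids[client_path])  # SHA256HashID of the reassembled file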
Checks if files with given hashes are present in the file store.
Args:
hash_ids: A list of SHA256HashID objects.
Returns:
A dict where SHA256HashID objects are keys. Corresponding values
may be False (if the hash id is not present) or True (if it is present). | def CheckHashes(hash_ids):
return {
k: bool(v)
for k, v in data_store.REL_DB.ReadHashBlobReferences(hash_ids).items()
} | 133,025 |
Adds multiple files to the file store.
Args:
hash_id_metadatas: A dictionary mapping hash ids to file metadata (a tuple
of client path and blob references). | def AddFiles(self, hash_id_metadatas):
for hash_id, metadata in iteritems(hash_id_metadatas):
self.AddFile(hash_id, metadata) | 133,028 |
Initializes StreamedFileChunk object.
Args:
client_path: db.ClientPath identifying the file.
data: bytes with chunk's contents.
chunk_index: Index of this chunk (relative to the sequence of chunks
corresponding to the file).
total_chunks: Total number of chunks corresponding to a given file.
offset: Offset of this chunk in bytes from the beginning of the file.
total_size: Total size of the file in bytes. | def __init__(self, client_path, data, chunk_index, total_chunks, offset,
total_size):
self.client_path = client_path
self.data = data
self.offset = offset
self.total_size = total_size
self.chunk_index = chunk_index
self.total_chunks = total_chunks | 133,036 |
Initializes the uploader.
Args:
action: A parent action that creates the uploader. Used to communicate
with the parent flow.
chunk_size: A number of (uncompressed) bytes per a chunk. | def __init__(self, action, chunk_size=None):
chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE
self._action = action
self._streamer = streaming.Streamer(chunk_size=chunk_size) | 133,038 |
Uploads chunks of a file on a given path to the transfer store flow.
Args:
filepath: A path to the file to upload.
offset: An integer offset at which the file upload should start.
amount: An upper bound on number of bytes to stream. If it is `None` then
the whole file is uploaded.
Returns:
A `BlobImageDescriptor` object. | def UploadFilePath(self, filepath, offset=0, amount=None):
return self._UploadChunkStream(
self._streamer.StreamFilePath(filepath, offset=offset, amount=amount)) | 133,039 |
Uploads chunks of a given file descriptor to the transfer store flow.
Args:
fd: A file descriptor to upload.
offset: An integer offset at which the file upload should start.
amount: An upper bound on number of bytes to stream. If it is `None` then
the whole file is uploaded.
Returns:
A `BlobImageDescriptor` object. | def UploadFile(self, fd, offset=0, amount=None):
return self._UploadChunkStream(
self._streamer.StreamFile(fd, offset=offset, amount=amount)) | 133,040 |
Uploads a single chunk to the transfer store flow.
Args:
chunk: A chunk to upload.
Returns:
A `BlobImageChunkDescriptor` object. | def _UploadChunk(self, chunk):
blob = _CompressedDataBlob(chunk)
self._action.ChargeBytesToSession(len(chunk.data))
self._action.SendReply(blob, session_id=self._TRANSFER_STORE_SESSION_ID)
return rdf_client_fs.BlobImageChunkDescriptor(
digest=hashlib.sha256(chunk.data).digest(),
offset=chunk.offset,
length=len(chunk.data)) | 133,042 |
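A sketch of how the uploader methods combine. The class name `TransferStoreUploader` and the parent-action plumbing (`ChargeBytesToSession`, `SendReply`) are assumed from the methods above.

# `action` must be a client action providing ChargeBytesToSession/SendReply.
uploader = TransferStoreUploader(action, chunk_size=512 * 1024)  # name assumed
blob_image = uploader.UploadFilePath("/var/log/syslog", offset=0)
# blob_image is a BlobImageDescriptor with one chunk descriptor
# (digest, offset, length) per uploaded chunk.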
Instantiates a new _Metric.
Args:
metadata: An rdf_stats.MetricMetadata instance describing this _Metric.
registry: A prometheus_client.Registry instance.
Raises:
ValueError: metadata contains an unknown metric_type. | def __init__(self, metadata,
registry):
self.metadata = metadata
self.fields = stats_utils.FieldDefinitionTuplesFromProtos(
metadata.fields_defs)
field_names = [name for name, _ in self.fields]
if metadata.metric_type == rdf_stats.MetricMetadata.MetricType.COUNTER:
self.metric = prometheus_client.Counter(
metadata.varname,
metadata.docstring,
labelnames=field_names,
registry=registry)
elif metadata.metric_type == rdf_stats.MetricMetadata.MetricType.EVENT:
bins = metadata.bins or [
0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 4, 5, 6, 7, 8,
9, 10, 15, 20, 50, 100
]
self.metric = prometheus_client.Histogram(
metadata.varname,
metadata.docstring,
labelnames=field_names,
buckets=bins,
registry=registry)
elif metadata.metric_type == rdf_stats.MetricMetadata.MetricType.GAUGE:
self.metric = prometheus_client.Gauge(
metadata.varname,
metadata.docstring,
labelnames=field_names,
registry=registry)
else:
raise ValueError("Unknown metric type: {!r}".format(metadata.metric_type)) | 133,044 |
Instantiates a new PrometheusStatsCollector.
Args:
metadata_list: A list of MetricMetadata objects describing the metrics
that the StatsCollector will track.
registry: An instance of prometheus_client.CollectorRegistry. If None, a
new CollectorRegistry is instantiated. Use prometheus_client.REGISTRY
for the global default registry. | def __init__(self, metadata_list, registry=None):
self._metrics = {} # type: Dict[Text, _Metric]
if registry is None:
self._registry = prometheus_client.CollectorRegistry(auto_describe=True)
else:
self._registry = registry
self.lock = threading.RLock()
super(PrometheusStatsCollector, self).__init__(metadata_list) | 133,048 |
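A hedged usage sketch. The `stats_utils.CreateCounterMetadata` helper and the `IncrementCounter` method are assumed to match GRR's stats interface; only `prometheus_client` itself is a known external dependency.

from grr_response_core.stats import stats_utils  # helper path assumed

collector = PrometheusStatsCollector(
    [stats_utils.CreateCounterMetadata("grr_frontend_request_count")])
collector.IncrementCounter("grr_frontend_request_count")  # method assumed
# Because each collector gets its own CollectorRegistry by default, tests can
# scrape its metrics in isolation from the global prometheus registry.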
Parses the file finder condition types into the condition objects.
Args:
conditions: An iterator over `FileFinderCondition` objects.
Yields:
`MetadataCondition` objects that correspond to the file-finder conditions. | def Parse(conditions):
kind = rdf_file_finder.FileFinderCondition.Type
classes = {
kind.MODIFICATION_TIME: ModificationTimeCondition,
kind.ACCESS_TIME: AccessTimeCondition,
kind.INODE_CHANGE_TIME: InodeChangeTimeCondition,
kind.SIZE: SizeCondition,
kind.EXT_FLAGS: ExtFlagsCondition,
}
for condition in conditions:
try:
yield classes[condition.condition_type](condition)
except KeyError:
pass | 133,057 |
Parses the file finder condition types into the condition objects.
Args:
conditions: An iterator over `FileFinderCondition` objects.
Yields:
`ContentCondition` objects that correspond to the file-finder conditions. | def Parse(conditions):
kind = rdf_file_finder.FileFinderCondition.Type
classes = {
kind.CONTENTS_LITERAL_MATCH: LiteralMatchCondition,
kind.CONTENTS_REGEX_MATCH: RegexMatchCondition,
}
for condition in conditions:
try:
yield classes[condition.condition_type](condition)
except KeyError:
pass | 133,069 |
Scans given file searching for occurrences of given pattern.
Args:
fd: A file descriptor of the file that needs to be searched.
matcher: A matcher object specifying a pattern to search for.
Yields:
`BufferReference` objects pointing to file parts with matching content. | def Scan(self, fd,
matcher):
streamer = streaming.Streamer(
chunk_size=self.CHUNK_SIZE, overlap_size=self.OVERLAP_SIZE)
offset = self.params.start_offset
amount = self.params.length
for chunk in streamer.StreamFile(fd, offset=offset, amount=amount):
for span in chunk.Scan(matcher):
ctx_begin = max(span.begin - self.params.bytes_before, 0)
ctx_end = min(span.end + self.params.bytes_after, len(chunk.data))
ctx_data = chunk.data[ctx_begin:ctx_end]
yield rdf_client.BufferReference(
offset=chunk.offset + ctx_begin,
length=len(ctx_data),
data=ctx_data)
if self.params.mode == self.params.Mode.FIRST_HIT:
return | 133,070 |
Log an http based api call.
Args:
request: A WSGI request object.
response: A WSGI response object. | def LogHttpAdminUIAccess(self, request, response):
# TODO(user): generate event_id elsewhere and use it for all the log
# messages that have to do with handling corresponding request.
event_id = self.GetNewEventId()
api_method = response.headers.get("X-API-Method", "unknown")
api_reason = response.headers.get("X-GRR-Reason", "none")
log_msg = "%s API call [%s] by %s (reason: %s): %s [%d]" % (
event_id, api_method, request.user, api_reason, request.full_path,
response.status_code)
logging.info(log_msg)
if response.headers.get("X-No-Log") != "True":
if data_store.RelationalDBEnabled():
entry = rdf_objects.APIAuditEntry.FromHttpRequestResponse(
request, response)
data_store.REL_DB.WriteAPIAuditEntry(entry) | 133,083 |
Write a log entry for a Frontend or UI Request.
Args:
request: A HttpRequest protobuf.
source: Client id of the client initiating the request. Optional.
message_count: Number of messages received from the client. Optional. | def LogHttpFrontendAccess(self, request, source=None, message_count=None):
# TODO(user): generate event_id elsewhere and use it for all the log
# messages that have to do with handling corresponding request.
event_id = self.GetNewEventId()
log_msg = "%s-%s [%s]: %s %s %s %s (%d)" % (
event_id, request.source_ip, source or "<unknown>", request.method,
request.url, request.user_agent, request.user, message_count or 0)
logging.info(log_msg) | 133,084 |
Computes a path to the specified package resource.
Args:
package_name: A name of the package where the resource is located.
filepath: A path to the resource relative to the package location.
Returns:
A path to the resource or `None` if the resource cannot be found. | def ResourcePath(package_name, filepath):
# If we are running a pyinstaller-built binary we rely on the sys.prefix
# code below and avoid running this which will generate confusing error
# messages.
if not getattr(sys, "frozen", None):
target = _GetPkgResources(package_name, filepath)
if target and os.access(target, os.R_OK):
return target
# Installing from wheel places data_files relative to sys.prefix and not
# site-packages. If we can not find in site-packages, check sys.prefix
# instead.
# https://python-packaging-user-guide.readthedocs.io/en/latest/distributing/#data-files
target = os.path.join(sys.prefix, filepath)
if target and os.access(target, os.R_OK):
return target
return None | 133,092 |
Computes a path to the specified module.
Args:
module_name: A name of the module to get the path for.
Returns:
A path to the specified module.
Raises:
ImportError: If specified module cannot be imported. | def ModulePath(module_name):
module = importlib.import_module(module_name)
path = inspect.getfile(module)
# TODO: In Python 2 `inspect.getfile` returns a byte string, so
# we have to decode that in order to be consistent with Python 3.
if compatibility.PY2:
path = path.decode("utf-8")
# In case of packages we want a path to the directory rather than to the
# `__init__.py` file itself.
if os.path.basename(path).startswith("__init__."):
path = os.path.dirname(path)
# Sometimes __file__ points at a .pyc file, when we really mean the .py.
if path.endswith(".pyc"):
path = path[:-4] + ".py"
return path | 133,093 |
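A small illustration of the two branches above; the exact paths depend on the interpreter installation.

print(ModulePath("os"))           # e.g. /usr/lib/python3.8/os.py
print(ModulePath("collections"))  # e.g. /usr/lib/python3.8/collections (a package)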
r"""Converts the canonical paths as used by GRR to OS specific paths.
Due to the inconsistencies between handling paths in windows we need to
convert a path to an OS specific version prior to using it. This function
should be called just before any OS specific functions.
Canonical paths on windows have:
- / instead of \.
- Begin with /X:// where X is the drive letter.
Args:
path: A canonical path specification.
Returns:
A windows specific path. | def CanonicalPathToLocalPath(path):
# Account for raw devices
path = path.replace("/\\", "\\")
path = path.replace("/", "\\")
m = re.match(r"\\([a-zA-Z]):(.*)$", path)
if m:
path = "%s:\\%s" % (m.group(1), m.group(2).lstrip("\\"))
return path | 133,121 |
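Two worked conversions that follow the replacement and regex steps above:

# Drive-letter paths round-trip to native Windows paths.
assert CanonicalPathToLocalPath("/C:/Windows/System32") == r"C:\Windows\System32"
# Raw device specifications keep their UNC-style backslashes.
assert CanonicalPathToLocalPath(r"/\\.\C:") == r"\\.\C:"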
Provide chmod-like functionality for windows.
Doco links:
goo.gl/n7YR1
goo.gl/rDv81
goo.gl/hDobb
Args:
filename: target filename for acl
acl_list: list of ntsecuritycon acl strings to be applied with bitwise OR.
e.g. ["FILE_GENERIC_READ", "FILE_GENERIC_WRITE"]
user: username string. If not specified we use the user we are running as.
Raises:
AttributeError: if a bad permission is passed
RuntimeError: if filename doesn't exist | def WinChmod(filename, acl_list, user=None):
if user is None:
user = win32api.GetUserName()
if not os.path.exists(filename):
raise RuntimeError("filename %s does not exist" % filename)
acl_bitmask = 0
for acl in acl_list:
acl_bitmask |= getattr(ntsecuritycon, acl)
dacl = win32security.ACL()
win_user, _, _ = win32security.LookupAccountName("", user)
dacl.AddAccessAllowedAce(win32security.ACL_REVISION, acl_bitmask, win_user)
security_descriptor = win32security.GetFileSecurity(
filename, win32security.DACL_SECURITY_INFORMATION)
# Tell windows to set the acl and mark it as explicitly set
security_descriptor.SetSecurityDescriptorDacl(DACL_PRESENT, dacl,
DACL_DEFAULT)
win32security.SetFileSecurity(
filename, win32security.DACL_SECURITY_INFORMATION, security_descriptor) | 133,123 |
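An illustrative call (Windows-only); the target file below is hypothetical.

# Grant read/write on a hypothetical config file to the current user.
WinChmod(r"C:\Windows\System32\GRR\grr.yaml",
         ["FILE_GENERIC_READ", "FILE_GENERIC_WRITE"])
# A misspelled ACL name raises AttributeError via getattr(ntsecuritycon, acl).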
Resolves the raw device that contains the path.
Args:
path: A path to examine.
Returns:
A pathspec to read the raw device as well as the modified path to read
within the raw device. This is usually the path without the mount point.
Raises:
IOError: if the path does not exist or some unexpected behaviour occurs. | def GetRawDevice(path):
path = CanonicalPathToLocalPath(path)
# Try to expand the shortened paths
try:
path = win32file.GetLongPathName(path)
except pywintypes.error:
pass
try:
mount_point = win32file.GetVolumePathName(path)
except pywintypes.error as details:
logging.info("path not found. %s", details)
raise IOError("No mountpoint for path: %s" % path)
if not path.startswith(mount_point):
stripped_mp = mount_point.rstrip("\\")
if not path.startswith(stripped_mp):
raise IOError("path %s is not mounted under %s" % (path, mount_point))
corrected_path = LocalPathToCanonicalPath(path[len(mount_point):])
corrected_path = utils.NormalizePath(corrected_path)
volume = win32file.GetVolumeNameForVolumeMountPoint(mount_point).rstrip("\\")
volume = LocalPathToCanonicalPath(volume)
# The pathspec for the raw volume
result = rdf_paths.PathSpec(
path=volume,
pathtype=rdf_paths.PathSpec.PathType.OS,
mount_point=mount_point.rstrip("\\"))
return result, corrected_path | 133,125 |
Write the message into the transaction log.
Args:
grr_message: A GrrMessage instance. | def Write(self, grr_message):
grr_message = grr_message.SerializeToString()
try:
winreg.SetValueEx(_GetServiceKey(), "Transaction", 0, winreg.REG_BINARY,
grr_message)
self._synced = False
except OSError:
pass | 133,133 |
Returns stat information about the given OS path, calling os.[l]stat.
Args:
path: A path to perform `stat` on.
follow_symlink: If True, return the `stat` of the file that a symlink
points to; if False, return the `stat` of the symlink itself. For
non-symlinks this setting has no effect.
Returns:
Stat instance, with information about the given path. | def FromPath(cls, path, follow_symlink = True):
# Note that we do not add type assertion for `path` here. The reason is that
# many of the existing system calls (e.g. `os.listdir`) return results as
# bytestrings in Python 2. This is fine because it also means that they also
# accept bytestring paths as arguments in Python 2 (e.g. `os.stat`). Having
# consistent types in both versions is certainly desired but it might be too
# much work for too little benefit.
precondition.AssertType(follow_symlink, bool)
if follow_symlink:
stat_obj = os.stat(path)
else:
stat_obj = os.lstat(path)
return cls(path=path, stat_obj=stat_obj) | 133,137 |
Wrap an existing stat result in a `filesystem.Stat` instance.
Args:
path: the path of `stat_obj`.
stat_obj: an instance of os.stat_result with information about `path`. | def __init__(self, path, stat_obj):
self._path = path
self._stat = stat_obj
self._flags_linux = None
self._flags_osx = None | 133,138 |