def Validate(rdf_artifact):
"""Attempts to validate the artifact has been well defined.
This checks both syntax and dependencies of the artifact. Because of that,
this method can be called only after all other artifacts have been loaded.
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactDefinitionError: If artifact is invalid.
"""
ValidateSyntax(rdf_artifact)
ValidateDependencies(rdf_artifact) | Attempts to validate the artifact has been well defined.
This checks both syntax and dependencies of the artifact. Because of that,
this method can be called only after all other artifacts have been loaded.
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactDefinitionError: If artifact is invalid. | Validate | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def GetArtifactDependencies(rdf_artifact, recursive=False, depth=1):
"""Return a set of artifact dependencies.
Args:
rdf_artifact: RDF object artifact.
recursive: If True recurse into dependencies to find their dependencies.
depth: Used for limiting recursion depth.
Returns:
A set of strings containing the dependent artifact names.
Raises:
RuntimeError: If maximum recursion depth reached.
"""
deps = set()
for source in rdf_artifact.sources:
if source.type == rdf_artifacts.ArtifactSource.SourceType.ARTIFACT_GROUP:
if source.attributes.GetItem("names"):
deps.update(source.attributes.GetItem("names"))
if depth > 10:
raise RuntimeError("Max artifact recursion depth reached.")
deps_set = set(deps)
if recursive:
for dep in deps:
artifact_obj = REGISTRY.GetArtifact(dep)
new_dep = GetArtifactDependencies(artifact_obj, True, depth=depth + 1)
if new_dep:
deps_set.update(new_dep)
  return deps_set
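# Example (illustrative sketch, not GRR code): GetArtifactDependencies() unions
# the names listed by ARTIFACT_GROUP sources and, when recursive=True, resolves
# those names again through the registry. The same idea with a hypothetical
# plain-dict registry standing in for REGISTRY (artifact names are made up):
FAKE_REGISTRY = {
    "WindowsPersistence": ["WindowsRunKeys", "WindowsServices"],
    "WindowsRunKeys": [],
    "WindowsServices": ["WindowsDrivers"],
    "WindowsDrivers": [],
}


def ResolveDependenciesExample(name, depth=1):
  """Returns the transitive closure of grouped artifact names."""
  if depth > 10:
    raise RuntimeError("Max artifact recursion depth reached.")
  deps = set(FAKE_REGISTRY[name])
  for dep in set(deps):
    deps.update(ResolveDependenciesExample(dep, depth=depth + 1))
  return deps


assert ResolveDependenciesExample("WindowsPersistence") == {
    "WindowsRunKeys", "WindowsServices", "WindowsDrivers"}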
def GetArtifactsDependenciesClosure(name_list, os_name=None):
"""For all the artifacts in the list returns them and their dependencies."""
artifacts = {
a.name: a
for a in REGISTRY.GetArtifacts(os_name=os_name, name_list=name_list)
}
dep_names = set()
for art in artifacts.values():
dep_names.update(GetArtifactDependencies(art, recursive=True))
if dep_names:
for dep in REGISTRY.GetArtifacts(os_name=os_name, name_list=dep_names):
artifacts[dep.name] = dep
  return list(artifacts.values())
def GetArtifactPathDependencies(rdf_artifact):
"""Return a set of knowledgebase path dependencies.
Args:
rdf_artifact: RDF artifact object.
Returns:
A set of strings for the required kb objects e.g.
["users.appdata", "systemroot"]
"""
deps = set()
for source in rdf_artifact.sources:
for arg, value in source.attributes.items():
paths = []
if arg in ["path", "query"]:
paths.append(value)
if arg == "key_value_pairs":
# This is a REGISTRY_VALUE {key:blah, value:blah} dict.
paths.extend([x["key"] for x in value])
if arg in ["keys", "paths", "path_list", "content_regex_list"]:
paths.extend(value)
for path in paths:
for match in artifact_utils.INTERPOLATED_REGEX.finditer(path):
deps.add(match.group()[2:-2]) # Strip off %%.
  return deps
def InitializeDataStore():
"""Initialize the data store.
Depends on the stats module being initialized.
"""
global REL_DB # pylint: disable=global-statement
global BLOBS # pylint: disable=global-statement
if _LIST_STORAGE.value:
_ListStorageOptions()
sys.exit(0)
# Initialize the relational DB.
rel_db_name = config.CONFIG["Database.implementation"]
if not rel_db_name:
# TODO(hanuszczak): I think we should raise here instead of silently doing
# nothing.
return
try:
cls = registry_init.REGISTRY[rel_db_name]
except KeyError:
raise ValueError("Database %s not found." % rel_db_name)
logging.info("Using database implementation %s", rel_db_name)
REL_DB = db.DatabaseValidationWrapper(cls())
# Initialize the blobstore. This has to be done after the database has been
# already initialized as it might be possible that users want to use the data-
# base-backed blobstore implementation.
blobstore_name = config.CONFIG.Get("Blobstore.implementation")
try:
cls = blob_store.REGISTRY[blobstore_name]
except KeyError:
raise ValueError("No blob store %s found." % blobstore_name)
  BLOBS = blob_store.BlobStoreValidationWrapper(cls())
def _GenHttpRequestProto(self):
"""Create a valid request object."""
request = jobs_pb2.HttpRequest()
request.source_ip = "127.0.0.1"
request.user_agent = "Firefox or something"
request.url = "http://test.com/test?omg=11%45x%20%20"
request.user = "anonymous"
request.timestamp = int(time.time() * 1e6)
request.size = 1000
    return request
def StopHuntIfCrashLimitExceeded(hunt_id):
"""Stops the hunt if number of crashes exceeds the limit."""
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
# Do nothing if the hunt is already stopped.
if hunt_obj.hunt_state == rdf_hunt_objects.Hunt.HuntState.STOPPED:
return hunt_obj
if hunt_obj.crash_limit:
hunt_counters = data_store.REL_DB.ReadHuntCounters(hunt_id)
if hunt_counters.num_crashed_clients >= hunt_obj.crash_limit:
      # Remove our rules from the foreman and cancel all the started flows.
# Hunt will be hard-stopped and it will be impossible to restart it.
reason = (
f"Hunt {hunt_obj.hunt_id} reached the crashes limit of"
f" {hunt_obj.crash_limit} and was stopped."
)
hunt_state_reason = hunts_pb2.Hunt.HuntStateReason.TOTAL_CRASHES_EXCEEDED
StopHunt(
hunt_obj.hunt_id,
hunt_state_reason=hunt_state_reason,
reason_comment=reason,
)
  return hunt_obj
def StopHuntIfCPUOrNetworkLimitsExceeded(hunt_id):
"""Stops the hunt if average limites are exceeded."""
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
# Do nothing if the hunt is already stopped.
if hunt_obj.hunt_state == rdf_hunt_objects.Hunt.HuntState.STOPPED:
return hunt_obj
hunt_counters = data_store.REL_DB.ReadHuntCounters(hunt_id)
# Check global hunt network bytes limit first.
if (
hunt_obj.total_network_bytes_limit
and hunt_counters.total_network_bytes_sent
> hunt_obj.total_network_bytes_limit
):
reason = (
f"Hunt {hunt_obj.hunt_id} reached the total network bytes sent limit of"
f" {hunt_obj.total_network_bytes_limit} and was stopped."
)
hunt_state_reason = hunts_pb2.Hunt.HuntStateReason.TOTAL_NETWORK_EXCEEDED
StopHunt(
hunt_obj.hunt_id,
hunt_state_reason=hunt_state_reason,
reason_comment=reason,
)
# Check that we have enough clients to apply average limits.
if hunt_counters.num_clients < MIN_CLIENTS_FOR_AVERAGE_THRESHOLDS:
return hunt_obj
# Check average per-client results count limit.
if hunt_obj.avg_results_per_client_limit:
avg_results_per_client = (
hunt_counters.num_results / hunt_counters.num_clients
)
if avg_results_per_client > hunt_obj.avg_results_per_client_limit:
# Stop the hunt since we get too many results per client.
reason = (
f"Hunt {hunt_obj.hunt_id} reached the average results per client "
f"limit of {hunt_obj.avg_results_per_client_limit} and was stopped."
)
hunt_state_reason = hunts_pb2.Hunt.HuntStateReason.AVG_RESULTS_EXCEEDED
StopHunt(
hunt_obj.hunt_id,
hunt_state_reason=hunt_state_reason,
reason_comment=reason,
)
# Check average per-client CPU seconds limit.
if hunt_obj.avg_cpu_seconds_per_client_limit:
avg_cpu_seconds_per_client = (
hunt_counters.total_cpu_seconds / hunt_counters.num_clients
)
if avg_cpu_seconds_per_client > hunt_obj.avg_cpu_seconds_per_client_limit:
# Stop the hunt since we use too many CPUs per client.
reason = (
f"Hunt {hunt_obj.hunt_id} reached the average CPU seconds per client"
f" limit of {hunt_obj.avg_cpu_seconds_per_client_limit} and was"
" stopped."
)
hunt_state_reason = hunts_pb2.Hunt.HuntStateReason.AVG_CPU_EXCEEDED
StopHunt(
hunt_obj.hunt_id,
hunt_state_reason=hunt_state_reason,
reason_comment=reason,
)
# Check average per-client network bytes limit.
if hunt_obj.avg_network_bytes_per_client_limit:
avg_network_bytes_per_client = (
hunt_counters.total_network_bytes_sent / hunt_counters.num_clients
)
if (
avg_network_bytes_per_client
> hunt_obj.avg_network_bytes_per_client_limit
):
# Stop the hunt since we use too many network bytes sent
# per client.
reason = (
f"Hunt {hunt_obj.hunt_id} reached the average network bytes per"
f" client limit of {hunt_obj.avg_network_bytes_per_client_limit} and"
" was stopped."
)
hunt_state_reason = hunts_pb2.Hunt.HuntStateReason.AVG_NETWORK_EXCEEDED
StopHunt(
hunt_obj.hunt_id,
hunt_state_reason=hunt_state_reason,
reason_comment=reason,
)
  return hunt_obj
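# Worked example (made-up numbers): the per-client averages above are plain
# divisions over the hunt counters, and they are only evaluated once the hunt
# has at least MIN_CLIENTS_FOR_AVERAGE_THRESHOLDS clients so the averages are
# meaningful.
example_num_clients = 1_000
example_num_results = 250_000
example_avg_results_per_client_limit = 200

example_avg_results_per_client = example_num_results / example_num_clients  # 250.0
assert example_avg_results_per_client > example_avg_results_per_client_limit  # hunt is stopped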
def CompleteHuntIfExpirationTimeReached(hunt_id: str) -> rdf_hunt_objects.Hunt:
"""Marks the hunt as complete if it's past its expiry time."""
# TODO(hanuszczak): This should not set the hunt state to `COMPLETED` but we
# should have a separate `EXPIRED` state instead and set that.
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
if (
hunt_obj.hunt_state
not in [
rdf_hunt_objects.Hunt.HuntState.STOPPED,
rdf_hunt_objects.Hunt.HuntState.COMPLETED,
]
and hunt_obj.expired
):
StopHunt(
hunt_obj.hunt_id,
hunts_pb2.Hunt.HuntStateReason.DEADLINE_REACHED,
reason_comment="Hunt completed.",
)
data_store.REL_DB.UpdateHuntObject(
hunt_obj.hunt_id, hunt_state=hunts_pb2.Hunt.HuntState.COMPLETED
)
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_obj.hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
  return hunt_obj
def CreateHunt(hunt_obj: hunts_pb2.Hunt):
"""Creates a hunt using a given hunt object."""
data_store.REL_DB.WriteHuntObject(hunt_obj)
if hunt_obj.output_plugins:
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
output_plugins_states = flow.GetOutputPluginStates(
hunt_obj.output_plugins, source=f"hunts/{hunt_obj.hunt_id}"
)
output_plugins_states = [
mig_flow_runner.ToProtoOutputPluginState(state)
for state in output_plugins_states
]
data_store.REL_DB.WriteHuntOutputPluginsStates(
hunt_obj.hunt_id, output_plugins_states
    )
def CreateAndStartHunt(flow_name, flow_args, creator, **kwargs):
"""Creates and starts a new hunt."""
# This interface takes a time when the hunt expires. However, the legacy hunt
# starting interface took an rdfvalue.DurationSeconds object which was then
# added to the current time to get the expiry. This check exists to make sure
# we don't confuse the two.
if "duration" in kwargs:
precondition.AssertType(kwargs["duration"], rdfvalue.Duration)
hunt_args = rdf_hunt_objects.HuntArguments.Standard(
flow_name=flow_name, flow_args=rdf_structs.AnyValue.Pack(flow_args)
)
hunt_obj = rdf_hunt_objects.Hunt(
creator=creator,
args=hunt_args,
create_time=rdfvalue.RDFDatetime.Now(),
**kwargs,
)
hunt_obj = mig_hunt_objects.ToProtoHunt(hunt_obj)
CreateHunt(hunt_obj)
StartHunt(hunt_obj.hunt_id)
  return hunt_obj.hunt_id
def _ScheduleGenericHunt(hunt_obj: rdf_hunt_objects.Hunt):
"""Adds foreman rules for a generic hunt."""
  # TODO: Migrate foreman conditions to use relative expiration
# durations instead of absolute timestamps.
foreman_condition = foreman_rules.ForemanCondition(
creation_time=rdfvalue.RDFDatetime.Now(),
expiration_time=hunt_obj.init_start_time + hunt_obj.duration,
description=f"Hunt {hunt_obj.hunt_id} {hunt_obj.args.hunt_type}",
client_rule_set=hunt_obj.client_rule_set,
hunt_id=hunt_obj.hunt_id,
)
# Make sure the rule makes sense.
foreman_condition.Validate()
proto_foreman_condition = mig_foreman_rules.ToProtoForemanCondition(
foreman_condition
)
  data_store.REL_DB.WriteForemanRule(proto_foreman_condition)
def _ScheduleVariableHunt(hunt_obj: rdf_hunt_objects.Hunt):
"""Schedules flows for a variable hunt."""
if hunt_obj.client_rate != 0:
raise VariableHuntCanNotHaveClientRateError(
hunt_obj.hunt_id, hunt_obj.client_rate
)
seen_clients = set()
for flow_group in hunt_obj.args.variable.flow_groups:
for client_id in flow_group.client_ids:
if client_id in seen_clients:
raise CanStartAtMostOneFlowPerClientError(hunt_obj.hunt_id, client_id)
seen_clients.add(client_id)
now = rdfvalue.RDFDatetime.Now()
for flow_group in hunt_obj.args.variable.flow_groups:
flow_cls = registry.FlowRegistry.FlowClassByName(flow_group.flow_name)
if flow_group.HasField("flow_args"):
flow_args = flow_group.flow_args.Unpack(flow_cls.args_type)
else:
flow_args = None
for client_id in flow_group.client_ids:
flow.StartFlow(
client_id=client_id,
creator=hunt_obj.creator,
cpu_limit=hunt_obj.per_client_cpu_limit,
network_bytes_limit=hunt_obj.per_client_network_bytes_limit,
flow_cls=flow_cls,
flow_args=flow_args,
# Setting start_at explicitly ensures that flow.StartFlow won't
# process flow's Start state right away. Only the flow request
# will be scheduled.
start_at=now,
parent=flow.FlowParent.FromHuntID(hunt_obj.hunt_id),
      )
def StartHunt(hunt_id) -> rdf_hunt_objects.Hunt:
"""Starts a hunt with a given id."""
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
num_hunt_clients = data_store.REL_DB.CountHuntFlows(hunt_id)
if hunt_obj.hunt_state != hunt_obj.HuntState.PAUSED:
raise OnlyPausedHuntCanBeStartedError(hunt_obj)
data_store.REL_DB.UpdateHuntObject(
hunt_id,
hunt_state=hunts_pb2.Hunt.HuntState.STARTED,
start_time=rdfvalue.RDFDatetime.Now(),
num_clients_at_start_time=num_hunt_clients,
)
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
if hunt_obj.args.hunt_type == hunt_obj.args.HuntType.STANDARD:
_ScheduleGenericHunt(hunt_obj)
elif hunt_obj.args.hunt_type == hunt_obj.args.HuntType.VARIABLE:
_ScheduleVariableHunt(hunt_obj)
else:
raise UnknownHuntTypeError(
f"Invalid hunt type for hunt {hunt_id}: {hunt_obj.args.hunt_type}"
)
  return hunt_obj
def PauseHunt(
hunt_id,
hunt_state_reason=None,
reason=None,
) -> rdf_hunt_objects.Hunt:
"""Pauses a hunt with a given id."""
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
if hunt_obj.hunt_state != hunt_obj.HuntState.STARTED:
raise OnlyStartedHuntCanBePausedError(hunt_obj)
data_store.REL_DB.UpdateHuntObject(
hunt_id,
hunt_state=hunts_pb2.Hunt.HuntState.PAUSED,
hunt_state_reason=hunt_state_reason,
hunt_state_comment=reason,
)
data_store.REL_DB.RemoveForemanRule(hunt_id=hunt_obj.hunt_id)
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
  return hunt_obj
def StopHunt(
hunt_id: str,
hunt_state_reason: Optional[
hunts_pb2.Hunt.HuntStateReason.ValueType
] = None,
reason_comment: Optional[str] = None,
) -> rdf_hunt_objects.Hunt:
"""Stops a hunt with a given id."""
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
if hunt_obj.hunt_state not in [
hunt_obj.HuntState.STARTED,
hunt_obj.HuntState.PAUSED,
]:
raise OnlyStartedOrPausedHuntCanBeStoppedError(hunt_obj)
data_store.REL_DB.UpdateHuntObject(
hunt_id,
hunt_state=hunts_pb2.Hunt.HuntState.STOPPED,
hunt_state_reason=hunt_state_reason,
hunt_state_comment=reason_comment,
)
data_store.REL_DB.RemoveForemanRule(hunt_id=hunt_obj.hunt_id)
# TODO: Stop matching on string (comment).
if (
hunt_state_reason != hunts_pb2.Hunt.HuntStateReason.TRIGGERED_BY_USER
and reason_comment is not None
and reason_comment != CANCELLED_BY_USER
and hunt_obj.creator not in access_control.SYSTEM_USERS
):
notification.Notify(
hunt_obj.creator,
objects_pb2.UserNotification.Type.TYPE_HUNT_STOPPED,
reason_comment,
objects_pb2.ObjectReference(
reference_type=objects_pb2.ObjectReference.Type.HUNT,
hunt=objects_pb2.HuntReference(hunt_id=hunt_obj.hunt_id),
),
)
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
  return hunt_obj
def UpdateHunt(
hunt_id,
client_limit=None,
client_rate=None,
duration=None,
) -> rdf_hunt_objects.Hunt:
"""Updates a hunt (it must be paused to be updated)."""
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
if hunt_obj.hunt_state != hunt_obj.HuntState.PAUSED:
raise OnlyPausedHuntCanBeModifiedError(hunt_obj)
data_store.REL_DB.UpdateHuntObject(
hunt_id,
client_limit=client_limit,
client_rate=client_rate,
duration=duration,
)
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
  return hunt_obj
def StartHuntFlowOnClient(client_id, hunt_id):
"""Starts a flow corresponding to a given hunt on a given client."""
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
# There may be a little race between foreman rules being removed and
# foreman scheduling a client on an (already) paused hunt. Making sure
# we don't lose clients in such a race by accepting clients for paused
# hunts.
if not models_hunts.IsHuntSuitableForFlowProcessing(hunt_obj.hunt_state):
return
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
if hunt_obj.args.hunt_type == hunt_obj.args.HuntType.STANDARD:
hunt_args = hunt_obj.args.standard
if hunt_obj.client_rate > 0:
# Given that we use caching in _GetNumClients and hunt_obj may be updated
# in another process, we have to account for cases where num_clients_diff
# may go below 0.
num_clients_diff = max(
0,
_GetNumClients(hunt_obj.hunt_id) - hunt_obj.num_clients_at_start_time,
)
next_client_due_msecs = int(
num_clients_diff / hunt_obj.client_rate * 60e6
)
start_at = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(
hunt_obj.last_start_time.AsMicrosecondsSinceEpoch()
+ next_client_due_msecs
)
else:
start_at = None
# TODO(user): remove client_rate support when AFF4 is gone.
# In REL_DB always work as if client rate is 0.
flow_cls = registry.FlowRegistry.FlowClassByName(hunt_args.flow_name)
if hunt_args.HasField("flow_args"):
flow_args = hunt_args.flow_args.Unpack(flow_cls.args_type)
else:
flow_args = None
flow.StartFlow(
client_id=client_id,
creator=hunt_obj.creator,
cpu_limit=hunt_obj.per_client_cpu_limit,
network_bytes_limit=hunt_obj.per_client_network_bytes_limit,
flow_cls=flow_cls,
flow_args=flow_args,
start_at=start_at,
parent=flow.FlowParent.FromHuntID(hunt_id),
)
if hunt_obj.client_limit:
if _GetNumClients(hunt_obj.hunt_id) >= hunt_obj.client_limit:
try:
PauseHunt(
hunt_id,
hunt_state_reason=rdf_hunt_objects.Hunt.HuntStateReason.TOTAL_CLIENTS_EXCEEDED,
)
except OnlyStartedHuntCanBePausedError:
pass
elif hunt_obj.args.hunt_type == hunt_obj.args.HuntType.VARIABLE:
raise NotImplementedError()
else:
raise UnknownHuntTypeError(
f"Can't determine hunt type when starting hunt {client_id} on client"
f" {hunt_id}."
) | Starts a flow corresponding to a given hunt on a given client. | StartHuntFlowOnClient | python | google/grr | grr/server/grr_response_server/hunt.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/hunt.py | Apache-2.0 |
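# Worked example (made-up numbers): with client_rate clients per minute, the
# k-th client beyond num_clients_at_start_time is due k / client_rate minutes
# after the hunt's last start time. Note that, despite its name,
# next_client_due_msecs above holds microseconds - it is added directly to a
# microseconds-since-epoch timestamp.
example_client_rate = 20.0      # clients per minute
example_num_clients_diff = 50   # clients scheduled since the hunt (re)started

example_next_client_due_usecs = int(
    example_num_clients_diff / example_client_rate * 60e6
)
assert example_next_client_due_usecs == 150_000_000  # 2.5 minutes after last_start_time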
def ProcessMessages(self, msgs):
"""This is where messages get processed.
Override in derived classes.
Args:
msgs: The GrrMessages sent by the client.
""" | This is where messages get processed.
Override in derived classes.
Args:
msgs: The GrrMessages sent by the client. | ProcessMessages | python | google/grr | grr/server/grr_response_server/message_handlers.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/message_handlers.py | Apache-2.0 |
def __init__(self, source_urn=None):
"""OutputPlugin constructor.
Args:
source_urn: URN identifying source of the data (hunt or flow).
Raises:
ValueError: If one of the keyword arguments is empty.
"""
super().__init__()
if not source_urn:
raise ValueError("source_urn can't be empty.")
    self.source_urn = source_urn
def output_file_name(self):
"""Name of the file where plugin's output should be written to."""
safe_path = re.sub(r":|/", "_", self.source_urn.Path().lstrip("/"))
return "results_%s%s" % (safe_path, self.output_file_extension) | Name of the file where plugin's output should be written to. | output_file_name | python | google/grr | grr/server/grr_response_server/instant_output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/instant_output_plugin.py | Apache-2.0 |
def Start(self):
"""Start method is called in the beginning of the export.
Yields:
Chunks of bytes.
""" | Start method is called in the beginning of the export.
Yields:
Chunks of bytes. | Start | python | google/grr | grr/server/grr_response_server/instant_output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/instant_output_plugin.py | Apache-2.0 |
def ProcessValues(self, value_cls, values_generator_fn):
"""Processes a batch of values with the same type.
ProcessValues is called *once per value type* for each value type in
the flow/hunt results collection.
Args:
value_cls: Class identifying type of the values to be processed.
values_generator_fn: Function returning an iterable with values. Each
value is a GRRMessage wrapping a value of a value_cls type.
values_generator_fn may be called multiple times within 1
ProcessValues() call - for example, when multiple passes over the data
are required.
"""
    raise NotImplementedError()
def Finish(self):
"""Finish method is called at the very end of the export.
Yields:
Chunks of bytes.
""" | Finish method is called at the very end of the export.
Yields:
Chunks of bytes. | Finish | python | google/grr | grr/server/grr_response_server/instant_output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/instant_output_plugin.py | Apache-2.0 |
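# Illustrative sketch (not one of GRR's shipped plugins): Start(),
# ProcessValues() and Finish() define a streaming export. The framework calls
# Start() once, ProcessValues() once per stored value type, then Finish(), and
# concatenates every yielded byte chunk into the output file. A toy plugin
# following that contract, assuming the InstantOutputPlugin base class above:
class LineCountExamplePlugin(InstantOutputPlugin):
  """Writes one line per value type with the number of results seen."""

  output_file_extension = ".txt"

  def Start(self):
    yield b"# export started\n"

  def ProcessValues(self, value_cls, values_generator_fn):
    count = sum(1 for _ in values_generator_fn())
    yield ("%s: %d results\n" % (value_cls.__name__, count)).encode("utf-8")

  def Finish(self):
    yield b"# export finished\n"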
def _GetMetadataForClients(self, client_urns):
"""Fetches metadata for a given list of clients."""
result = {}
metadata_to_fetch = set()
for urn in client_urns:
try:
result[urn] = self._cached_metadata[urn]
except KeyError:
metadata_to_fetch.add(urn)
if metadata_to_fetch:
client_ids = set(urn.Basename() for urn in metadata_to_fetch)
infos = data_store.REL_DB.MultiReadClientFullInfo(client_ids)
fetched_metadata = [
export.GetMetadata(client_id, mig_objects.ToRDFClientFullInfo(info))
for client_id, info in infos.items()
]
for metadata in fetched_metadata:
metadata.source_urn = self.source_urn
self._cached_metadata[metadata.client_urn] = metadata
result[metadata.client_urn] = metadata
metadata_to_fetch.remove(metadata.client_urn)
for urn in metadata_to_fetch:
default_mdata = base.ExportedMetadata(source_urn=self.source_urn)
result[urn] = default_mdata
self._cached_metadata[urn] = default_mdata
    return [result[urn] for urn in client_urns]
def GetExportOptions(self):
"""Rerturns export options to be used by export converter."""
return base.ExportOptions() | Rerturns export options to be used by export converter. | GetExportOptions | python | google/grr | grr/server/grr_response_server/instant_output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/instant_output_plugin.py | Apache-2.0 |
def ProcessSingleTypeExportedValues(self, original_type, exported_values):
"""Processes exported values of the same type.
Exported_values are guaranteed to have the same type. Consequently, this
function may be called multiple times with the same original_type
argument. Typical example: when export converters generate multiple
kinds of exported values for a given source value (for example,
Process is converted to ExportedProcess and ExportedNetworkConnection
values).
Args:
original_type: Class of the original set of values that were converted to
exported_values.
exported_values: An iterator with exported value. All values are
guaranteed to have the same class.
Yields:
Chunks of bytes.
"""
    raise NotImplementedError()
def _GenerateSingleTypeIteration(
self, next_types, processed_types, converted_responses
):
"""Yields responses of a given type only.
_GenerateSingleTypeIteration iterates through converted_responses and
only yields responses of the same type. The type is either popped from
next_types or inferred from the first item of converted_responses.
The type is added to a set of processed_types.
Along the way _GenerateSingleTypeIteration updates next_types set.
All newly encountered and not previously processed types are added to
next_types set.
Calling _GenerateSingleTypeIteration multiple times allows doing
multiple passes on converted responses and emitting converted responses
of the same type continuously (so that they can be written into
the same file by the plugin).
Args:
next_types: List of value type classes that will be used in further
iterations.
processed_types: List of value type classes that have been used already.
converted_responses: Iterable with values to iterate over.
Yields:
Values from converted_response with the same type. Type is either
popped from the next_types set or inferred from the first
converted_responses value.
"""
if not next_types:
current_type = None
else:
current_type = next_types.pop()
processed_types.add(current_type)
for converted_response in converted_responses:
if not current_type:
current_type = converted_response.__class__
processed_types.add(current_type)
if converted_response.__class__ != current_type:
if converted_response.__class__ not in processed_types:
next_types.add(converted_response.__class__)
continue
      yield converted_response
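# Illustrative sketch (plain Python, not GRR code): repeated passes with
# _GenerateSingleTypeIteration regroup a mixed stream by type without buffering
# everything - each pass emits one type and queues the other, not-yet-processed
# types for later passes. The same loop structure over a re-iterable stream of
# plain values:
def _SingleTypePassExample(next_types, processed_types, stream):
  current = next_types.pop() if next_types else None
  if current is not None:
    processed_types.add(current)
  for item in stream:
    if current is None:
      current = type(item)
      processed_types.add(current)
    if type(item) is not current:
      if type(item) not in processed_types:
        next_types.add(type(item))
      continue
    yield item


def IterateByTypeExample(make_stream):
  next_types, processed_types = set(), set()
  while True:
    batch = list(
        _SingleTypePassExample(next_types, processed_types, make_stream())
    )
    if batch:
      yield type(batch[0]), batch
    if not next_types:
      break


for value_cls, values in IterateByTypeExample(lambda: iter([1, "a", 2.5, 2, "b"])):
  print(value_cls.__name__, values)
# int [1, 2], then str ['a', 'b'] and float [2.5] (order of later passes varies)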
def _GenerateConvertedValues(self, converter, grr_messages):
"""Generates converted values using given converter from given messages.
Groups values in batches of BATCH_SIZE size and applies the converter
to each batch.
Args:
converter: ExportConverter instance.
grr_messages: An iterable (a generator is assumed) with GRRMessage values.
Yields:
Values generated by the converter.
Raises:
ValueError: if any of the GrrMessage objects doesn't have "source" set.
"""
for batch in collection.Batch(grr_messages, self.BATCH_SIZE):
metadata_items = self._GetMetadataForClients([gm.source for gm in batch])
batch_with_metadata = zip(metadata_items, [gm.payload for gm in batch])
for result in converter.BatchConvert(batch_with_metadata):
        yield result
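# Illustrative stand-in (an assumption, not grr_response_core's actual
# implementation): collection.Batch used above groups a generator of
# GrrMessages into fixed-size lists, so client metadata is looked up once per
# batch instead of once per message.
def BatchExample(items, size):
  """Yields lists of at most `size` consecutive items."""
  batch = []
  for item in items:
    batch.append(item)
    if len(batch) == size:
      yield batch
      batch = []
  if batch:
    yield batch


assert list(BatchExample(range(7), 3)) == [[0, 1, 2], [3, 4, 5], [6]]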
def ApplyPluginToMultiTypeCollection(
plugin, output_collection, source_urn=None
):
"""Applies instant output plugin to a multi-type collection.
Args:
plugin: InstantOutputPlugin instance.
output_collection: MultiTypeCollection instance.
source_urn: If not None, override source_urn for collection items. This has
to be used when exporting flow results - their GrrMessages don't have
"source" attribute set.
Yields:
Bytes chunks, as generated by the plugin.
"""
for chunk in plugin.Start():
yield chunk
for stored_type_name in sorted(output_collection.ListStoredTypes()):
stored_cls = rdfvalue.RDFValue.classes[stored_type_name]
# pylint: disable=cell-var-from-loop
def GetValues():
for timestamp, value in output_collection.ScanByType(stored_type_name):
_ = timestamp
if source_urn:
value.source = source_urn
yield value
# pylint: enable=cell-var-from-loop
for chunk in plugin.ProcessValues(stored_cls, GetValues):
yield chunk
for chunk in plugin.Finish():
    yield chunk
def ApplyPluginToTypedCollection(plugin, type_names, fetch_fn):
"""Applies instant output plugin to a collection of results.
Args:
plugin: InstantOutputPlugin instance.
type_names: List of type names (strings) to be processed.
fetch_fn: Function that takes a type name as an argument and returns
available items (FlowResult) corresponding to this type. Items are
returned as a generator
Yields:
Bytes chunks, as generated by the plugin.
"""
for chunk in plugin.Start():
yield chunk
def GetValues(tn):
for v in fetch_fn(tn):
yield v
for type_name in sorted(type_names):
stored_cls = rdfvalue.RDFValue.classes[type_name]
for chunk in plugin.ProcessValues(
stored_cls, functools.partial(GetValues, type_name)
):
yield chunk
for chunk in plugin.Finish():
    yield chunk
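# Hedged usage sketch: ApplyPluginToTypedCollection() only needs the stored
# type names and a fetch function, so it can be driven from any result store.
# Here `plugin` is an already-constructed InstantOutputPlugin and `fetch_fn`
# maps a type name to a generator of stored values, as documented above.
def WriteExportExample(plugin, type_names, fetch_fn, output_path):
  """Streams the plugin's output chunks into a file on disk."""
  with open(output_path, "wb") as out:
    for chunk in ApplyPluginToTypedCollection(plugin, type_names, fetch_fn):
      out.write(chunk)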
def GetClientVersion(client_id):
"""Returns last known GRR version that the client used."""
sinfo = data_store.REL_DB.ReadClientStartupInfo(client_id=client_id)
if sinfo is not None:
return sinfo.client_info.client_version
else:
return config.CONFIG["Source.version_numeric"] | Returns last known GRR version that the client used. | GetClientVersion | python | google/grr | grr/server/grr_response_server/data_store_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/data_store_utils.py | Apache-2.0 |
def GetClientOs(client_id: str) -> str:
"""Returns last known operating system name that the client used."""
if (snapshot := data_store.REL_DB.ReadClientSnapshot(client_id)) is not None:
return snapshot.knowledge_base.os
else:
return "" | Returns last known operating system name that the client used. | GetClientOs | python | google/grr | grr/server/grr_response_server/data_store_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/data_store_utils.py | Apache-2.0 |
def GetFileHashEntry(fd):
"""Returns an `rdf_crypto.Hash` instance for given AFF4 file descriptor."""
# Hash file store is not migrated to RELDB just yet, hence the first check.
client_id, vfs_path = fd.urn.Split(2)
path_type, components = rdf_objects.ParseCategorizedPath(vfs_path)
path_info = data_store.REL_DB.ReadPathInfo(client_id, path_type, components)
if path_info is None:
return None
  return mig_objects.ToRDFPathInfo(path_info).hash_entry
def GetOutputPluginStates(output_plugins, source=None):
"""Initializes state for a list of output plugins."""
output_plugins_states = []
for plugin_descriptor in output_plugins:
plugin_class = plugin_descriptor.GetPluginClass()
try:
_, plugin_state = plugin_class.CreatePluginAndDefaultState(
source_urn=source, args=plugin_descriptor.args
)
except Exception as e: # pylint: disable=broad-except
raise ValueError(
"Plugin %s failed to initialize (%s)" % (plugin_class, e)
) from e
output_plugins_states.append(
rdf_flow_runner.OutputPluginState(
plugin_state=plugin_state, plugin_descriptor=plugin_descriptor
)
)
  return output_plugins_states
def RandomFlowId() -> str:
"""Returns a random flow id encoded as a hex string."""
return "{:016X}".format(random.Id64()) | Returns a random flow id encoded as a hex string. | RandomFlowId | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
def __init__(
self,
parent_type: _ParentType,
parent_id: Optional[str] = None,
parent_flow_obj=None,
):
"""Instantiates a FlowParent. Use the class methods instead."""
self.type = parent_type
self.id = parent_id
    self.flow_obj = parent_flow_obj
def is_flow(self) -> bool:
"""True, if the flow is started as child-flow."""
    return self.type == _ParentType.FLOW
def is_hunt(self) -> bool:
"""True, if the flow is started as part of a hunt."""
    return self.type == _ParentType.HUNT
def is_root(self) -> bool:
"""True, if the flow is started as top-level flow."""
    return self.type == _ParentType.ROOT
def is_scheduled_flow(self) -> bool:
"""True, if the flow is started from a ScheduledFlow."""
    return self.type == _ParentType.SCHEDULED_FLOW
def FromFlow(cls, flow_obj) -> "FlowParent":
"""References another flow (flow_base.FlowBase) as parent."""
    return cls(_ParentType.FLOW, flow_obj.rdf_flow.flow_id, flow_obj)
def FromHuntID(cls, hunt_id: str) -> "FlowParent":
"""References another hunt as parent by its ID."""
    return cls(_ParentType.HUNT, hunt_id)
def FromRoot(cls) -> "FlowParent":
"""References no parent to mark a flow as top-level flow."""
    return cls(_ParentType.ROOT)
def FromScheduledFlowID(cls, scheduled_flow_id: str) -> "FlowParent":
"""References a ScheduledFlow as parent by its ID."""
    return cls(_ParentType.SCHEDULED_FLOW, scheduled_flow_id)
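# Illustrative usage of the constructors above (the hunt and scheduled-flow ids
# are made-up 16-hex-digit values):
root_parent = FlowParent.FromRoot()  # top-level flow, no parent id
hunt_parent = FlowParent.FromHuntID("48DA99925C71A576")
scheduled_parent = FlowParent.FromScheduledFlowID("1234ABCD5678EF00")

assert hunt_parent.id == "48DA99925C71A576"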
def StartFlow(
client_id: Optional[str] = None,
cpu_limit: Optional[int] = None,
creator: Optional[str] = None,
flow_args: Optional[rdf_structs.RDFStruct] = None,
flow_cls=None,
network_bytes_limit: Optional[int] = None,
original_flow: Optional[rdf_objects.FlowReference] = None,
output_plugins: Optional[
Sequence[rdf_output_plugin.OutputPluginDescriptor]
] = None,
start_at: Optional[rdfvalue.RDFDatetime] = None,
parent: Optional[FlowParent] = None,
runtime_limit: Optional[rdfvalue.Duration] = None,
) -> str:
"""The main factory function for creating and executing a new flow.
Args:
client_id: ID of the client this flow should run on.
cpu_limit: CPU limit in seconds for this flow.
creator: Username that requested this flow.
flow_args: An arg protocol buffer which is an instance of the required
flow's args_type class attribute.
flow_cls: Class of the flow that should be started.
    network_bytes_limit: Limit on the network traffic this flow can generate.
original_flow: A FlowReference object in case this flow was copied from
another flow.
output_plugins: An OutputPluginDescriptor object indicating what output
plugins should be used for this flow.
start_at: If specified, flow will be started not immediately, but at a given
time.
parent: A FlowParent referencing the parent, or None for top-level flows.
runtime_limit: Runtime limit as Duration for all ClientActions.
Returns:
the flow id of the new flow.
Raises:
ValueError: Unknown or invalid parameters were provided.
"""
# Is the required flow a known flow?
try:
registry.FlowRegistry.FlowClassByName(flow_cls.__name__)
except ValueError:
GRR_FLOW_INVALID_FLOW_COUNT.Increment()
raise ValueError("Unable to locate flow %s" % flow_cls.__name__)
if not client_id:
raise ValueError("Client_id is needed to start a flow.")
# Now parse the flow args into the new object from the keywords.
if flow_args is None:
flow_args = flow_cls.args_type()
if not isinstance(flow_args, flow_cls.args_type):
raise TypeError(
f"Flow args must be of type {flow_cls.args_type}, got"
f" {type(flow_args)} with contents: {flow_args!r}."
)
# Check that the flow args are valid.
flow_args.Validate()
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_class_name=flow_cls.__name__,
args=flow_args,
creator=creator,
output_plugins=output_plugins,
original_flow=original_flow,
flow_state="RUNNING",
)
if parent is None:
parent = FlowParent.FromRoot()
if parent.is_hunt or parent.is_scheduled_flow:
# When starting a flow from a hunt or ScheduledFlow, re-use the parent's id
# to make it easy to find flows. For hunts, every client has a top-level
# flow with the hunt's id.
rdf_flow.flow_id = parent.id
else: # For new top-level and child flows, assign a random ID.
rdf_flow.flow_id = RandomFlowId()
# For better performance, only do conflicting IDs check for top-level flows.
if not parent.is_flow:
try:
data_store.REL_DB.ReadFlowObject(client_id, rdf_flow.flow_id)
raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)
except db.UnknownFlowError:
pass
if parent.is_flow: # A flow is a nested flow.
parent_rdf_flow = parent.flow_obj.rdf_flow
rdf_flow.long_flow_id = "%s/%s" % (
parent_rdf_flow.long_flow_id,
rdf_flow.flow_id,
)
rdf_flow.parent_flow_id = parent_rdf_flow.flow_id
rdf_flow.parent_hunt_id = parent_rdf_flow.parent_hunt_id
rdf_flow.parent_request_id = parent.flow_obj.GetCurrentOutboundId()
if parent_rdf_flow.creator:
rdf_flow.creator = parent_rdf_flow.creator
elif parent.is_hunt: # Root-level hunt-induced flow.
rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)
rdf_flow.parent_hunt_id = parent.id
elif parent.is_root or parent.is_scheduled_flow:
# A flow is a root-level non-hunt flow.
rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)
else:
raise ValueError(f"Unknown flow parent type {parent}")
if output_plugins:
rdf_flow.output_plugins_states = GetOutputPluginStates(
output_plugins, rdf_flow.long_flow_id
)
if network_bytes_limit is not None:
rdf_flow.network_bytes_limit = network_bytes_limit
if cpu_limit is not None:
rdf_flow.cpu_limit = cpu_limit
if runtime_limit is not None:
rdf_flow.runtime_limit_us = runtime_limit
logging.info(
"Starting %s(%s) on %s (%s)",
rdf_flow.long_flow_id,
rdf_flow.flow_class_name,
client_id,
start_at or "now",
)
rdf_flow.current_state = "Start"
flow_obj = flow_cls(rdf_flow)
# Prevent a race condition, where a flow is scheduled twice, because one
# worker inserts the row and another worker silently updates the existing row.
allow_update = False
if start_at is None:
# Store an initial version of the flow straight away. This is needed so the
# database doesn't raise consistency errors due to missing parent keys when
# writing logs / errors / results which might happen in Start().
try:
proto_flow = mig_flow_objects.ToProtoFlow(rdf_flow)
data_store.REL_DB.WriteFlowObject(proto_flow, allow_update=False)
except db.FlowExistsError:
raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)
allow_update = True
try:
# Just run the first state inline. NOTE: Running synchronously means
# that this runs on the thread that starts the flow. The advantage is
      # that the Start method can raise any errors immediately.
flow_obj.Start()
# The flow does not need to actually remain running.
if not flow_obj.outstanding_requests:
flow_obj.RunStateMethod("End")
# Additional check for the correct state in case the End method raised
# and terminated the flow.
if flow_obj.IsRunning():
flow_obj.MarkDone()
except Exception as e: # pylint: disable=broad-except
# We catch all exceptions that happen in Start() and mark the flow as
# failed.
msg = str(e)
flow_obj.Error(error_message=msg, backtrace=traceback.format_exc())
else:
flow_obj.CallState("Start", start_time=start_at)
flow_obj.PersistState()
try:
proto_flow = mig_flow_objects.ToProtoFlow(rdf_flow)
data_store.REL_DB.WriteFlowObject(proto_flow, allow_update=allow_update)
except db.FlowExistsError:
raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)
if parent.is_flow:
# We can optimize here and not write requests/responses to the database
# since we have to do this for the parent flow at some point anyways.
parent.flow_obj.MergeQueuedMessages(flow_obj)
else:
flow_obj.FlushQueuedMessages()
  return rdf_flow.flow_id
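# Hedged usage sketch of the factory above. `MyCollectionFlow` and
# `MyCollectionArgs` are hypothetical placeholders for a registered flow class
# and its args_type; the limits and client id are made up.
flow_id = StartFlow(
    client_id="C.1000000000000000",
    creator="admin",
    flow_cls=MyCollectionFlow,
    flow_args=MyCollectionArgs(paths=["/var/log/*"]),
    cpu_limit=3600,
    network_bytes_limit=100 * 1024 * 1024,
    start_at=rdfvalue.RDFDatetime.Now(),  # schedule instead of running Start() inline
    parent=FlowParent.FromRoot(),
)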
def ScheduleFlow(
client_id: str,
creator: str,
flow_name: str,
flow_args: any_pb2.Any,
runner_args: flows_pb2.FlowRunnerArgs,
) -> flows_pb2.ScheduledFlow:
"""Schedules a Flow on the client, to be started upon approval grant."""
scheduled_flow = flows_pb2.ScheduledFlow()
scheduled_flow.client_id = client_id
scheduled_flow.creator = creator
scheduled_flow.scheduled_flow_id = RandomFlowId()
scheduled_flow.flow_name = flow_name
scheduled_flow.flow_args.CopyFrom(flow_args)
scheduled_flow.runner_args.CopyFrom(runner_args)
scheduled_flow.create_time = int(rdfvalue.RDFDatetime.Now())
data_store.REL_DB.WriteScheduledFlow(scheduled_flow)
  return scheduled_flow
def UnscheduleFlow(
client_id: str,
creator: str,
scheduled_flow_id: str,
) -> None:
"""Unschedules and deletes a previously scheduled flow."""
data_store.REL_DB.DeleteScheduledFlow(
client_id=client_id, creator=creator, scheduled_flow_id=scheduled_flow_id
  )
def ListScheduledFlows(
client_id: str,
creator: str,
) -> Sequence[rdf_flow_objects.ScheduledFlow]:
"""Lists all scheduled flows of a user on a client."""
return data_store.REL_DB.ListScheduledFlows(
client_id=client_id, creator=creator
  )
def StartScheduledFlows(client_id: str, creator: str) -> None:
"""Starts all scheduled flows of a user on a client.
This function delegates to StartFlow() to start the actual flow. If an error
occurs during StartFlow(), the ScheduledFlow is not deleted, but it is
updated by writing the `error` field to the database. The exception is NOT
re-raised and the next ScheduledFlow is attempted to be started.
Args:
client_id: The ID of the client of the ScheduledFlows.
creator: The username of the user who created the ScheduledFlows.
Raises:
UnknownClientError: if no client with client_id exists.
UnknownGRRUserError: if creator does not exist as user.
"""
# Validate existence of Client and User. Data races are not an issue - no
# flows get started in any case.
data_store.REL_DB.ReadClientMetadata(client_id)
data_store.REL_DB.ReadGRRUser(creator)
scheduled_flows = ListScheduledFlows(client_id, creator)
for sf in scheduled_flows:
try:
sf = mig_flow_objects.ToRDFScheduledFlow(sf)
flow_id = _StartScheduledFlow(sf)
logging.info(
"Started Flow %s/%s from ScheduledFlow %s",
client_id,
flow_id,
sf.scheduled_flow_id,
)
except Exception: # pylint: disable=broad-except
logging.exception(
"Cannot start ScheduledFlow %s %s/%s from %s",
sf.flow_name,
sf.client_id,
sf.scheduled_flow_id,
sf.creator,
      )
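# Hedged sketch of the schedule-then-start lifecycle. "MyCollectionFlow" is a
# hypothetical flow name; flow_args_any (an any_pb2.Any with the packed args)
# and runner_args (a flows_pb2.FlowRunnerArgs) are assumed to be built
# elsewhere.
scheduled = ScheduleFlow(
    client_id="C.1000000000000000",
    creator="alice",
    flow_name="MyCollectionFlow",
    flow_args=flow_args_any,
    runner_args=runner_args,
)

# Later, once alice's approval for the client has been granted:
StartScheduledFlows(client_id="C.1000000000000000", creator="alice")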
def _StartScheduledFlow(scheduled_flow: rdf_flow_objects.ScheduledFlow) -> str:
"""Starts a Flow from a ScheduledFlow and deletes the ScheduledFlow."""
sf = scheduled_flow
ra = scheduled_flow.runner_args
try:
flow_id = StartFlow(
client_id=sf.client_id,
creator=sf.creator,
flow_args=sf.flow_args,
flow_cls=registry.FlowRegistry.FlowClassByName(sf.flow_name),
output_plugins=ra.output_plugins,
start_at=rdfvalue.RDFDatetime.Now(),
parent=FlowParent.FromScheduledFlowID(sf.scheduled_flow_id),
cpu_limit=ra.cpu_limit,
network_bytes_limit=ra.network_bytes_limit,
# runtime_limit is missing in FlowRunnerArgs.
)
except Exception as e:
scheduled_flow = mig_flow_objects.ToProtoScheduledFlow(scheduled_flow)
scheduled_flow.error = str(e)
data_store.REL_DB.WriteScheduledFlow(scheduled_flow)
raise
data_store.REL_DB.DeleteScheduledFlow(
client_id=scheduled_flow.client_id,
creator=scheduled_flow.creator,
scheduled_flow_id=scheduled_flow.scheduled_flow_id,
)
  return flow_id
def EnrollFleetspeakClientIfNeeded(
self,
client_id: str,
fleetspeak_validation_tags: Mapping[str, str],
) -> Optional[rdf_objects.ClientMetadata]:
"""Enrols a Fleetspeak-enabled client for use with GRR.
Args:
client_id: GRR client-id for the client.
fleetspeak_validation_tags: Validation tags supplied by Fleetspeak.
Returns:
None if the client is new, and actually got enrolled. This method
is a no-op if the client already exists (in which case the existing
client metadata is returned).
"""
client_urn = rdf_client.ClientURN(client_id)
# If already enrolled, return.
try:
return mig_objects.ToRDFClientMetadata(
data_store.REL_DB.ReadClientMetadata(client_id)
)
except db.UnknownClientError:
pass
logging.info("Enrolling a new Fleetspeak client: %r", client_id)
now = rdfvalue.RDFDatetime.Now()
data_store.REL_DB.WriteClientMetadata(
client_id,
first_seen=now,
last_ping=now,
fleetspeak_validation_info=fleetspeak_validation_tags,
)
# Publish the client enrollment message.
events.Events.PublishEvent(
"ClientEnrollment", client_urn, username=FRONTEND_USERNAME
)
return None | Enrols a Fleetspeak-enabled client for use with GRR.
Args:
client_id: GRR client-id for the client.
fleetspeak_validation_tags: Validation tags supplied by Fleetspeak.
Returns:
None if the client is new, and actually got enrolled. This method
is a no-op if the client already exists (in which case the existing
client metadata is returned). | EnrollFleetspeakClientIfNeeded | python | google/grr | grr/server/grr_response_server/frontend_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/frontend_lib.py | Apache-2.0 |
def ReceiveMessages(
self,
client_id: str,
messages: Sequence[rdf_flows.GrrMessage],
) -> None:
"""Receives and processes the messages.
For each message we update the request object, and place the
response in that request's queue. If the request is complete, we
send a message to the worker.
Args:
client_id: The client which sent the messages.
messages: A list of GrrMessage RDFValues.
"""
now = time.time()
unprocessed_msgs = []
worker_message_handler_requests = []
frontend_message_handler_requests = []
dropped_count = 0
# TODO: Remove `fixed_messages` once old clients
# have been migrated.
fixed_messages = []
for message in messages:
if message.type != rdf_flows.GrrMessage.Type.STATUS:
fixed_messages.append(message)
continue
stat = rdf_flows.GrrStatus(message.payload)
if not stat.HasField("cpu_time_used"):
fixed_messages.append(message)
continue
if stat.cpu_time_used.HasField("deprecated_user_cpu_time"):
stat.cpu_time_used.user_cpu_time = (
stat.cpu_time_used.deprecated_user_cpu_time
)
stat.cpu_time_used.deprecated_user_cpu_time = None
if stat.cpu_time_used.HasField("deprecated_system_cpu_time"):
stat.cpu_time_used.system_cpu_time = (
stat.cpu_time_used.deprecated_system_cpu_time
)
stat.cpu_time_used.deprecated_system_cpu_time = None
message.payload = stat
fixed_messages.append(message)
messages = fixed_messages
msgs_by_session_id = collection.Group(messages, lambda m: m.session_id)
for session_id, msgs in msgs_by_session_id.items():
try:
for msg in msgs:
if (
msg.auth_state != msg.AuthorizationState.AUTHENTICATED
):
dropped_count += 1
continue
session_id_str = str(session_id)
if session_id_str in message_handlers.session_id_map:
request = rdf_objects.MessageHandlerRequest(
client_id=msg.source.Basename(),
handler_name=message_handlers.session_id_map[session_id],
request_id=msg.response_id or random.UInt32(),
request=msg.payload,
)
if request.handler_name in self._SHORTCUT_HANDLERS:
frontend_message_handler_requests.append(request)
else:
worker_message_handler_requests.append(request)
elif session_id_str in self.legacy_well_known_session_ids:
logging.debug(
"Dropping message for legacy well known session id %s",
session_id,
)
else:
unprocessed_msgs.append(msg)
except ValueError:
logging.exception(
"Unpacking error in at least one of %d messages for session id %s",
len(msgs),
session_id,
)
raise
if dropped_count:
logging.info(
"Dropped %d unauthenticated messages for %s", dropped_count, client_id
)
if unprocessed_msgs:
flow_responses = []
for message in unprocessed_msgs:
try:
response = rdf_flow_objects.FlowResponseForLegacyResponse(message)
except ValueError as e:
logging.warning(
"Failed to parse legacy FlowResponse:\n%s\n%s", e, message
)
else:
if isinstance(response, rdf_flow_objects.FlowStatus):
response = mig_flow_objects.ToProtoFlowStatus(response)
if isinstance(response, rdf_flow_objects.FlowIterator):
response = mig_flow_objects.ToProtoFlowIterator(response)
if isinstance(response, rdf_flow_objects.FlowResponse):
response = mig_flow_objects.ToProtoFlowResponse(response)
flow_responses.append(response)
data_store.REL_DB.WriteFlowResponses(flow_responses)
for msg in unprocessed_msgs:
if msg.type == rdf_flows.GrrMessage.Type.STATUS:
stat = rdf_flows.GrrStatus(msg.payload)
if stat.status == rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED:
# A client crashed while performing an action, fire an event.
crash_details = rdf_client.ClientCrash(
client_id=client_id,
session_id=msg.session_id,
backtrace=stat.backtrace,
crash_message=stat.error_message,
timestamp=rdfvalue.RDFDatetime.Now(),
)
events.Events.PublishEvent(
"ClientCrash", crash_details, username=FRONTEND_USERNAME
)
if worker_message_handler_requests:
worker_message_handler_requests = [
mig_objects.ToProtoMessageHandlerRequest(r)
for r in worker_message_handler_requests
]
data_store.REL_DB.WriteMessageHandlerRequests(
worker_message_handler_requests
)
if frontend_message_handler_requests:
frontend_message_handler_requests = [
mig_objects.ToProtoMessageHandlerRequest(r)
for r in frontend_message_handler_requests
]
worker_lib.ProcessMessageHandlerRequests(
frontend_message_handler_requests
)
logging.debug(
"Received %s messages from %s in %s sec",
len(messages),
client_id,
time.time() - now,
) | Receives and processes the messages.
For each message we update the request object, and place the
response in that request's queue. If the request is complete, we
send a message to the worker.
Args:
client_id: The client which sent the messages.
messages: A list of GrrMessage RDFValues. | ReceiveMessages | python | google/grr | grr/server/grr_response_server/frontend_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/frontend_lib.py | Apache-2.0 |
def ReceiveRRGResponse(
self,
client_id: str,
response: rrg_pb2.Response,
) -> None:
"""Receives and processes a single response from the RRG agent.
Args:
client_id: An identifier of the client for which we process the response.
response: A response to process.
"""
self.ReceiveRRGResponses(client_id, [response]) | Receives and processes a single response from the RRG agent.
Args:
client_id: An identifier of the client for which we process the response.
response: A response to process. | ReceiveRRGResponse | python | google/grr | grr/server/grr_response_server/frontend_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/frontend_lib.py | Apache-2.0 |
def ReceiveRRGResponses(
self,
client_id: str,
responses: Sequence[rrg_pb2.Response],
) -> None:
"""Receives and processes multiple responses from the RRG agent.
Args:
client_id: An identifier of the client for which we process the response.
responses: Responses to process.
"""
flow_responses = []
flow_rrg_logs: dict[tuple[int, int], dict[int, rrg_pb2.Log]] = {}
for response in responses:
flow_response: Union[
flows_pb2.FlowResponse,
flows_pb2.FlowStatus,
flows_pb2.FlowIterator,
]
if response.HasField("status"):
flow_response = flows_pb2.FlowStatus()
flow_response.network_bytes_sent = response.status.network_bytes_sent
# TODO: Populate `cpu_time_used` and `runtime_us`
if response.status.HasField("error"):
# TODO: Convert RRG error types to GRR error types.
flow_response.status = flows_pb2.FlowStatus.Status.ERROR
flow_response.error_message = response.status.error.message
else:
flow_response.status = flows_pb2.FlowStatus.Status.OK
elif response.HasField("result"):
flow_response = flows_pb2.FlowResponse()
flow_response.any_payload.CopyFrom(response.result)
elif response.HasField("log"):
request_rrg_logs = flow_rrg_logs.setdefault(
(response.flow_id, response.request_id), {}
)
request_rrg_logs[response.response_id] = response.log
continue
else:
raise ValueError(f"Unexpected response: {response}")
flow_response.client_id = client_id
flow_response.flow_id = f"{response.flow_id:016X}"
flow_response.request_id = response.request_id
flow_response.response_id = response.response_id
flow_responses.append(flow_response)
data_store.REL_DB.WriteFlowResponses(flow_responses)
for (flow_id, request_id), logs in flow_rrg_logs.items():
data_store.REL_DB.WriteFlowRRGLogs(
client_id=client_id,
flow_id=f"{flow_id:016X}",
request_id=request_id,
logs=logs,
) | Receives and processes multiple responses from the RRG agent.
Args:
client_id: An identifier of the client for which we process the response.
responses: Responses to process. | ReceiveRRGResponses | python | google/grr | grr/server/grr_response_server/frontend_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/frontend_lib.py | Apache-2.0 |
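# --- Illustrative sketch (editor's addition, not part of the GRR source).
# RRG reports flow ids as integers, while GRR addresses flows by 16-digit
# uppercase hex strings; the `f"{response.flow_id:016X}"` formatting above
# performs that conversion. A standalone example with a made-up id:
rrg_flow_id = 0x1A2B3C4D
grr_flow_id = f"{rrg_flow_id:016X}"
assert grr_flow_id == "000000001A2B3C4D"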
def ReceiveRRGParcel(
self,
client_id: str,
parcel: rrg_pb2.Parcel,
) -> None:
"""Receives and processes a single parcel from the RRG agent.
Args:
client_id: An identifier of the client for which we process the response.
parcel: A parcel to process.
"""
self.ReceiveRRGParcels(client_id, [parcel]) | Receives and processes a single parcel from the RRG agent.
Args:
client_id: An identifier of the client for which we process the response.
parcel: A parcel to process. | ReceiveRRGParcel | python | google/grr | grr/server/grr_response_server/frontend_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/frontend_lib.py | Apache-2.0 |
def ReceiveRRGParcels(
self,
client_id: str,
parcels: Sequence[rrg_pb2.Parcel],
) -> None:
"""Receives and processes multiple parcels from the RRG agent.
Args:
client_id: An identifier of the client for which we process the response.
parcels: Parcels to process.
"""
parcels_by_sink_name = {}
for parcel in parcels:
sink_name = rrg_pb2.Sink.Name(parcel.sink)
parcels_by_sink_name.setdefault(sink_name, []).append(parcel)
for sink_name, sink_parcels in parcels_by_sink_name.items():
RRG_PARCEL_COUNT.Increment(fields=[sink_name], delta=len(sink_parcels))
try:
sinks.AcceptMany(client_id, parcels)
except Exception: # pylint: disable=broad-exception-caught
# TODO: `AcceptMany` should raise an error that specifies
# which sink caused the exception. Then we don't have to increment the
# count for all sinks.
for sink_name in parcels_by_sink_name:
RRG_PARCEL_ACCEPT_ERRORS.Increment(fields=[sink_name])
logging.exception("Failed to process parcels for '%s'", client_id) | Receives and processes multiple parcels from the RRG agent.
Args:
client_id: An identifier of the client for which we process the response.
parcels: Parcels to process. | ReceiveRRGParcels | python | google/grr | grr/server/grr_response_server/frontend_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/frontend_lib.py | Apache-2.0 |
def Start(self) -> None:
"""The first state of the flow.""" | The first state of the flow. | Start | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def End(self) -> None:
"""Final state.
This method is called prior to destruction of the flow.
""" | Final state.
This method is called prior to destruction of the flow. | End | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def CallState(
self,
next_state: str = "",
start_time: Optional[rdfvalue.RDFDatetime] = None,
responses: Optional[Sequence[rdf_structs.RDFStruct]] = None,
):
"""This method is used to schedule a new state on a different worker.
This is basically the same as CallFlow() except we are calling
ourselves. The state will be invoked at a later time.
Args:
next_state: The state in this flow to be invoked.
start_time: Start the flow at this time. This delays notification for
flow processing into the future. Note that the flow may still be
processed earlier if there are client responses waiting.
responses: If specified, responses to be passed to the next state.
Raises:
ValueError: The next state specified does not exist.
FlowError: Method shouldn't be used in this flow (only_protos_allowed).
"""
    # Start method is special and not run with `RunStateMethod` by `StartFlow`.
# Rather, we call `CallState` directly because it can be scheduled for the
# future (`start_time`), different than `RunStateMethod` that runs now.
if self.only_protos_allowed and next_state != "Start":
raise FlowError(
"`CallState` is not allowed for flows that only allow protos. Use"
" `CallStateProto` instead."
)
if not getattr(self, next_state):
raise ValueError("Next state %s is invalid." % next_state)
request_id = self.GetNextOutboundId()
if responses:
for index, r in enumerate(responses):
wrapped_response = rdf_flow_objects.FlowResponse(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
request_id=request_id,
response_id=index,
payload=r,
)
self.flow_responses.append(wrapped_response)
self.flow_responses.append(
rdf_flow_objects.FlowStatus(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
request_id=request_id,
response_id=len(responses) + 1,
status=rdf_flow_objects.FlowStatus.Status.OK,
)
)
nr_responses_expected = len(responses) + 1
else:
nr_responses_expected = 0
flow_request = rdf_flow_objects.FlowRequest(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
request_id=request_id,
next_state=next_state,
start_time=start_time,
nr_responses_expected=nr_responses_expected,
needs_processing=True,
)
self.flow_requests.append(flow_request) | This method is used to schedule a new state on a different worker.
This is basically the same as CallFlow() except we are calling
ourselves. The state will be invoked at a later time.
Args:
next_state: The state in this flow to be invoked.
start_time: Start the flow at this time. This delays notification for
flow processing into the future. Note that the flow may still be
processed earlier if there are client responses waiting.
responses: If specified, responses to be passed to the next state.
Raises:
ValueError: The next state specified does not exist.
FlowError: Method shouldn't be used in this flow (only_protos_allowed). | CallState | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
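# --- Illustrative usage sketch (editor's addition, not part of the GRR source).
# A flow re-scheduling one of its own states via CallState(). The flow class,
# the state name and the rdfvalue.Duration helper call are assumptions made
# purely for illustration.
from grr_response_core.lib import rdfvalue
from grr_response_server import flow_base


class PollingExampleFlow(flow_base.FlowBase):
  """Sketch: wakes itself up again roughly one minute from now."""

  def Start(self):
    self.CallState(
        next_state="CheckAgain",
        start_time=rdfvalue.RDFDatetime.Now()
        + rdfvalue.Duration.From(1, rdfvalue.MINUTES),
    )

  def CheckAgain(self, responses):
    del responses  # Unused: no responses are forwarded in this sketch.
    self.Log("Woke up for another polling round.")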
def CallStateProto(
self,
next_state: str = "",
start_time: Optional[rdfvalue.RDFDatetime] = None,
responses: Optional[Sequence[pb_message.Message]] = None,
):
"""This method is used to schedule a new state on a different worker.
This is basically the same as CallFlow() except we are calling
ourselves. The state will be invoked at a later time.
Args:
next_state: The state in this flow to be invoked.
start_time: Start the flow at this time. This delays notification for
flow processing into the future. Note that the flow may still be
processed earlier if there are client responses waiting.
responses: If specified, responses to be passed to the next state.
Raises:
ValueError: The next state specified does not exist.
"""
if not getattr(self, next_state):
raise ValueError("Next state %s is invalid." % next_state)
request_id = self.GetNextOutboundId()
if responses:
for index, r in enumerate(responses):
_ValidateProto(r)
wrapped_response = flows_pb2.FlowResponse(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
request_id=request_id,
response_id=index,
)
wrapped_response.any_payload.Pack(r)
# TODO: Remove dynamic `payload` field.
wrapped_response.payload.Pack(r)
self.proto_flow_responses.append(wrapped_response)
self.proto_flow_responses.append(
flows_pb2.FlowStatus(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
request_id=request_id,
response_id=len(responses) + 1,
status=flows_pb2.FlowStatus.Status.OK,
)
)
nr_responses_expected = len(responses) + 1
else:
nr_responses_expected = 0
flow_request = flows_pb2.FlowRequest(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
request_id=request_id,
next_state=next_state,
nr_responses_expected=nr_responses_expected,
needs_processing=True,
)
if start_time is not None:
flow_request.start_time = int(start_time)
self.proto_flow_requests.append(flow_request) | This method is used to schedule a new state on a different worker.
This is basically the same as CallFlow() except we are calling
ourselves. The state will be invoked at a later time.
Args:
next_state: The state in this flow to be invoked.
start_time: Start the flow at this time. This delays notification for
flow processing into the future. Note that the flow may still be
processed earlier if there are client responses waiting.
responses: If specified, responses to be passed to the next state.
Raises:
ValueError: The next state specified does not exist. | CallStateProto | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def CallStateInline(
self,
messages: Optional[
Sequence[
Union[
rdf_flow_objects.FlowResponse,
rdf_flow_objects.FlowStatus,
rdf_flow_objects.FlowIterator,
],
]
] = None,
next_state: str = "",
request_data: Optional[Mapping[str, Any]] = None,
responses: Optional[flow_responses.Responses] = None,
):
"""Calls a state inline (immediately).
If `responses` is not specified, `messages` and `request_data` are used to
create a `flow_responses.Responses` object. Otherwise `responses` is used
as is.
Args:
messages: responses to be passed to the state (only used if `responses` is
not provided).
next_state: The state to be called.
request_data: An arbitrary dict to be passed to the called state (only
used if `responses` is not provided).
responses: Responses to pass to the state (as is). If not specified,
`messages` and `request_data` are used to create a
`flow_responses.Responses` object.
Raises:
FlowError: Method shouldn't be used in this flow (only_protos_allowed).
"""
if self.only_protos_allowed:
raise FlowError(
"`CallStateInline` is not allowed for flows that only allow protos."
" Use `CallStateInlineProtoWithResponses` or "
)
if responses is None:
responses = flow_responses.FakeResponses(messages, request_data)
getattr(self, next_state)(responses) | Calls a state inline (immediately).
If `responses` is not specified, `messages` and `request_data` are used to
create a `flow_responses.Responses` object. Otherwise `responses` is used
as is.
Args:
messages: responses to be passed to the state (only used if `responses` is
not provided).
next_state: The state to be called.
request_data: An arbitrary dict to be passed to the called state (only
used if `responses` is not provided).
responses: Responses to pass to the state (as is). If not specified,
`messages` and `request_data` are used to create a
`flow_responses.Responses` object.
Raises:
FlowError: Method shouldn't be used in this flow (only_protos_allowed). | CallStateInline | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def CallStateInlineProtoWithResponses(
self,
next_state: str = "",
responses: Optional[flow_responses.Responses[any_pb2.Any]] = None,
):
"""Calls a state inline (immediately).
The state must be annotated with `@UseProto2AnyResponses`.
Args:
next_state: The state to be called.
responses: Responses to pass to the state (as is).
"""
method = getattr(self, next_state)
# Raise if the method is not annotated with `@UseProto2AnyResponses`.
# This means it still expects RDFValues, we should use `CallStateInline`.
if (
not hasattr(method, "_proto2_any_responses")
or not method._proto2_any_responses # pylint: disable=protected-access
):
raise ValueError(
f"Method {method.__name__} is not annotated with"
" `@UseProto2AnyResponses`. Please use `CallStateInline` instead."
)
# Method expects Responses[any_pb2.Any].
if responses is not None:
# TODO: Remove this check once flow targets use pytype.
for r in responses:
if not isinstance(r, any_pb2.Any):
raise ValueError(
f"Expected Responses[any_pb2.Any] but got Responses[{type(r)}]"
)
method(responses) | Calls a state inline (immediately).
The state must be annotated with `@UseProto2AnyResponses`.
Args:
next_state: The state to be called.
responses: Responses to pass to the state (as is). | CallStateInlineProtoWithResponses | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def CallStateInlineProto(
self,
next_state: str = "",
messages: Optional[Sequence[pb_message.Message]] = None,
request_data: Optional[Mapping[str, Any]] = None,
) -> None:
"""Calls a state inline (immediately).
The state must be annotated with `@UseProto2AnyResponses`.
Args:
next_state: The state to be called.
messages: responses to be passed to the state.
request_data: An arbitrary dict to be passed to the called state
"""
method = getattr(self, next_state)
# Raise if the method is not annotated with `@UseProto2AnyResponses`.
# This means it still expects RDFValues, we should use `CallStateInline`.
if (
not hasattr(method, "_proto2_any_responses")
or not method._proto2_any_responses # pylint: disable=protected-access
):
raise ValueError(
f"Method {method.__name__} is not annotated with"
" `@UseProto2AnyResponses`. Please use `CallStateInline` instead."
)
# Use `messages` and make sure they're packed into `any_pb2.Any`s.
any_msgs: list[any_pb2.Any] = []
if messages is not None:
for r in messages:
_ValidateProto(r)
if isinstance(r, any_pb2.Any):
raise ValueError(
f"Expected unpacked proto message but got an any_pb2.Any: {r}"
)
any_msg = any_pb2.Any()
any_msg.Pack(r)
any_msgs.append(any_msg)
responses = flow_responses.FakeResponses(any_msgs, request_data)
method(responses) | Calls a state inline (immediately).
The state must be annotated with `@UseProto2AnyResponses`.
Args:
next_state: The state to be called.
messages: responses to be passed to the state.
request_data: An arbitrary dict to be passed to the called state | CallStateInlineProto | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
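# --- Illustrative usage sketch (editor's addition, not part of the GRR source).
# Calling a proto-only state inline. The target state must be annotated with
# `@UseProto2AnyResponses`; the decorator's import location
# (`flow_base.UseProto2AnyResponses`) and the StatEntry payload are assumptions.
from grr_response_proto import jobs_pb2
from grr_response_server import flow_base


class InlineProtoExampleFlow(flow_base.FlowBase):

  def Start(self):
    entry = jobs_pb2.StatEntry(st_size=123)
    self.CallStateInlineProto(next_state="Process", messages=[entry])

  @flow_base.UseProto2AnyResponses
  def Process(self, responses):
    for any_msg in responses:  # Each response arrives as a packed any_pb2.Any.
      entry = jobs_pb2.StatEntry()
      any_msg.Unpack(entry)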
def _GetAndCheckResourceLimits(self) -> _ResourceLimits:
"""Calculates and checks if the flow has exceeded any resource limits.
Returns:
A _ResourceLimits object with the calculated limits.
Raises:
FlowResourcesExceededError: If any resource limit has been exceeded.
"""
cpu_limit_ms = None
network_bytes_limit = None
runtime_limit_us = self.rdf_flow.runtime_limit_us
if self.rdf_flow.cpu_limit:
cpu_usage = self.rdf_flow.cpu_time_used
cpu_limit_ms = 1000 * max(
self.rdf_flow.cpu_limit
- cpu_usage.user_cpu_time
- cpu_usage.system_cpu_time,
0,
)
if cpu_limit_ms == 0:
raise flow.FlowResourcesExceededError(
"CPU limit exceeded for {} {}.".format(
self.rdf_flow.flow_class_name, self.rdf_flow.flow_id
)
)
if self.rdf_flow.network_bytes_limit:
network_bytes_limit = max(
self.rdf_flow.network_bytes_limit - self.rdf_flow.network_bytes_sent,
0,
)
if network_bytes_limit == 0:
raise flow.FlowResourcesExceededError(
"Network limit exceeded for {} {}.".format(
self.rdf_flow.flow_class_name, self.rdf_flow.flow_id
)
)
if runtime_limit_us and self.rdf_flow.runtime_us:
if self.rdf_flow.runtime_us < runtime_limit_us:
runtime_limit_us -= self.rdf_flow.runtime_us
else:
raise flow.FlowResourcesExceededError(
"Runtime limit exceeded for {} {}.".format(
self.rdf_flow.flow_class_name, self.rdf_flow.flow_id
)
)
return self._ResourceLimits(
cpu_limit_ms=cpu_limit_ms,
network_bytes_limit=network_bytes_limit,
runtime_limit_us=runtime_limit_us,
) | Calculates and checks if the flow has exceeded any resource limits.
Returns:
A _ResourceLimits object with the calculated limits.
Raises:
FlowResourcesExceededError: If any resource limit has been exceeded. | _GetAndCheckResourceLimits | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
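# --- Worked example (editor's addition, not part of the GRR source).
# The remaining budgets are "configured limit minus what the flow has already
# consumed", clamped at zero; the CPU budget is converted to milliseconds for
# the client message. All numbers below are made up.
cpu_limit_s = 60.0                          # flow-wide CPU budget in seconds
user_cpu_used, system_cpu_used = 12.5, 2.5  # already consumed
cpu_limit_ms = 1000 * max(cpu_limit_s - user_cpu_used - system_cpu_used, 0)
assert cpu_limit_ms == 45000.0              # 45 CPU-seconds left, in ms

network_bytes_limit = 10_000_000
network_bytes_sent = 9_999_999
remaining_bytes = max(network_bytes_limit - network_bytes_sent, 0)
assert remaining_bytes == 1                 # a zero here would raise instead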
def CallClient(
self,
action_cls: Type[server_stubs.ClientActionStub],
request: Optional[rdfvalue.RDFValue] = None,
next_state: Optional[str] = None,
callback_state: Optional[str] = None,
request_data: Optional[Mapping[str, Any]] = None,
):
"""Calls the client asynchronously.
This sends a message to the client to invoke an Action. The run action may
send back many responses that will be queued by the framework until a status
message is sent by the client. The status message will cause the entire
transaction to be committed to the specified state.
Args:
action_cls: The function to call on the client.
request: The request to send to the client. Must be of the correct type
for the action.
next_state: The state in this flow, that responses to this message should
go to.
callback_state: (optional) The state to call whenever a new response is
arriving.
request_data: A dict which will be available in the RequestState
protobuf. The Responses object maintains a reference to this protobuf
for use in the execution of the state method. (so you can access this
data by responses.request).
Raises:
ValueError: The request passed to the client does not have the correct
type.
FlowError: Method shouldn't be used in this flow (only_protos_allowed).
"""
if self.only_protos_allowed:
raise FlowError(
"`CallClient` is not allowed for flows that only allow protos. Use"
" `CallClientProto` instead."
)
try:
action_identifier = action_registry.ID_BY_ACTION_STUB[action_cls]
except KeyError:
raise ValueError("Action class %s not known." % action_cls)
if action_cls.in_rdfvalue is None:
if request:
raise ValueError("Client action %s does not expect args." % action_cls)
else:
# Verify that the request type matches the client action requirements.
if not isinstance(request, action_cls.in_rdfvalue):
raise ValueError(
"Client action expected %s but got %s"
% (action_cls.in_rdfvalue, type(request))
)
outbound_id = self.GetNextOutboundId()
# Create a flow request.
flow_request = rdf_flow_objects.FlowRequest(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
request_id=outbound_id,
next_state=next_state,
callback_state=callback_state,
)
if request_data is not None:
flow_request.request_data = rdf_protodict.Dict().FromDict(request_data)
limits = self._GetAndCheckResourceLimits()
stub = action_registry.ACTION_STUB_BY_ID[action_identifier]
client_action_request = rdf_flows.GrrMessage(
session_id="%s/%s" % (self.rdf_flow.client_id, self.rdf_flow.flow_id),
name=stub.__name__,
request_id=outbound_id,
payload=request,
network_bytes_limit=limits.network_bytes_limit,
runtime_limit_us=limits.runtime_limit_us,
)
if limits.cpu_limit_ms is not None:
client_action_request.cpu_limit = limits.cpu_limit_ms / 1000.0
self.flow_requests.append(flow_request)
self.client_action_requests.append(client_action_request) | Calls the client asynchronously.
This sends a message to the client to invoke an Action. The run action may
send back many responses that will be queued by the framework until a status
message is sent by the client. The status message will cause the entire
transaction to be committed to the specified state.
Args:
action_cls: The function to call on the client.
request: The request to send to the client. Must be of the correct type
for the action.
next_state: The state in this flow, that responses to this message should
go to.
callback_state: (optional) The state to call whenever a new response is
arriving.
request_data: A dict which will be available in the RequestState
protobuf. The Responses object maintains a reference to this protobuf
for use in the execution of the state method. (so you can access this
data by responses.request).
Raises:
ValueError: The request passed to the client does not have the correct
type.
FlowError: Method shouldn't be used in this flow (only_protos_allowed). | CallClient | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
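# --- Illustrative usage sketch (editor's addition, not part of the GRR source).
# The canonical CallClient() pattern: ask the client to run an action and
# receive its responses in a named state. `GetClientInfo` is assumed to be an
# argument-less client action stub; substitute any registered action.
from grr_response_server import flow_base
from grr_response_server import server_stubs


class ClientInfoExampleFlow(flow_base.FlowBase):

  def Start(self):
    self.CallClient(server_stubs.GetClientInfo, next_state="ProcessInfo")

  def ProcessInfo(self, responses):
    if not responses.success:
      raise flow_base.FlowError(responses.status)
    for response in responses:
      self.SendReply(response)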
def CallClientProto(
self,
action_cls: Type[server_stubs.ClientActionStub],
action_args: Optional[pb_message.Message] = None,
next_state: Optional[str] = None,
callback_state: Optional[str] = None,
request_data: Optional[dict[str, Any]] = None,
):
"""Calls the client asynchronously.
This sends a message to the client to invoke an Action. The run action may
send back many responses that will be queued by the framework until a status
message is sent by the client. The status message will cause the entire
transaction to be committed to the specified state.
Args:
action_cls: The function to call on the client.
action_args: The arguments to send to the client. Must be of the correct
type for the action.
next_state: The state in this flow, that responses to this message should
go to.
callback_state: (optional) The state to call whenever a new response is
arriving.
request_data: A dict which will be available in the RequestState
protobuf. The Responses object maintains a reference to this protobuf
for use in the execution of the state method. (so you can access this
data by responses.request).
Raises:
ValueError: The client action does not exist/is not registered.
      TypeError: The arguments passed to the client do not have the correct
type.
"""
try:
action_registry.ID_BY_ACTION_STUB[action_cls]
except KeyError:
raise ValueError("Action class %s not known." % action_cls) from None
if action_cls.in_proto is None and action_args:
raise ValueError(
f"Client action {action_cls.__name__} does not expect args yet some"
f" were provided: {action_args}"
)
elif action_cls.in_proto is not None:
if action_args is None:
raise ValueError(
f"Client action {action_cls.__name__} expects args, but none were"
" provided."
)
# Verify that the action_args type matches the client action requirements.
if not isinstance(action_args, action_cls.in_proto):
raise ValueError(
"Client action expected %s but got %s"
% (action_cls.in_proto, type(action_args))
)
outbound_id = self.GetNextOutboundId()
# Create a flow request.
flow_request = flows_pb2.FlowRequest(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
request_id=outbound_id,
next_state=next_state,
callback_state=callback_state,
)
if request_data is not None:
flow_request.request_data.CopyFrom(
mig_protodict.FromNativeDictToProtoDict(request_data)
)
limits = self._GetAndCheckResourceLimits()
client_action_request = jobs_pb2.GrrMessage(
session_id="%s/%s" % (self.rdf_flow.client_id, self.rdf_flow.flow_id),
name=action_cls.__name__,
request_id=outbound_id,
network_bytes_limit=limits.network_bytes_limit,
runtime_limit_us=limits.runtime_limit_us,
)
if action_args:
# We rely on the fact that the in_proto and in_rdfvalue fields in the stub
# represent the same type. That is:
# cls.in_rdfvalue.protobuf == cls.in_proto
# We use that to manually build the proto as prescribed by the GrrMessage
# RDF class.
models_clients.SetGrrMessagePayload(
client_action_request, action_cls.in_rdfvalue.__name__, action_args
)
self.proto_flow_requests.append(flow_request)
self.proto_client_action_requests.append(client_action_request) | Calls the client asynchronously.
This sends a message to the client to invoke an Action. The run action may
send back many responses that will be queued by the framework until a status
message is sent by the client. The status message will cause the entire
transaction to be committed to the specified state.
Args:
action_cls: The function to call on the client.
action_args: The arguments to send to the client. Must be of the correct
type for the action.
next_state: The state in this flow, that responses to this message should
go to.
callback_state: (optional) The state to call whenever a new response is
arriving.
request_data: A dict which will be available in the RequestState
protobuf. The Responses object maintains a reference to this protobuf
for use in the execution of the state method. (so you can access this
data by responses.request).
Raises:
ValueError: The client action does not exist/is not registered.
  TypeError: The arguments passed to the client do not have the correct
type. | CallClientProto | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def CallFlow(
self,
flow_name: Optional[str] = None,
next_state: Optional[str] = None,
request_data: Optional[Mapping[str, Any]] = None,
output_plugins: Optional[
Sequence[rdf_output_plugin.OutputPluginDescriptor]
] = None,
flow_args: Optional[rdf_structs.RDFStruct] = None,
) -> str:
"""Creates a new flow and send its responses to a state.
This creates a new flow. The flow may send back many responses which will be
queued by the framework until the flow terminates. The final status message
will cause the entire transaction to be committed to the specified state.
Args:
flow_name: The name of the flow to invoke.
next_state: The state in this flow, that responses to this message should
go to.
request_data: Any dict provided here will be available in the
RequestState protobuf. The Responses object maintains a reference to
this protobuf for use in the execution of the state method. (so you can
access this data by responses.request). There is no format mandated on
this data but it may be a serialized protobuf.
output_plugins: A list of output plugins to use for this flow.
flow_args: Arguments for the child flow.
Returns:
The flow_id of the child flow which was created.
Raises:
ValueError: The requested next state does not exist.
FlowError: Method shouldn't be used in this flow (only_protos_allowed).
"""
if self.only_protos_allowed:
raise FlowError(
"`CallFlow` is not allowed for flows that only allow protos. Use"
" `CallFlowProto` instead."
)
if not getattr(self, next_state):
raise ValueError("Next state %s is invalid." % next_state)
flow_request = rdf_flow_objects.FlowRequest(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
request_id=self.GetNextOutboundId(),
next_state=next_state,
)
if request_data is not None:
flow_request.request_data = rdf_protodict.Dict().FromDict(request_data)
self.flow_requests.append(flow_request)
flow_cls = FlowRegistry.FlowClassByName(flow_name)
return flow.StartFlow(
client_id=self.rdf_flow.client_id,
flow_cls=flow_cls,
parent=flow.FlowParent.FromFlow(self),
output_plugins=output_plugins,
flow_args=flow_args,
) | Creates a new flow and send its responses to a state.
This creates a new flow. The flow may send back many responses which will be
queued by the framework until the flow terminates. The final status message
will cause the entire transaction to be committed to the specified state.
Args:
flow_name: The name of the flow to invoke.
next_state: The state in this flow, that responses to this message should
go to.
request_data: Any dict provided here will be available in the
RequestState protobuf. The Responses object maintains a reference to
this protobuf for use in the execution of the state method. (so you can
access this data by responses.request). There is no format mandated on
this data but it may be a serialized protobuf.
output_plugins: A list of output plugins to use for this flow.
flow_args: Arguments for the child flow.
Returns:
The flow_id of the child flow which was created.
Raises:
ValueError: The requested next state does not exist.
FlowError: Method shouldn't be used in this flow (only_protos_allowed). | CallFlow | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
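# --- Illustrative usage sketch (editor's addition, not part of the GRR source).
# Launching a child flow and handling its completion. "Interrogate" is used as
# an example child flow name; any registered flow name works, and the
# request_data dict is arbitrary.
from grr_response_server import flow_base


class ParentExampleFlow(flow_base.FlowBase):

  def Start(self):
    self.CallFlow(
        flow_name="Interrogate",
        next_state="ChildDone",
        request_data={"reason": "refresh client metadata"},
    )

  def ChildDone(self, responses):
    if not responses.success:
      raise flow_base.FlowError(responses.status)
    self.Log("Child flow finished with %d responses.", len(responses))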
def CallFlowProto(
self,
flow_name: Optional[str] = None,
next_state: Optional[str] = None,
request_data: Optional[dict[str, Any]] = None,
output_plugins: Optional[
Sequence[rdf_output_plugin.OutputPluginDescriptor]
] = None,
flow_args: Optional[pb_message.Message] = None,
) -> str:
"""Creates a new flow and send its responses to a state.
This creates a new flow. The flow may send back many responses which will be
queued by the framework until the flow terminates. The final status message
will cause the entire transaction to be committed to the specified state.
Args:
flow_name: The name of the flow to invoke.
next_state: The state in this flow, that responses to this message should
go to.
request_data: Any dict provided here will be available in the
RequestState protobuf. The Responses object maintains a reference to
this protobuf for use in the execution of the state method. (so you can
access this data by responses.request). There is no format mandated on
this data but it may be a serialized protobuf.
output_plugins: A list of output plugins to use for this flow.
flow_args: Arguments for the child flow.
Returns:
The flow_id of the child flow which was created.
Raises:
ValueError: The requested next state does not exist.
"""
if not getattr(self, next_state):
raise ValueError("Next state %s is invalid." % next_state)
flow_request = flows_pb2.FlowRequest(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
request_id=self.GetNextOutboundId(),
next_state=next_state,
)
if request_data is not None:
flow_request.request_data.CopyFrom(
mig_protodict.FromNativeDictToProtoDict(request_data)
)
self.proto_flow_requests.append(flow_request)
flow_cls = FlowRegistry.FlowClassByName(flow_name)
rdf_flow_args = None
if flow_args:
if flow_cls.args_type.protobuf != type(flow_args):
raise ValueError(
f"Flow {flow_name} expects args of type"
f" {flow_cls.args_type.protobuf} but got {type(flow_args)}"
)
# We try on a best-effort basis to convert the flow args to RDFValue.
rdf_flow_args = flow_cls.args_type.FromSerializedBytes(
flow_args.SerializeToString()
)
# TODO: Allow `StartFlow` to take proto args in.
return flow.StartFlow(
client_id=self.rdf_flow.client_id,
flow_cls=flow_cls,
parent=flow.FlowParent.FromFlow(self),
output_plugins=output_plugins,
flow_args=rdf_flow_args,
) | Creates a new flow and send its responses to a state.
This creates a new flow. The flow may send back many responses which will be
queued by the framework until the flow terminates. The final status message
will cause the entire transaction to be committed to the specified state.
Args:
flow_name: The name of the flow to invoke.
next_state: The state in this flow, that responses to this message should
go to.
request_data: Any dict provided here will be available in the
RequestState protobuf. The Responses object maintains a reference to
this protobuf for use in the execution of the state method. (so you can
access this data by responses.request). There is no format mandated on
this data but it may be a serialized protobuf.
output_plugins: A list of output plugins to use for this flow.
flow_args: Arguments for the child flow.
Returns:
The flow_id of the child flow which was created.
Raises:
ValueError: The requested next state does not exist. | CallFlowProto | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def SendReply(
self, response: rdfvalue.RDFValue, tag: Optional[str] = None
) -> None:
"""Allows this flow to send a message to its parent flow.
If this flow does not have a parent, the message is saved to the database
as flow result.
Args:
response: An RDFValue() instance to be sent to the parent.
tag: If specified, tag the result with this tag.
Raises:
ValueError: If responses is not of the correct type.
FlowError: Method shouldn't be used in this flow (only_protos_allowed).
"""
if self.only_protos_allowed:
raise FlowError(
"`SendReply` is not allowed for flows that only allow protos. Use"
" `SendReplyProto` instead."
)
if not isinstance(response, rdfvalue.RDFValue):
raise ValueError(
f"SendReply can only send RDFValues, got {type(response)}"
)
if not any(isinstance(response, t) for t in self.result_types):
logging.warning(
"Flow %s sends response of unexpected type %s.",
type(self).__name__,
type(response).__name__,
)
reply = rdf_flow_objects.FlowResult(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
hunt_id=self.rdf_flow.parent_hunt_id,
payload=response,
tag=tag,
)
if self.rdf_flow.parent_flow_id:
if isinstance(response, rdf_structs.RDFProtoStruct):
rdf_packed_payload = rdf_structs.AnyValue.Pack(response)
else:
# Should log for `GetMBR` flow which returns `RDFBytes`.
        # Might fail for other flows we're unaware of that also return
        # primitives.
logging.error(
"Flow %s sends response of unexpected type %s.",
self.__class__.__name__,
type(response),
)
rdf_packed_payload = None
flow_response = rdf_flow_objects.FlowResponse(
client_id=self.rdf_flow.client_id,
request_id=self.rdf_flow.parent_request_id,
response_id=self.GetNextResponseId(),
payload=response,
any_payload=rdf_packed_payload,
flow_id=self.rdf_flow.parent_flow_id,
tag=tag,
)
self.flow_responses.append(flow_response)
# For nested flows we want the replies to be written,
# but not to be processed by output plugins.
self.replies_to_write.append(reply)
else:
self.replies_to_write.append(reply)
self.replies_to_process.append(reply)
self.rdf_flow.num_replies_sent += 1
# Keeping track of result types/tags in a plain Python
# _num_replies_per_type_tag dict. In RDFValues/proto2 we have to represent
# dictionaries as lists of key-value pairs (i.e. there's no library
# support for dicts as data structures). Hence, updating a key would require
# iterating over the pairs - which might get expensive for hundreds of
# thousands of results. To avoid the issue we keep a non-serialized Python
# dict to be later accumulated into a serializable FlowResultCount
# in PersistState().
key = (type(response).__name__, tag or "")
self._num_replies_per_type_tag[key] += 1 | Allows this flow to send a message to its parent flow.
If this flow does not have a parent, the message is saved to the database
as flow result.
Args:
response: An RDFValue() instance to be sent to the parent.
tag: If specified, tag the result with this tag.
Raises:
ValueError: If responses is not of the correct type.
FlowError: Method shouldn't be used in this flow (only_protos_allowed). | SendReply | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
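# --- Illustrative usage sketch (editor's addition, not part of the GRR source).
# A state method (assumed to live inside a FlowBase subclass) forwarding client
# responses as tagged flow results; the tag value is arbitrary and can later be
# used to filter results via the API.
def ProcessMatches(self, responses):
  if not responses.success:
    raise flow_base.FlowError(responses.status)
  for response in responses:
    self.SendReply(response, tag="matched")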
def SendReplyProto(
self,
response: pb_message.Message,
tag: Optional[str] = None,
) -> None:
"""Allows this flow to save a flow result to the database.
In case of a child flow, results are also returned to the parent flow.
Args:
response: A protobuf instance to be sent to the parent.
tag: If specified, tag the result with this tag.
Raises:
TypeError: If responses is not of the correct type.
"""
if not isinstance(response, pb_message.Message):
raise TypeError(
f"SendReplyProto can only send Protobufs, got {type(response)}"
)
if not any(isinstance(response, t) for t in self.proto_result_types):
raise TypeError(
f"Flow {type(self).__name__} sends response of unexpected type"
f" {type(response).__name__}. Expected one of"
f" {self.proto_result_types}",
)
reply = flows_pb2.FlowResult(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
hunt_id=self.rdf_flow.parent_hunt_id,
tag=tag,
)
reply.payload.Pack(response)
self.proto_replies_to_write.append(reply)
if self.rdf_flow.parent_flow_id:
res = flows_pb2.FlowResponse(
client_id=self.rdf_flow.client_id,
request_id=self.rdf_flow.parent_request_id,
response_id=self.GetNextResponseId(),
flow_id=self.rdf_flow.parent_flow_id,
tag=tag,
)
res.payload.Pack(response)
res.any_payload.Pack(response)
self.proto_flow_responses.append(res)
else:
# We only want to process replies with output plugins if this is
# a parent flow (not nested).
self.proto_replies_to_process.append(reply)
self.rdf_flow.num_replies_sent += 1
# Keeping track of result types/tags in a plain Python
# _num_replies_per_type_tag dict. In RDFValues/proto2 we have to represent
# dictionaries as lists of key-value pairs (i.e. there's no library
# support for dicts as data structures). Hence, updating a key would require
# iterating over the pairs - which might get expensive for hundreds of
# thousands of results. To avoid the issue we keep a non-serialized Python
# dict to be later accumulated into a serializable FlowResultCount
# in PersistState().
key = (type(response).__name__, tag or "")
self._num_replies_per_type_tag[key] += 1 | Allows this flow to save a flow result to the database.
In case of a child flow, results are also returned to the parent flow.
Args:
response: A protobuf instance to be sent to the parent.
tag: If specified, tag the result with this tag.
Raises:
TypeError: If responses is not of the correct type. | SendReplyProto | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
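# --- Illustrative usage sketch (editor's addition, not part of the GRR source).
# A proto-only flow emitting results with SendReplyProto(). The StatEntry
# result type and the proto_result_types declaration are assumptions; a flow
# must declare every proto type it replies with.
from grr_response_proto import jobs_pb2
from grr_response_server import flow_base


class ProtoResultExampleFlow(flow_base.FlowBase):
  only_protos_allowed = True
  proto_result_types = (jobs_pb2.StatEntry,)

  def Start(self):
    self.SendReplyProto(jobs_pb2.StatEntry(st_size=123), tag="stat")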
def SaveResourceUsage(self, status: rdf_flow_objects.FlowStatus) -> None:
"""Method to tally resources."""
user_cpu = status.cpu_time_used.user_cpu_time
system_cpu = status.cpu_time_used.system_cpu_time
self.rdf_flow.cpu_time_used.user_cpu_time += user_cpu
self.rdf_flow.cpu_time_used.system_cpu_time += system_cpu
self.rdf_flow.network_bytes_sent += status.network_bytes_sent
if not self.rdf_flow.runtime_us:
self.rdf_flow.runtime_us = rdfvalue.Duration(0)
if status.runtime_us:
self.rdf_flow.runtime_us += status.runtime_us
if self.rdf_flow.cpu_limit:
user_cpu_total = self.rdf_flow.cpu_time_used.user_cpu_time
system_cpu_total = self.rdf_flow.cpu_time_used.system_cpu_time
if self.rdf_flow.cpu_limit < (user_cpu_total + system_cpu_total):
# We have exceeded our CPU time limit, stop this flow.
raise flow.FlowResourcesExceededError(
"CPU limit exceeded for {} {}.".format(
self.rdf_flow.flow_class_name, self.rdf_flow.flow_id
)
)
if (
self.rdf_flow.network_bytes_limit
and self.rdf_flow.network_bytes_limit < self.rdf_flow.network_bytes_sent
):
# We have exceeded our byte limit, stop this flow.
raise flow.FlowResourcesExceededError(
"Network bytes limit exceeded {} {}.".format(
self.rdf_flow.flow_class_name, self.rdf_flow.flow_id
)
)
if (
self.rdf_flow.runtime_limit_us
and self.rdf_flow.runtime_limit_us < self.rdf_flow.runtime_us
):
raise flow.FlowResourcesExceededError(
"Runtime limit exceeded {} {}.".format(
self.rdf_flow.flow_class_name, self.rdf_flow.flow_id
)
) | Method to tally resources. | SaveResourceUsage | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def Error(
self,
error_message: Optional[str] = None,
backtrace: Optional[str] = None,
status: Optional[rdf_structs.EnumNamedValue] = None,
) -> None:
"""Terminates this flow with an error."""
flow_name = self.__class__.__name__
is_child = bool(self.rdf_flow.parent_flow_id)
exception_name = _ExtractExceptionName(error_message)
FLOW_ERRORS.Increment(fields=[flow_name, is_child, exception_name])
client_id = self.rdf_flow.client_id
flow_id = self.rdf_flow.flow_id
# backtrace is set for unexpected failures caught in a wildcard except
# branch, thus these should be logged as error. backtrace is None for
# faults that are anticipated in flows, thus should only be logged as
# warning.
if backtrace:
logging.error(
"Error in flow %s on %s: %s, %s",
flow_id,
client_id,
error_message,
backtrace,
)
else:
logging.warning(
"Error in flow %s on %s: %s:", flow_id, client_id, error_message
)
if self.rdf_flow.parent_flow_id or self.rdf_flow.parent_hunt_id:
status_msg = rdf_flow_objects.FlowStatus(
client_id=client_id,
request_id=self.rdf_flow.parent_request_id,
response_id=self.GetNextResponseId(),
cpu_time_used=self.rdf_flow.cpu_time_used,
network_bytes_sent=self.rdf_flow.network_bytes_sent,
runtime_us=self.rdf_flow.runtime_us,
error_message=error_message,
flow_id=self.rdf_flow.parent_flow_id,
backtrace=backtrace,
)
if status is not None:
status_msg.status = status
else:
status_msg.status = rdf_flow_objects.FlowStatus.Status.ERROR
if self.rdf_flow.parent_flow_id:
self.flow_responses.append(status_msg)
elif self.rdf_flow.parent_hunt_id:
hunt.StopHuntIfCPUOrNetworkLimitsExceeded(self.rdf_flow.parent_hunt_id)
self.rdf_flow.flow_state = self.rdf_flow.FlowState.ERROR
if backtrace is not None:
self.rdf_flow.backtrace = backtrace
if error_message is not None:
self.rdf_flow.error_message = error_message
self.NotifyCreatorOfError() | Terminates this flow with an error. | Error | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def _ClearAllRequestsAndResponses(self) -> None:
"""Clears all requests and responses."""
client_id = self.rdf_flow.client_id
flow_id = self.rdf_flow.flow_id
# Remove all requests queued for deletion that we delete in the call below.
self.completed_requests = [
r
for r in self.completed_requests
if r.client_id != client_id or r.flow_id != flow_id
]
data_store.REL_DB.DeleteAllFlowRequestsAndResponses(client_id, flow_id) | Clears all requests and responses. | _ClearAllRequestsAndResponses | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def NotifyAboutEnd(self) -> None:
"""Notify about the end of the flow."""
# Sum up number of replies to write with the number of already
# written results.
num_results = (
len(self.replies_to_write)
+ len(self.proto_replies_to_write)
+ data_store.REL_DB.CountFlowResults(
self.rdf_flow.client_id, self.rdf_flow.flow_id
)
)
flow_ref = objects_pb2.FlowReference(
client_id=self.rdf_flow.client_id, flow_id=self.rdf_flow.flow_id
)
notification_lib.Notify(
self.creator,
objects_pb2.UserNotification.Type.TYPE_FLOW_RUN_COMPLETED,
"Flow %s completed with %d %s"
% (
self.__class__.__name__,
num_results,
num_results == 1 and "result" or "results",
),
objects_pb2.ObjectReference(
reference_type=objects_pb2.ObjectReference.Type.FLOW, flow=flow_ref
),
) | Notify about the end of the flow. | NotifyAboutEnd | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def MarkDone(self, status=None):
"""Marks this flow as done."""
FLOW_COMPLETIONS.Increment(fields=[self.__class__.__name__])
# Notify our parent flow or hunt that we are done (if there's a parent flow
# or hunt).
if self.rdf_flow.parent_flow_id or self.rdf_flow.parent_hunt_id:
status = rdf_flow_objects.FlowStatus(
client_id=self.rdf_flow.client_id,
request_id=self.rdf_flow.parent_request_id,
response_id=self.GetNextResponseId(),
status=rdf_flow_objects.FlowStatus.Status.OK,
cpu_time_used=self.rdf_flow.cpu_time_used,
network_bytes_sent=self.rdf_flow.network_bytes_sent,
runtime_us=self.rdf_flow.runtime_us,
flow_id=self.rdf_flow.parent_flow_id,
)
if self.rdf_flow.parent_flow_id:
self.flow_responses.append(status)
elif self.rdf_flow.parent_hunt_id:
hunt.StopHuntIfCPUOrNetworkLimitsExceeded(self.rdf_flow.parent_hunt_id)
self.rdf_flow.flow_state = self.rdf_flow.FlowState.FINISHED
if self.ShouldSendNotifications():
self.NotifyAboutEnd() | Marks this flow as done. | MarkDone | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def Log(self, format_str: str, *args: object) -> None:
"""Logs the message using the flow's standard logging.
Args:
format_str: Format string
*args: arguments to the format string
"""
# If there are no formatting arguments given, we do not format the message.
# This behaviour is in-line with `logging.*` functions and allows one to log
# messages with `%` without weird workarounds.
if not args:
message = format_str
else:
message = format_str % args
log_entry = flows_pb2.FlowLogEntry(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
hunt_id=self.rdf_flow.parent_hunt_id,
message=message,
)
data_store.REL_DB.WriteFlowLogEntry(log_entry) | Logs the message using the flow's standard logging.
Args:
format_str: Format string
*args: arguments to the format string | Log | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
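# --- Illustrative usage sketch (editor's addition, not part of the GRR source).
# Log() only %-formats the message when arguments are supplied, so a literal
# percent sign is safe in argument-less calls. Assumed to live inside a
# FlowBase subclass; `num_files` is a placeholder.
def ReportProgress(self, num_files):
  self.Log("Fetched %d files from %s", num_files, self.client_id)  # formatted
  self.Log("Progress: 100% of enumeration done")  # no args, not %-formatted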
def RunStateMethod(
self,
method_name: str,
request: Optional[rdf_flow_objects.FlowRequest] = None,
responses: Optional[
Sequence[
Union[
rdf_flow_objects.FlowResponse,
rdf_flow_objects.FlowStatus,
rdf_flow_objects.FlowIterator,
]
]
] = None,
) -> None:
"""Completes the request by calling the state method.
Args:
method_name: The name of the state method to call.
request: A RequestState protobuf.
responses: A list of FlowResponses, FlowStatuses, and FlowIterators
responding to the request.
Raises:
FlowError: Processing time for the flow has expired.
"""
client_id = self.rdf_flow.client_id
deadline = self.rdf_flow.processing_deadline
if deadline and rdfvalue.RDFDatetime.Now() > deadline:
raise FlowError(
"Processing time for flow %s on %s expired."
% (self.rdf_flow.flow_id, self.rdf_flow.client_id)
)
self.rdf_flow.current_state = method_name
if request and responses:
logging.debug(
"Running %s for flow %s on %s, %d responses.",
method_name,
self.rdf_flow.flow_id,
client_id,
len(responses),
)
else:
logging.debug(
"Running %s for flow %s on %s",
method_name,
self.rdf_flow.flow_id,
client_id,
)
try:
try:
method = getattr(self, method_name)
except AttributeError:
raise ValueError(
"Flow %s has no state method %s"
% (self.__class__.__name__, method_name)
) from None
# Prepare a responses object for the state method to use:
if responses is not None and (
hasattr(method, "_proto2_any_responses")
and method._proto2_any_responses # pylint: disable=protected-access
):
responses = flow_responses.Responses.FromResponsesProto2Any(
responses, request
)
else:
responses = flow_responses.Responses.FromResponses(
request=request, responses=responses
)
if responses.status is not None:
self.SaveResourceUsage(responses.status)
GRR_WORKER_STATES_RUN.Increment()
if method_name == "Start":
FLOW_STARTS.Increment(fields=[self.rdf_flow.flow_class_name])
method()
elif method_name == "End":
method()
else:
method(responses)
# TODO: Refactor output plugins to be internally proto-based.
if self.proto_replies_to_process:
rdf_replies = [
mig_flow_objects.ToRDFFlowResult(r)
for r in self.proto_replies_to_process
]
self.replies_to_process.extend(rdf_replies)
self.proto_replies_to_process = []
if self.replies_to_process:
if self.rdf_flow.parent_hunt_id and not self.rdf_flow.parent_flow_id:
self._ProcessRepliesWithHuntOutputPlugins(self.replies_to_process)
else:
self._ProcessRepliesWithFlowOutputPlugins(self.replies_to_process)
self.replies_to_process = []
except flow.FlowResourcesExceededError as e:
logging.info(
"Flow %s on %s exceeded resource limits: %s.",
self.rdf_flow.flow_id,
client_id,
str(e),
)
self.Error(error_message=str(e))
# We don't know here what exceptions can be thrown in the flow but we have
# to continue. Thus, we catch everything.
except Exception as e: # pylint: disable=broad-except
msg = str(e)
self.Error(error_message=msg, backtrace=traceback.format_exc()) | Completes the request by calling the state method.
Args:
method_name: The name of the state method to call.
request: A RequestState protobuf.
responses: A list of FlowResponses, FlowStatuses, and FlowIterators
responding to the request.
Raises:
FlowError: Processing time for the flow has expired. | RunStateMethod | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def ProcessAllReadyRequests(self) -> tuple[int, int]:
"""Processes all requests that are due to run.
Returns:
(processed, incrementally_processed) The number of completed processed
requests and the number of incrementally processed ones.
"""
request_dict = data_store.REL_DB.ReadFlowRequests(
self.rdf_flow.client_id,
self.rdf_flow.flow_id,
)
completed_requests = FindCompletedRequestsToProcess(
request_dict,
self.rdf_flow.next_request_to_process,
)
incremental_requests = FindIncrementalRequestsToProcess(
request_dict,
self.rdf_flow.next_request_to_process,
)
# When dealing with a callback flow, count all incremental requests even if
# `incremental_requests` is empty, as it's expected that messages might
# arrive in the wrong order and therefore not always be suitable for
# processing.
num_incremental = sum(
[1 for _, (req, _) in request_dict.items() if req.callback_state]
)
next_response_id_map = {}
# Process incremental requests' updates first. Incremental requests have
# the 'callback_state' attribute set and the callback state is called
# every time new responses arrive. Note that the id of the next expected
# response is kept in request's 'next_response_id' attribute to guarantee
# that responses are going to be processed in the right order.
for request, responses in incremental_requests:
request = mig_flow_objects.ToRDFFlowRequest(request)
if not self.IsRunning():
break
# Responses have to be processed in the correct order, no response
# can be skipped.
rdf_responses = []
for r in responses:
if isinstance(r, flows_pb2.FlowResponse):
rdf_responses.append(mig_flow_objects.ToRDFFlowResponse(r))
if isinstance(r, flows_pb2.FlowStatus):
rdf_responses.append(mig_flow_objects.ToRDFFlowStatus(r))
if isinstance(r, flows_pb2.FlowIterator):
rdf_responses.append(mig_flow_objects.ToRDFFlowIterator(r))
if rdf_responses:
        # We do not send incremental updates for FlowStatus responses.
        # TODO: Check if the id of the last message in the batch, the
        # FlowStatus, is important to keep for the next_response_id map, as
        # the flow is complete at that point anyway. If not, we can skip
        # adding the FlowStatus to the list of responses to process instead
        # of filtering it out here.
flow_updates = [
r
for r in rdf_responses
if not isinstance(r, rdf_flow_objects.FlowStatus)
]
if flow_updates:
self.RunStateMethod(request.callback_state, request, flow_updates)
# If the request was processed, update the next_response_id.
next_response_id_map[request.request_id] = (
rdf_responses[-1].response_id + 1
)
if next_response_id_map:
data_store.REL_DB.UpdateIncrementalFlowRequests(
self.rdf_flow.client_id, self.rdf_flow.flow_id, next_response_id_map
)
# Process completed requests.
#
# If the flow gets a bunch of requests to process and processing one of
# them leads to flow termination, other requests should be ignored.
# Hence: self.IsRunning check in the loop's condition.
for request, responses in completed_requests:
if not self.IsRunning():
break
rdf_request = mig_flow_objects.ToRDFFlowRequest(request)
rdf_responses = []
for r in responses:
if isinstance(r, flows_pb2.FlowResponse):
rdf_responses.append(mig_flow_objects.ToRDFFlowResponse(r))
if isinstance(r, flows_pb2.FlowStatus):
rdf_responses.append(mig_flow_objects.ToRDFFlowStatus(r))
if isinstance(r, flows_pb2.FlowIterator):
rdf_responses.append(mig_flow_objects.ToRDFFlowIterator(r))
# If there's not even a `Status` response, we send `None` as response.
if not rdf_responses:
rdf_responses = None
self.RunStateMethod(request.next_state, rdf_request, rdf_responses)
self.rdf_flow.next_request_to_process += 1
self.completed_requests.append(request)
if (
completed_requests
and self.IsRunning()
and not self.outstanding_requests
):
self.RunStateMethod("End")
if (
self.rdf_flow.flow_state == self.rdf_flow.FlowState.RUNNING
and not self.outstanding_requests
):
self.MarkDone()
self.PersistState()
if not self.IsRunning():
# All requests and responses can now be deleted.
self._ClearAllRequestsAndResponses()
return len(completed_requests), num_incremental | Processes all requests that are due to run.
Returns:
(processed, incrementally_processed) The number of completed processed
requests and the number of incrementally processed ones. | ProcessAllReadyRequests | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
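The incremental path above relies on each request's `next_response_id` so that callback states never observe responses out of order. A minimal, self-contained sketch of that ordering rule (the `FakeResponse` type and values are made up for illustration and are not GRR code):

```python
from dataclasses import dataclass


@dataclass
class FakeResponse:
  response_id: int
  payload: str


def ConsumeInOrder(next_response_id: int, responses: list[FakeResponse]):
  """Returns the gap-free prefix of responses and the new next expected id."""
  consumable = []
  for response in sorted(responses, key=lambda r: r.response_id):
    if response.response_id != next_response_id:
      break  # A gap: later responses must wait until the missing one arrives.
    consumable.append(response)
    next_response_id += 1
  return consumable, next_response_id


# Response 2 has not arrived yet, so only response 1 is ready for the callback.
ready, next_id = ConsumeInOrder(1, [FakeResponse(1, "a"), FakeResponse(3, "c")])
assert [r.payload for r in ready] == ["a"] and next_id == 2
```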
def outstanding_requests(self) -> int:
"""Returns the number of all outstanding requests.
This is used to determine if the flow needs to be destroyed yet.
Returns:
the number of all outstanding requests.
"""
return (
self.rdf_flow.next_outbound_id - self.rdf_flow.next_request_to_process
) | Returns the number of all outstanding requests.
This is used to determine if the flow needs to be destroyed yet.
Returns:
the number of all outstanding requests. | outstanding_requests | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def FlushQueuedMessages(self) -> None:
"""Flushes queued messages."""
# TODO(amoser): This could be done in a single db call, might be worth
# optimizing.
if self.flow_requests or self.proto_flow_requests:
all_requests = [
mig_flow_objects.ToProtoFlowRequest(r) for r in self.flow_requests
] + self.proto_flow_requests
# We make a single DB call to write all requests. Contrary to what the
# name suggests, this method does more than writing the requests to the
# DB. It also tallies the flows that need processing and updates the
# next request to process. Writing the requests in separate calls can
# interfere with this process.
data_store.REL_DB.WriteFlowRequests(all_requests)
self.flow_requests = []
self.proto_flow_requests = []
if self.flow_responses:
flow_responses_proto = []
for r in self.flow_responses:
if isinstance(r, rdf_flow_objects.FlowResponse):
flow_responses_proto.append(mig_flow_objects.ToProtoFlowResponse(r))
if isinstance(r, rdf_flow_objects.FlowStatus):
flow_responses_proto.append(mig_flow_objects.ToProtoFlowStatus(r))
if isinstance(r, rdf_flow_objects.FlowIterator):
flow_responses_proto.append(mig_flow_objects.ToProtoFlowIterator(r))
data_store.REL_DB.WriteFlowResponses(flow_responses_proto)
self.flow_responses = []
if self.proto_flow_responses:
data_store.REL_DB.WriteFlowResponses(self.proto_flow_responses)
self.proto_flow_responses = []
if self.client_action_requests:
client_id = self.rdf_flow.client_id
for request in self.client_action_requests:
fleetspeak_utils.SendGrrMessageThroughFleetspeak(client_id, request)
self.client_action_requests = []
if self.proto_client_action_requests:
client_id = self.rdf_flow.client_id
for request in self.proto_client_action_requests:
fleetspeak_utils.SendGrrMessageProtoThroughFleetspeak(
client_id, request
)
self.proto_client_action_requests = []
for request in self.rrg_requests:
fleetspeak_utils.SendRrgRequest(self.rdf_flow.client_id, request)
self.rrg_requests = []
if self.completed_requests:
data_store.REL_DB.DeleteFlowRequests(self.completed_requests)
self.completed_requests = []
if self.proto_replies_to_write or self.replies_to_write:
all_results = self.proto_replies_to_write + [
mig_flow_objects.ToProtoFlowResult(r) for r in self.replies_to_write
]
# Write flow results to REL_DB, even if the flow is a nested flow.
data_store.REL_DB.WriteFlowResults(all_results)
if self.rdf_flow.parent_hunt_id:
hunt.StopHuntIfCPUOrNetworkLimitsExceeded(self.rdf_flow.parent_hunt_id)
self.proto_replies_to_write = []
self.replies_to_write = [] | Flushes queued messages. | FlushQueuedMessages | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def _ProcessRepliesWithHuntOutputPlugins(
self, replies: Sequence[rdf_flow_objects.FlowResult]
) -> None:
"""Applies output plugins to hunt results."""
hunt_obj = data_store.REL_DB.ReadHuntObject(self.rdf_flow.parent_hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
self.rdf_flow.output_plugins = hunt_obj.output_plugins
hunt_output_plugins_states = data_store.REL_DB.ReadHuntOutputPluginsStates(
self.rdf_flow.parent_hunt_id
)
hunt_output_plugins_states = [
mig_flow_runner.ToRDFOutputPluginState(s)
for s in hunt_output_plugins_states
]
self.rdf_flow.output_plugins_states = hunt_output_plugins_states
created_plugins = self._ProcessRepliesWithFlowOutputPlugins(replies)
for index, (plugin, state) in enumerate(
zip(created_plugins, hunt_output_plugins_states)
):
if plugin is None:
continue
# Only do the REL_DB call if the plugin state has actually changed.
s = state.plugin_state.Copy()
plugin.UpdateState(s)
if s != state.plugin_state:
def UpdateFn(
plugin_state: jobs_pb2.AttributedDict,
) -> jobs_pb2.AttributedDict:
plugin_state_rdf = mig_protodict.ToRDFAttributedDict(plugin_state)
plugin.UpdateState(plugin_state_rdf) # pylint: disable=cell-var-from-loop
plugin_state = mig_protodict.ToProtoAttributedDict(plugin_state_rdf)
return plugin_state
data_store.REL_DB.UpdateHuntOutputPluginState(
hunt_obj.hunt_id, index, UpdateFn
)
for plugin_def, created_plugin in zip(
hunt_obj.output_plugins, created_plugins
):
if created_plugin is not None:
HUNT_RESULTS_RAN_THROUGH_PLUGIN.Increment(
len(replies), fields=[plugin_def.plugin_name]
)
else:
HUNT_OUTPUT_PLUGIN_ERRORS.Increment(fields=[plugin_def.plugin_name]) | Applies output plugins to hunt results. | _ProcessRepliesWithHuntOutputPlugins | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def _ProcessRepliesWithFlowOutputPlugins(
self, replies: Sequence[rdf_flow_objects.FlowResult]
) -> Sequence[Optional[output_plugin_lib.OutputPlugin]]:
"""Processes replies with output plugins."""
created_output_plugins = []
for index, output_plugin_state in enumerate(
self.rdf_flow.output_plugins_states
):
plugin_descriptor = output_plugin_state.plugin_descriptor
output_plugin_cls = plugin_descriptor.GetPluginClass()
args = plugin_descriptor.args
output_plugin = output_plugin_cls(
source_urn=self.rdf_flow.long_flow_id, args=args
)
try:
output_plugin.ProcessResponses(
output_plugin_state.plugin_state,
replies,
)
output_plugin.Flush(output_plugin_state.plugin_state)
output_plugin.UpdateState(output_plugin_state.plugin_state)
data_store.REL_DB.WriteFlowOutputPluginLogEntry(
flows_pb2.FlowOutputPluginLogEntry(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
hunt_id=self.rdf_flow.parent_hunt_id,
output_plugin_id="%d" % index,
log_entry_type=flows_pb2.FlowOutputPluginLogEntry.LogEntryType.LOG,
message="Processed %d replies." % len(replies),
)
)
self.Log(
"Plugin %s successfully processed %d flow replies.",
plugin_descriptor,
len(replies),
)
created_output_plugins.append(output_plugin)
except Exception as e: # pylint: disable=broad-except
logging.exception(
"Plugin %s failed to process %d replies.",
plugin_descriptor,
len(replies),
)
created_output_plugins.append(None)
data_store.REL_DB.WriteFlowOutputPluginLogEntry(
flows_pb2.FlowOutputPluginLogEntry(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
hunt_id=self.rdf_flow.parent_hunt_id,
output_plugin_id="%d" % index,
log_entry_type=flows_pb2.FlowOutputPluginLogEntry.LogEntryType.ERROR,
message="Error while processing %d replies: %s"
% (len(replies), str(e)),
)
)
self.Log(
"Plugin %s failed to process %d replies due to: %s",
plugin_descriptor,
len(replies),
e,
)
return created_output_plugins | Processes replies with output plugins. | _ProcessRepliesWithFlowOutputPlugins | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
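A hedged sketch of a plugin on the receiving end of these calls. Only the call shape is taken from the code above (the runner invokes `ProcessResponses`, `Flush`, and `UpdateState`, each with the mutable plugin state); the plugin itself is illustrative, and treating the state as a plain mutable mapping is an assumption.

```python
class CountingOutputPluginSketch(output_plugin_lib.OutputPlugin):
  """Toy plugin that tallies how many replies it has been fed (sketch only)."""

  def ProcessResponses(self, state, responses) -> None:
    # Assumption: the plugin state supports dict-style access.
    state["num_replies"] = int(state.get("num_replies", 0)) + len(responses)

  def Flush(self, state) -> None:
    pass  # Nothing is buffered in this toy example.

  def UpdateState(self, state) -> None:
    pass  # The state was already mutated in ProcessResponses.
```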
def MergeQueuedMessages(self, flow_obj: "FlowBase") -> None:
"""Merges queued messages."""
self.flow_requests.extend(flow_obj.flow_requests)
flow_obj.flow_requests = []
self.proto_flow_requests.extend(flow_obj.proto_flow_requests)
flow_obj.proto_flow_requests = []
self.flow_responses.extend(flow_obj.flow_responses)
flow_obj.flow_responses = []
self.proto_flow_responses.extend(flow_obj.proto_flow_responses)
flow_obj.proto_flow_responses = []
self.rrg_requests.extend(flow_obj.rrg_requests)
flow_obj.rrg_requests = []
self.client_action_requests.extend(flow_obj.client_action_requests)
flow_obj.client_action_requests = []
self.proto_client_action_requests.extend(
flow_obj.proto_client_action_requests
)
flow_obj.proto_client_action_requests = []
self.completed_requests.extend(flow_obj.completed_requests)
flow_obj.completed_requests = []
self.replies_to_write.extend(flow_obj.replies_to_write)
flow_obj.replies_to_write = []
self.proto_replies_to_write.extend(flow_obj.proto_replies_to_write)
flow_obj.proto_replies_to_write = [] | Merges queued messages. | MergeQueuedMessages | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def GetFilesArchiveMappings(
self, flow_results: Iterator[rdf_flow_objects.FlowResult]
) -> Iterator[ClientPathArchiveMapping]:
"""Returns a mapping used to generate flow results archive.
If this is implemented by a flow, then instead of generating
a general-purpose archive with all files referenced in the
results present, an archive would be generated with
just the files referenced in the mappings.
Args:
flow_results: An iterator for flow results.
Returns:
An iterator of mappings from REL_DB's ClientPaths to archive paths.
Raises:
NotImplementedError: if not implemented by a subclass.
"""
raise NotImplementedError("GetFilesArchiveMappings() not implemented") | Returns a mapping used to generate flow results archive.
If this is implemented by a flow, then instead of generating
a general-purpose archive with all files referenced in the
results present, an archive would be generated with
just the files referenced in the mappings.
Args:
flow_results: An iterator for flow results.
Returns:
An iterator of mappings from REL_DB's ClientPaths to archive paths.
Raises:
NotImplementedError: if not implemented by a subclass. | GetFilesArchiveMappings | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
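A hedged sketch of a flow that overrides this hook to archive every collected file under a flat prefix. The payload field access, `db.ClientPath.OS`, and the `ClientPathArchiveMapping` constructor arguments are assumptions about the surrounding API rather than verified usage.

```python
class FlatArchiveFlowSketch(FlowBase):
  """Illustrative only: maps each collected file to collected/<basename>."""

  def GetFilesArchiveMappings(
      self, flow_results: Iterator[rdf_flow_objects.FlowResult]
  ) -> Iterator[ClientPathArchiveMapping]:
    for result in flow_results:
      stat_entry = result.payload  # Assumed to be a StatEntry-like value.
      components = tuple(p for p in stat_entry.pathspec.path.split("/") if p)
      client_path = db.ClientPath.OS(self.client_id, components)
      yield ClientPathArchiveMapping(client_path, "collected/" + components[-1])
```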
def _AccountForProtoResultMetadata(self):
"""Merges `_num_replies_per_type_tag` Counter with current ResultMetadata."""
self._result_metadata.is_metadata_set = True
for r in self._result_metadata.num_results_per_type_tag:
key = (r.type, r.tag)
# This removes the item from _num_replies_per_type_tag if it's present in
# result_metadata.
count = self._num_replies_per_type_tag.pop(key, 0)
r.count = r.count + count
# Iterate over remaining items - i.e. items that were not present in
# result_metadata.
for (
result_type,
result_tag,
), count in self._num_replies_per_type_tag.items():
self._result_metadata.num_results_per_type_tag.append(
flows_pb2.FlowResultCount(
type=result_type, tag=result_tag, count=count
)
)
self._num_replies_per_type_tag = collections.Counter()
self.rdf_flow.result_metadata = (
rdf_flow_objects.FlowResultMetadata().FromSerializedBytes(
self._result_metadata.SerializeToString()
)
) | Merges `_num_replies_per_type_tag` Counter with current ResultMetadata. | _AccountForProtoResultMetadata | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
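The merge above is a fold of a `collections.Counter` into the persisted per-(type, tag) totals. A self-contained sketch of the same bookkeeping with a plain dictionary standing in for the result metadata (values are made up):

```python
import collections

running_totals = {("FileFinderResult", ""): 5}
new_counts = collections.Counter(
    {("FileFinderResult", ""): 2, ("StatEntry", "tmp"): 1}
)

# Fold counts for keys already present in the persisted metadata...
for key in list(running_totals):
  running_totals[key] += new_counts.pop(key, 0)
# ...then append entries for keys seen for the first time.
for key, count in new_counts.items():
  running_totals[key] = count

assert running_totals == {("FileFinderResult", ""): 7, ("StatEntry", "tmp"): 1}
new_counts = collections.Counter()  # Reset, mirroring _num_replies_per_type_tag.
```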
def PersistState(self) -> None:
"""Persists flow state."""
self._AccountForProtoResultMetadata()
self.rdf_flow.persistent_data = self.state
if self._store is not None:
self.rdf_flow.store = rdf_structs.AnyValue.PackProto2(self._store)
if self._progress is not None:
self.rdf_flow.progress = rdf_structs.AnyValue.PackProto2(self._progress) | Persists flow state. | PersistState | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def args(self, args: rdfvalue.RDFValue) -> None:
"""Updates both rdf and proto args."""
if not isinstance(args, self.args_type):
raise TypeError(
f"args must be of type {self.args_type}, got {type(args)} instead."
)
self.rdf_flow.args = args
self._proto_args = self.proto_args_type()
self._proto_args.ParseFromString(args.SerializeToBytes()) | Updates both rdf and proto args. | args | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def proto_args(self) -> _ProtoArgsT:
"""Returns the proto args."""
if self._proto_args is not None:
return self._proto_args
# We use `rdf_flow.args` as source of truth for now.
if self.rdf_flow.HasField("args"):
      # The proto and RDF wire formats are assumed to be compatible here.
args = self.proto_args_type()
args.ParseFromString(self.args.SerializeToBytes())
self._proto_args = args
else:
self._proto_args = self.proto_args_type()
return self._proto_args | Returns the proto args. | proto_args | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def proto_args(self, proto_args: Optional[_ProtoArgsT]) -> None:
"""Updates both rdf and proto args."""
if not isinstance(proto_args, self.proto_args_type):
raise TypeError(
f"proto_args must be of type {self.proto_args_type}, got"
f" {type(proto_args)} instead."
)
self._proto_args = proto_args
self.rdf_flow.args = self.args_type.FromSerializedBytes(
proto_args.SerializeToString()
) | Updates both rdf and proto args. | proto_args | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def UseProto2AnyResponses(
state_method: Callable[
[FlowBase, flow_responses.Responses[any_pb2.Any]], None
],
) -> Callable[[FlowBase, flow_responses.Responses[any_pb2.Any]], None]:
"""Instructs flow execution not to use RDF magic for unpacking responses.
The current default behaviour of the flow execution is to do type lookup and
automagically unpack flow responses to "appropriate" type. This behaviour is
problematic for many reasons and methods that do not need to rely on it should
use this annotation.
Args:
state_method: A flow state method to annotate.
Returns:
A flow state method that will not have the problematic behaviour.
"""
@functools.wraps(state_method)
def Wrapper(self, responses: flow_responses.Responses) -> None:
return state_method(self, responses)
Wrapper._proto2_any_responses = True # pylint: disable=protected-access
return Wrapper | Instructs flow execution not to use RDF magic for unpacking responses.
The current default behaviour of the flow execution is to do type lookup and
automagically unpack flow responses to "appropriate" type. This behaviour is
problematic for many reasons and methods that do not need to rely on it should
use this annotation.
Args:
state_method: A flow state method to annotate.
Returns:
A flow state method that will not have the problematic behaviour. | UseProto2AnyResponses | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
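A hedged usage sketch: the flow class, the state method name, and the concrete message type are placeholders. The only behaviour assumed from the code above is that the decorator tags the method so `RunStateMethod` hands it `any_pb2.Any` payloads to unpack explicitly.

```python
class RawResponsesFlowSketch(FlowBase):
  """Illustrative flow state that opts out of automatic RDF unpacking."""

  @UseProto2AnyResponses
  def ProcessStatEntries(
      self, responses: flow_responses.Responses[any_pb2.Any]
  ) -> None:
    if not responses.success:
      raise ValueError("Client action failed.")  # Simplified error handling.
    for any_response in responses:
      stat_entry = jobs_pb2.StatEntry()  # Placeholder for the real response type.
      any_response.Unpack(stat_entry)
      self.Log("Collected %s", stat_entry.pathspec.path)
```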
def _TerminateFlow(
proto_flow: flows_pb2.Flow,
reason: Optional[str] = None,
flow_state: rdf_structs.EnumNamedValue = rdf_flow_objects.Flow.FlowState.ERROR,
) -> None:
"""Does the actual termination."""
flow_cls = FlowRegistry.FlowClassByName(proto_flow.flow_class_name)
rdf_flow = mig_flow_objects.ToRDFFlow(proto_flow)
flow_obj = flow_cls(rdf_flow)
if not flow_obj.IsRunning():
# Nothing to do.
return
logging.info(
"Terminating flow %s on %s, reason: %s",
rdf_flow.flow_id,
rdf_flow.client_id,
reason,
)
rdf_flow.flow_state = flow_state
rdf_flow.error_message = reason
flow_obj.NotifyCreatorOfError()
proto_flow = mig_flow_objects.ToProtoFlow(rdf_flow)
data_store.REL_DB.UpdateFlow(
proto_flow.client_id,
proto_flow.flow_id,
flow_obj=proto_flow,
processing_on=None,
processing_since=None,
processing_deadline=None,
)
data_store.REL_DB.DeleteAllFlowRequestsAndResponses(
proto_flow.client_id, proto_flow.flow_id
) | Does the actual termination. | _TerminateFlow | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
def TerminateFlow(
client_id: str,
flow_id: str,
reason: Optional[str] = None,
flow_state: rdf_structs.EnumNamedValue = rdf_flow_objects.Flow.FlowState.ERROR,
) -> None:
"""Terminates a flow and all of its children.
Args:
client_id: Client ID of a flow to terminate.
flow_id: Flow ID of a flow to terminate.
reason: String with a termination reason.
flow_state: Flow state to be assigned to a flow after termination. Defaults
to FlowState.ERROR.
"""
to_terminate = [data_store.REL_DB.ReadFlowObject(client_id, flow_id)]
while to_terminate:
next_to_terminate = []
for proto_flow in to_terminate:
_TerminateFlow(proto_flow, reason=reason, flow_state=flow_state)
next_to_terminate.extend(
data_store.REL_DB.ReadChildFlowObjects(
proto_flow.client_id, proto_flow.flow_id
)
)
to_terminate = next_to_terminate | Terminates a flow and all of its children.
Args:
client_id: Client ID of a flow to terminate.
flow_id: Flow ID of a flow to terminate.
reason: String with a termination reason.
flow_state: Flow state to be assigned to a flow after termination. Defaults
to FlowState.ERROR. | TerminateFlow | python | google/grr | grr/server/grr_response_server/flow_base.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py | Apache-2.0 |
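A hedged usage sketch of terminating a flow tree from server-side code. The identifiers are made up, the import path is an assumption based on this file's location, and a configured REL_DB data store is required for the call to have any effect.

```python
from grr_response_server import flow_base

# Terminates the given flow and, recursively, all of its child flows.
flow_base.TerminateFlow(
    client_id="C.1234567890abcdef",
    flow_id="ABCDEF01",
    reason="Cancelled by an administrator.",
)
```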
def GetKnowledgeBase(rdf_client_obj, allow_uninitialized=False):
"""Returns a knowledgebase from an rdf client object."""
if not allow_uninitialized:
if rdf_client_obj is None:
raise artifact_utils.KnowledgeBaseUninitializedError(
"No client snapshot given."
)
if rdf_client_obj.knowledge_base is None:
raise artifact_utils.KnowledgeBaseUninitializedError(
"KnowledgeBase empty for %s." % rdf_client_obj.client_id
)
kb = rdf_client_obj.knowledge_base
if not kb.os:
raise artifact_utils.KnowledgeBaseAttributesMissingError(
"KnowledgeBase missing OS for %s. Knowledgebase content: %s"
% (rdf_client_obj.client_id, kb)
)
if rdf_client_obj is None or rdf_client_obj.knowledge_base is None:
return rdf_client.KnowledgeBase()
version = rdf_client_obj.os_version.split(".")
kb = rdf_client_obj.knowledge_base
try:
kb.os_major_version = int(version[0])
if len(version) > 1:
kb.os_minor_version = int(version[1])
except ValueError:
pass
return kb | Returns a knowledgebase from an rdf client object. | GetKnowledgeBase | python | google/grr | grr/server/grr_response_server/artifact.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact.py | Apache-2.0 |
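The fallback at the end derives the major and minor OS version from the snapshot's dotted `os_version` string and silently ignores non-numeric values. A self-contained sketch of just that parsing rule:

```python
def ParseOsVersion(os_version: str) -> tuple[int, int]:
  """Returns (major, minor), leaving both at 0 for unparsable versions."""
  major, minor = 0, 0
  parts = os_version.split(".")
  try:
    major = int(parts[0])
    if len(parts) > 1:
      minor = int(parts[1])
  except ValueError:
    pass  # e.g. a distro codename instead of a numeric version.
  return major, minor


assert ParseOsVersion("10.0.19045") == (10, 0)
assert ParseOsVersion("buster/sid") == (0, 0)
```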
def Start(self):
"""For each artifact, create subflows for each collector."""
self.state.knowledge_base = None
self.InitializeKnowledgeBase()
if self.client_os == "Linux":
self.CallFlow(
distro.CollectDistroInfo.__name__,
next_state=self._ProcessLinuxDistroInfo.__name__,
)
self.CallClient(
server_stubs.EnumerateUsers,
next_state=self._ProcessLinuxEnumerateUsers.__name__,
)
elif self.client_os == "Darwin":
list_users_dir_request = rdf_client_action.ListDirRequest()
list_users_dir_request.pathspec.pathtype = rdf_paths.PathSpec.PathType.OS
list_users_dir_request.pathspec.path = "/Users"
self.CallClient(
server_stubs.ListDirectory,
request=list_users_dir_request,
next_state=self._ProcessMacosListUsersDirectory.__name__,
)
elif self.client_os == "Windows":
# pylint: disable=line-too-long
# fmt: off
if self.rrg_support:
self.CallRRG(
action=rrg_pb2.GET_WINREG_VALUE,
args=rrg_get_winreg_value_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SOFTWARE\Microsoft\Windows NT\CurrentVersion",
name="SystemRoot",
),
next_state=self._ProcessRRGWindowsEnvSystemRoot.__name__,
)
self.CallRRG(
action=rrg_pb2.GET_WINREG_VALUE,
args=rrg_get_winreg_value_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SOFTWARE\Microsoft\Windows\CurrentVersion",
name="ProgramFilesDir",
),
next_state=self._ProcessRRGWindowsEnvProgramFilesDir.__name__,
)
self.CallRRG(
action=rrg_pb2.GET_WINREG_VALUE,
args=rrg_get_winreg_value_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SOFTWARE\Microsoft\Windows\CurrentVersion",
name="ProgramFilesDir (x86)",
),
next_state=self._ProcessRRGWindowsEnvProgramFilesDirX86.__name__,
)
self.CallRRG(
action=rrg_pb2.GET_WINREG_VALUE,
args=rrg_get_winreg_value_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SOFTWARE\Microsoft\Windows\CurrentVersion",
name="CommonFilesDir",
),
next_state=self._ProcessRRGWindowsEnvCommonFilesDir.__name__,
)
self.CallRRG(
action=rrg_pb2.GET_WINREG_VALUE,
args=rrg_get_winreg_value_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SOFTWARE\Microsoft\Windows\CurrentVersion",
name="CommonFilesDir (x86)",
),
next_state=self._ProcessRRGWindowsEnvCommonFilesDirX86.__name__,
)
self.CallRRG(
action=rrg_pb2.GET_WINREG_VALUE,
args=rrg_get_winreg_value_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList",
name="ProgramData",
),
next_state=self._ProcessRRGWindowsEnvProgramData.__name__,
)
self.CallRRG(
action=rrg_pb2.GET_WINREG_VALUE,
args=rrg_get_winreg_value_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment",
name="DriverData",
),
next_state=self._ProcessRRGWindowsEnvDriverData.__name__,
)
self.CallRRG(
action=rrg_pb2.GET_WINREG_VALUE,
args=rrg_get_winreg_value_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SYSTEM\Select",
name="Current",
),
next_state=self._ProcessRRGWindowsCurrentControlSet.__name__,
)
self.CallRRG(
action=rrg_pb2.GET_WINREG_VALUE,
args=rrg_get_winreg_value_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SYSTEM\CurrentControlSet\Control\Nls\CodePage",
name="ACP",
),
next_state=self._ProcessRRGWindowsCodePage.__name__,
)
self.CallRRG(
action=rrg_pb2.GET_WINREG_VALUE,
args=rrg_get_winreg_value_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters",
name="Domain",
),
next_state=self._ProcessRRGWindowsDomain.__name__,
)
self.CallRRG(
action=rrg_pb2.GET_WINREG_VALUE,
args=rrg_get_winreg_value_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation",
name="TimeZoneKeyName",
),
next_state=self._ProcessRRGWindowsTimeZoneKeyName.__name__,
)
self.CallRRG(
action=rrg_pb2.GET_WINREG_VALUE,
args=rrg_get_winreg_value_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment",
name="TEMP",
),
next_state=self._ProcessRRGWindowsEnvTemp.__name__,
)
self.CallRRG(
action=rrg_pb2.GET_WINREG_VALUE,
args=rrg_get_winreg_value_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment",
name="Path",
),
next_state=self._ProcessRRGWindowsEnvPath.__name__,
)
self.CallRRG(
action=rrg_pb2.GET_WINREG_VALUE,
args=rrg_get_winreg_value_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment",
name="ComSpec",
),
next_state=self._ProcessRRGWindowsEnvComSpec.__name__,
)
self.CallRRG(
action=rrg_pb2.GET_WINREG_VALUE,
args=rrg_get_winreg_value_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment",
name="windir",
),
next_state=self._ProcessRRGWindowsEnvWindir.__name__,
)
self.CallRRG(
action=rrg_pb2.GET_WINREG_VALUE,
args=rrg_get_winreg_value_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList",
name="ProfilesDirectory",
),
next_state=self._ProcessRRGWindowsProfilesDirectory.__name__,
)
self.CallRRG(
action=rrg_pb2.GET_WINREG_VALUE,
args=rrg_get_winreg_value_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList",
name="AllUsersProfile",
),
next_state=self._ProcessRRGWindowsEnvAllUsersProfile.__name__,
)
self.CallRRG(
action=rrg_pb2.LIST_WINREG_KEYS,
args=rrg_list_winreg_keys_pb2.Args(
root=rrg_winreg_pb2.LOCAL_MACHINE,
key=r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList",
),
next_state=self._ProcessRRGWindowsProfileList.__name__,
)
# WMI queries are slow, so we consider them "heavyweight".
if not self.args.lightweight:
users = self.state.knowledge_base.users
self.CallRRG(
action=rrg_pb2.QUERY_WMI,
args=rrg_query_wmi_pb2.Args(
query=f"""
SELECT SID, Name, Domain
FROM Win32_UserAccount
WHERE LocalAccount = TRUE
AND ({" OR ".join(f"SID = '{user.sid}'" for user in users)})
""",
),
next_state=self._ProcessRRGWindowsWMIUserAccount.__name__,
)
else:
      # TODO: There is no dedicated action for obtaining registry
      # values. The existing artifact collector uses the `GetFileStat`
      # action for this, which is horrible.
args = rdf_client_action.GetFileStatRequest()
args.pathspec.pathtype = rdf_paths.PathSpec.PathType.REGISTRY
args.pathspec.path = r"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\SystemRoot"
self.CallClient(
server_stubs.GetFileStat,
args,
next_state=self._ProcessWindowsEnvSystemRoot.__name__,
)
args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\ProgramFilesDir"
self.CallClient(
server_stubs.GetFileStat,
args,
next_state=self._ProcessWindowsEnvProgramFilesDir.__name__,
)
args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\ProgramFilesDir (x86)"
self.CallClient(
server_stubs.GetFileStat,
args,
next_state=self._ProcessWindowsEnvProgramFilesDirX86.__name__,
)
args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\CommonFilesDir"
self.CallClient(
server_stubs.GetFileStat,
args,
next_state=self._ProcessWindowsEnvCommonFilesDir.__name__,
)
args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\CommonFilesDir (x86)"
self.CallClient(
server_stubs.GetFileStat,
args,
next_state=self._ProcessWindowsEnvCommonFilesDirX86.__name__,
)
args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion\ProfileList\ProgramData"
self.CallClient(
server_stubs.GetFileStat,
args,
next_state=self._ProcessWindowsEnvProgramData.__name__,
)
args.pathspec.path = r"HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment\DriverData"
self.CallClient(
server_stubs.GetFileStat,
args,
next_state=self._ProcessWindowsEnvDriverData.__name__,
)
args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\Select\Current"
self.CallClient(
server_stubs.GetFileStat,
args,
next_state=self._ProcessWindowsCurrentControlSet.__name__,
)
args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Nls\CodePage\ACP"
self.CallClient(
server_stubs.GetFileStat,
args,
next_state=self._ProcessWindowsCodePage.__name__,
)
args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Services\Tcpip\Parameters\Domain"
self.CallClient(
server_stubs.GetFileStat,
args,
next_state=self._ProcessWindowsDomain.__name__,
)
args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\TimeZoneInformation\TimeZoneKeyName"
self.CallClient(
server_stubs.GetFileStat,
args,
next_state=self._ProcessWindowsTimeZoneKeyName.__name__,
)
args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment\TEMP"
self.CallClient(
server_stubs.GetFileStat,
args,
next_state=self._ProcessWindowsEnvTemp.__name__,
)
args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment\Path"
self.CallClient(
server_stubs.GetFileStat,
args,
next_state=self._ProcessWindowsEnvPath.__name__,
)
args.pathspec.path = r"HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment\ComSpec"
self.CallClient(
server_stubs.GetFileStat,
args,
next_state=self._ProcessWindowsEnvComSpec.__name__,
)
args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment\windir"
self.CallClient(
server_stubs.GetFileStat,
args,
next_state=self._ProcessWindowsEnvWindir.__name__,
)
args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion\ProfileList\ProfilesDirectory"
self.CallClient(
server_stubs.GetFileStat,
args,
next_state=self._ProcessWindowsProfilesDirectory.__name__,
)
args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion\ProfileList\AllUsersProfile"
self.CallClient(
server_stubs.GetFileStat,
args,
next_state=self._ProcessWindowsEnvAllUsersProfile.__name__,
)
args = rdf_file_finder.FileFinderArgs()
    # TODO: There is no dedicated action for obtaining registry
    # values, but the file finder's `STAT` action can retrieve them. This
    # should be refactored once registry-specific actions are available.
args.action.action_type = rdf_file_finder.FileFinderAction.Action.STAT
args.pathtype = rdf_paths.PathSpec.PathType.REGISTRY
args.paths = [r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion\ProfileList\*\ProfileImagePath"]
# TODO: remove this when the registry+sandboxing bug
# is fixed.
args.implementation_type = rdf_paths.PathSpec.ImplementationType.DIRECT
self.CallClient(
server_stubs.VfsFileFinder,
args,
next_state=self._ProcessWindowsProfiles.__name__,
) | For each artifact, create subflows for each collector. | Start | python | google/grr | grr/server/grr_response_server/artifact.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact.py | Apache-2.0 |
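The heavyweight branch above assembles a single WMI query restricted to the SIDs already present in the knowledge base. A self-contained sketch of how that `OR` clause is built (the SIDs are made up):

```python
sids = ["S-1-5-21-111-222-333-1001", "S-1-5-21-111-222-333-1002"]
query = f"""
    SELECT SID, Name, Domain
    FROM Win32_UserAccount
    WHERE LocalAccount = TRUE
    AND ({" OR ".join(f"SID = '{sid}'" for sid in sids)})
"""
# The AND clause expands to:
#   (SID = 'S-1-5-21-111-222-333-1001' OR SID = 'S-1-5-21-111-222-333-1002')
print(query)
```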
def End(self) -> None:
"""Finish up."""
if self.client_os == "Windows":
self.state.knowledge_base = mig_client.ToRDFKnowledgeBase(
artifact_utils.ExpandKnowledgebaseWindowsEnvVars(
mig_client.ToProtoKnowledgeBase(self.state.knowledge_base),
),
)
    # TODO: `%LOCALAPPDATA%` is a frequently used variable that we may
    # fail to collect due to limitations of the Windows registry. For now, in
    # case we did not collect it, we set it to the default Windows value
    # (which should be correct almost always but is nevertheless not the most
    # robust way of handling it).
#
# Alternatively, we could develop a more general way of handling default
# environment variable values in case they are missing.
if self.client_os == "Windows":
for user in self.state.knowledge_base.users:
if not user.localappdata:
self.Log(
"Missing `%%LOCALAPPDATA%%` for '%s', using Windows default",
user.username,
)
user.localappdata = rf"{user.userprofile}\AppData\Local"
self.SendReply(self.state.knowledge_base) | Finish up. | End | python | google/grr | grr/server/grr_response_server/artifact.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact.py | Apache-2.0 |
def InitializeKnowledgeBase(self):
"""Get the existing KB or create a new one if none exists."""
# Always create a new KB to override any old values but keep os and
# version so we know which artifacts we can run.
self.state.knowledge_base = rdf_client.KnowledgeBase()
snapshot = data_store.REL_DB.ReadClientSnapshot(self.client_id)
if not snapshot or not snapshot.knowledge_base:
return
kb = snapshot.knowledge_base
state_kb = self.state.knowledge_base
state_kb.os = kb.os
state_kb.os_major_version = kb.os_major_version
state_kb.os_minor_version = kb.os_minor_version
if not state_kb.os_major_version and snapshot.os_version:
version = snapshot.os_version.split(".")
try:
state_kb.os_major_version = int(version[0])
if len(version) > 1:
state_kb.os_minor_version = int(version[1])
except ValueError:
pass | Get the existing KB or create a new one if none exists. | InitializeKnowledgeBase | python | google/grr | grr/server/grr_response_server/artifact.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact.py | Apache-2.0 |
def UploadArtifactYamlFile(
file_content,
overwrite=True,
overwrite_system_artifacts=False,
):
"""Upload a yaml or json file as an artifact to the datastore."""
loaded_artifacts = []
registry_obj = artifact_registry.REGISTRY
# Make sure all artifacts are loaded so we don't accidentally overwrite one.
registry_obj.GetArtifacts(reload_datastore_artifacts=True)
new_artifacts = registry_obj.ArtifactsFromYaml(file_content)
# A quick syntax check before we upload anything.
for artifact_value in new_artifacts:
artifact_registry.ValidateSyntax(artifact_value)
for artifact_value in new_artifacts:
registry_obj.RegisterArtifact(
artifact_value,
source="datastore",
overwrite_if_exists=overwrite,
overwrite_system_artifacts=overwrite_system_artifacts,
)
data_store.REL_DB.WriteArtifact(
mig_artifacts.ToProtoArtifact(artifact_value)
)
loaded_artifacts.append(artifact_value)
name = artifact_value.name
logging.info("Uploaded artifact %s.", name)
# Once all artifacts are loaded we can validate dependencies. Note that we do
# not have to perform a syntax validation because it is already done after
# YAML is parsed.
for artifact_value in loaded_artifacts:
artifact_registry.ValidateDependencies(artifact_value) | Upload a yaml or json file as an artifact to the datastore. | UploadArtifactYamlFile | python | google/grr | grr/server/grr_response_server/artifact.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact.py | Apache-2.0 |
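A hedged usage sketch: uploading a single hand-written artifact definition. The YAML follows the ForensicArtifacts-style layout GRR expects, but the concrete artifact, its fields, and the import path are illustrative and may not pass `ValidateSyntax` unchanged.

```python
from grr_response_server import artifact

EXAMPLE_ARTIFACT_YAML = """
name: ExampleTmpFile
doc: Collects an example file from /tmp (illustrative only).
sources:
- type: FILE
  attributes:
    paths: ['/tmp/example.txt']
supported_os: [Linux]
"""

# Requires an initialized data store; overwrites an existing artifact with the
# same name but, by default, refuses to replace system (file-defined) ones.
artifact.UploadArtifactYamlFile(EXAMPLE_ARTIFACT_YAML, overwrite=True)
```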
def LoadArtifactsOnce():
"""Loads artifacts from the datastore and from the filesystem.
Datastore gets loaded second so it can override Artifacts in the files.
"""
artifact_registry.REGISTRY.AddDefaultSources() | Loads artifacts from the datastore and from the filesystem.
Datastore gets loaded second so it can override Artifacts in the files. | LoadArtifactsOnce | python | google/grr | grr/server/grr_response_server/artifact.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact.py | Apache-2.0 |
def _HostPrefix(client_id):
"""Build a host prefix for a notification message based on a client id."""
if not client_id:
return ""
hostname = None
client_snapshot = data_store.REL_DB.ReadClientSnapshot(client_id)
if client_snapshot:
hostname = client_snapshot.knowledge_base.fqdn
if hostname:
return "%s: " % hostname
else:
return "" | Build a host prefix for a notification message based on a client id. | _HostPrefix | python | google/grr | grr/server/grr_response_server/notification.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/notification.py | Apache-2.0 |