code (string, 26-870k) | docstring (string, 1-65.6k) | func_name (string, 1-194) | language (1 class) | repo (string, 8-68) | path (string, 5-194) | url (string, 46-254) | license (4 classes) |
---|---|---|---|---|---|---|---|
def ClientIdFromObjectReference(
object_reference: objects_pb2.ObjectReference,
) -> Optional[str]:
"""Returns the client ID from the given object reference, or None."""
if object_reference.reference_type == objects_pb2.ObjectReference.CLIENT:
return object_reference.client.client_id
elif object_reference.reference_type == objects_pb2.ObjectReference.FLOW:
return object_reference.flow.client_id
elif object_reference.reference_type == objects_pb2.ObjectReference.VFS_FILE:
return object_reference.vfs_file.client_id
elif (
object_reference.reference_type
== objects_pb2.ObjectReference.APPROVAL_REQUEST
and object_reference.approval_request.approval_type
== objects_pb2.ApprovalRequest.ApprovalType.APPROVAL_TYPE_CLIENT
):
return object_reference.approval_request.subject_id
else:
return None | Returns the client ID from the given object reference, or None. | ClientIdFromObjectReference | python | google/grr | grr/server/grr_response_server/notification.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/notification.py | Apache-2.0 |
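The function above only inspects fields of an `objects_pb2.ObjectReference`. A minimal usage sketch, assuming the `grr_response_proto` package is installed; the client ID value is made up for illustration:

```python
from grr_response_proto import objects_pb2

# Build a CLIENT-type reference carrying an (illustrative) client ID, using
# only the fields the function above reads.
ref = objects_pb2.ObjectReference()
ref.reference_type = objects_pb2.ObjectReference.CLIENT
ref.client.client_id = "C.0123456789abcdef"  # hypothetical client ID

# ClientIdFromObjectReference(ref) would return "C.0123456789abcdef" here,
# and None for reference types that carry no client ID.
print(ref.client.client_id)
```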
def Notify(
username: str,
notification_type: "objects_pb2.UserNotification.Type",
message: str,
object_reference: Optional[objects_pb2.ObjectReference],
) -> None:
"""Schedules a new-style REL_DB user notification."""
# Do not try to notify system users (e.g. Cron).
if username in access_control.SYSTEM_USERS:
return
if object_reference:
client_id = ClientIdFromObjectReference(object_reference)
if client_id:
message = _HostPrefix(client_id) + message
n = objects_pb2.UserNotification(
username=username,
notification_type=notification_type,
state=objects_pb2.UserNotification.State.STATE_PENDING,
message=message,
reference=object_reference,
)
data_store.REL_DB.WriteUserNotification(n) | Schedules a new-style REL_DB user notification. | Notify | python | google/grr | grr/server/grr_response_server/notification.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/notification.py | Apache-2.0 |
def InitGRRRootAPI():
"""Initializes the GRR root API."""
return api.GrrApi(
connector=api_shell_raw_access_lib.RawConnector(
context=api_call_context.ApiCallContext(username="GRRConfigUpdater"),
page_size=_GRR_API_PAGE_SIZE,
)
).root | Initializes the GRR root API. | InitGRRRootAPI | python | google/grr | grr/server/grr_response_server/maintenance_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/maintenance_utils.py | Apache-2.0 |
def UploadSignedConfigBlob(content, aff4_path, client_context=None, limit=None):
"""Upload a signed blob into the datastore.
Args:
content: File content to upload.
aff4_path: aff4 path to upload to.
client_context: The configuration contexts to use.
limit: The maximum size of the chunk to use.
Raises:
IOError: On failure to write.
"""
if limit is None:
limit = config.CONFIG["Datastore.maximum_blob_size"]
# Get the values of these parameters which apply to the client running on the
# target platform.
if client_context is None:
# Default to the windows client.
client_context = ["Platform:Windows", "Client Context"]
config.CONFIG.Validate(
parameters="PrivateKeys.executable_signing_private_key"
)
signing_key = config.CONFIG.Get(
"PrivateKeys.executable_signing_private_key", context=client_context
)
verification_key = config.CONFIG.Get(
"Client.executable_signing_public_key", context=client_context
)
signed_binary_utils.WriteSignedBinary(
rdfvalue.RDFURN(aff4_path),
content,
signing_key,
public_key=verification_key,
chunk_size=limit,
)
logging.info("Uploaded to %s", aff4_path) | Upload a signed blob into the datastore.
Args:
content: File content to upload.
aff4_path: aff4 path to upload to.
client_context: The configuration contexts to use.
limit: The maximum size of the chunk to use.
Raises:
IOError: On failure to write. | UploadSignedConfigBlob | python | google/grr | grr/server/grr_response_server/maintenance_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/maintenance_utils.py | Apache-2.0 |
def _CheckIfHuntTaskWasAssigned(self, client_id, hunt_id):
"""Will return True if hunt's task was assigned to this client before."""
flow_id = hunt_id
try:
cur_flow = data_store.REL_DB.ReadFlowObject(client_id, flow_id)
except db.UnknownFlowError:
return False
if cur_flow.parent_hunt_id != hunt_id:
raise RuntimeError(
"Cannot start Hunt {} on {} because unrelated {} already exists."
.format(hunt_id, client_id, cur_flow.long_flow_id)
)
return True | Will return True if hunt's task was assigned to this client before. | _CheckIfHuntTaskWasAssigned | python | google/grr | grr/server/grr_response_server/foreman.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/foreman.py | Apache-2.0 |
def _RunAction(self, rule, client_id):
"""Run all the actions specified in the rule.
Args:
rule: Rule whose actions are to be executed.
client_id: Id of a client where rule's actions are to be executed.
Returns:
Number of actions started.
"""
actions_count = 0
try:
if self._CheckIfHuntTaskWasAssigned(client_id, rule.hunt_id):
raise flow.CanNotStartFlowWithExistingIdError(client_id, rule.hunt_id)
hunt.StartHuntFlowOnClient(client_id, rule.hunt_id)
logging.info(
"Foreman: Started hunt %s on client %s.", rule.hunt_id, client_id
)
actions_count += 1
except flow.CanNotStartFlowWithExistingIdError:
logging.info(
"Foreman: ignoring hunt %s on client %s: was started here before",
rule.hunt_id,
client_id,
)
# There could be all kinds of errors we don't know about when starting the
# hunt so we catch everything here.
except Exception as e: # pylint: disable=broad-except
logging.exception(
"Failure running hunt %s on client %s: %s", rule.hunt_id, client_id, e
)
return actions_count | Run all the actions specified in the rule.
Args:
rule: Rule whose actions are to be executed.
client_id: Id of a client where rule's actions are to be executed.
Returns:
Number of actions started. | _RunAction | python | google/grr | grr/server/grr_response_server/foreman.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/foreman.py | Apache-2.0 |
def AssignTasksToClient(self, client_id):
"""Examines our rules and starts up flows based on the client.
Args:
client_id: Client id of the client for tasks to be assigned.
Returns:
Number of assigned tasks.
"""
proto_rules = data_store.REL_DB.ReadAllForemanRules()
rules = [
mig_foreman_rules.ToRDFForemanCondition(cond) for cond in proto_rules
]
if not rules:
return 0
last_foreman_run = self._GetLastForemanRunTime(client_id)
latest_rule_creation_time = max(rule.creation_time for rule in rules)
if latest_rule_creation_time > last_foreman_run:
# Update the latest checked rule on the client.
self._SetLastForemanRunTime(client_id, latest_rule_creation_time)
relevant_rules = []
expired_rules = []
now = rdfvalue.RDFDatetime.Now()
for rule in rules:
if rule.expiration_time < now:
expired_rules.append(rule)
elif rule.creation_time > last_foreman_run:
relevant_rules.append(rule)
actions_count = 0
if relevant_rules:
client_data = data_store.REL_DB.ReadClientFullInfo(client_id)
if client_data is None:
return 0
client_data = mig_objects.ToRDFClientFullInfo(client_data)
for rule in relevant_rules:
if rule.Evaluate(client_data):
actions_count += self._RunAction(rule, client_id)
if expired_rules:
for rule in expired_rules:
hunt.CompleteHuntIfExpirationTimeReached(rule.hunt_id)
data_store.REL_DB.RemoveExpiredForemanRules()
return actions_count | Examines our rules and starts up flows based on the client.
Args:
client_id: Client id of the client for tasks to be assigned.
Returns:
Number of assigned tasks. | AssignTasksToClient | python | google/grr | grr/server/grr_response_server/foreman.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/foreman.py | Apache-2.0 |
def GetNewEventId(self, event_time=None):
"""Return a unique Event ID string."""
if event_time is None:
event_time = int(time.time() * 1e6)
return "%s:%s:%s" % (event_time, socket.gethostname(), os.getpid()) | Return a unique Event ID string. | GetNewEventId | python | google/grr | grr/server/grr_response_server/server_logging.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py | Apache-2.0 |
def LogHttpAdminUIAccess(self, request, response):
"""Log an http based api call.
Args:
request: A WSGI request object.
response: A WSGI response object.
"""
# TODO(user): generate event_id elsewhere and use it for all the log
# messages that have to do with handling corresponding request.
event_id = self.GetNewEventId()
api_method = response.headers.get("X-API-Method", "unknown")
api_reason = "none"
if response.context:
approval = response.context.approval
if approval:
api_reason = approval.reason
log_msg = "%s API call [%s] by %s (reason: %s): %s [%d]" % (
event_id,
api_method,
request.user,
api_reason,
request.full_path,
response.status_code,
)
logging.info(log_msg)
if response.headers.get("X-No-Log") != "True":
entry = models_events.APIAuditEntryFromHttpRequestResponse(
request, response
)
data_store.REL_DB.WriteAPIAuditEntry(entry) | Log an http based api call.
Args:
request: A WSGI request object.
response: A WSGI response object. | LogHttpAdminUIAccess | python | google/grr | grr/server/grr_response_server/server_logging.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py | Apache-2.0 |
def LogHttpFrontendAccess(self, request, source=None, message_count=None):
"""Write a log entry for a Frontend or UI Request.
Args:
request: A HttpRequest protobuf.
source: Client id of the client initiating the request. Optional.
message_count: Number of messages received from the client. Optional.
"""
# TODO(user): generate event_id elsewhere and use it for all the log
# messages that have to do with handling corresponding request.
event_id = self.GetNewEventId()
log_msg = "%s-%s [%s]: %s %s %s %s (%d)" % (
event_id,
request.source_ip,
source or "<unknown>",
request.method,
request.url,
request.user_agent,
request.user,
message_count or 0,
)
logging.info(log_msg) | Write a log entry for a Frontend or UI Request.
Args:
request: A HttpRequest protobuf.
source: Client id of the client initiating the request. Optional.
message_count: Number of messages received from the client. Optional. | LogHttpFrontendAccess | python | google/grr | grr/server/grr_response_server/server_logging.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py | Apache-2.0 |
def flush(self):
"""Flush the buffer.
This is called when the buffer is really full; we just drop the oldest
messages.
"""
self.buffer = self.buffer[-self.capacity :] | Flush the buffer.
This is called when the buffer is really full; we just drop the oldest
messages. | flush | python | google/grr | grr/server/grr_response_server/server_logging.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py | Apache-2.0 |
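The `flush` above keeps only the newest `capacity` records via a negative slice. The same idiom on a plain list, as a tiny illustration:

```python
# Keep only the newest `capacity` items, dropping the oldest ones.
capacity = 3
buffer = ["r1", "r2", "r3", "r4", "r5"]
buffer = buffer[-capacity:]
print(buffer)  # ['r3', 'r4', 'r5'] -- the two oldest records were dropped
```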
def handleError(self, record):
"""Just ignore socket errors - the syslog server might come back.""" | Just ignore socket errors - the syslog server might come back. | handleError | python | google/grr | grr/server/grr_response_server/server_logging.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py | Apache-2.0 |
def __init__(self, *args, **kwargs):
"""Initializes LogMetricsHandler."""
super().__init__(*args, **kwargs)
self.setLevel(logging.ERROR) | Initializes LogMetricsHandler. | __init__ | python | google/grr | grr/server/grr_response_server/server_logging.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py | Apache-2.0 |
def emit(self, record: logging.LogRecord):
"""Overrides Handler.emit()."""
# From https://docs.python.org/3/library/logging.html#logging.Logger
# logging.error() and logging.exception() log with level ERROR.
# logging.critical() logs with level CRITICAL.
if record.levelno == logging.ERROR:
LOG_CALLS_COUNTER.Increment(fields=["ERROR"])
elif record.levelno == logging.CRITICAL:
LOG_CALLS_COUNTER.Increment(fields=["CRITICAL"]) | Overrides Handler.emit(). | emit | python | google/grr | grr/server/grr_response_server/server_logging.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py | Apache-2.0 |
def InitErrorLogsMonitoring():
"""Sets up error logs monitoring."""
logging.root.addHandler(ErrorLogsHandler())
logging.info("Initialized ErrorLogsHandler.") | Sets up error logs monitoring. | InitErrorLogsMonitoring | python | google/grr | grr/server/grr_response_server/server_logging.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py | Apache-2.0 |
def LogInit():
"""Configure the logging subsystem."""
logging.debug("Initializing Logging subsystem.")
# The root logger.
logger = logging.getLogger()
memory_handlers = [
m
for m in logger.handlers
if m.__class__.__name__ == "PreLoggingMemoryHandler"
]
# Clear all handlers.
logger.handlers = list(GetLogHandlers())
SetLogLevels()
# Now flush the old messages into the log files.
for handler in memory_handlers:
for record in handler.buffer:
logger.handle(record) | Configure the logging subsystem. | LogInit | python | google/grr | grr/server/grr_response_server/server_logging.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py | Apache-2.0 |
def AppLogInit():
"""Initialize the Application Log.
This log is what will be used whenever someone does a log.LOGGER call. These
are used for more detailed application or event logs.
Returns:
GrrApplicationLogger object
"""
logging.debug("Initializing Application Logger.")
return GrrApplicationLogger() | Initialize the Application Log.
This log is what will be used whenever someone does a log.LOGGER call. These
are used for more detailed application or event logs.
Returns:
GrrApplicationLogger object | AppLogInit | python | google/grr | grr/server/grr_response_server/server_logging.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py | Apache-2.0 |
def ServerLoggingStartupInit():
"""Initialize the server logging configuration."""
global LOGGER
# `local_log` requires `Logging.path` configuration variable to be set. If it
# is not, we fallback to normal logging (as specified in the config or flags).
if local_log and config.CONFIG["Logging.path"]:
logging.debug("Using local LogInit from %s", local_log)
local_log.LogInit()
logging.debug("Using local AppLogInit from %s", local_log)
LOGGER = local_log.AppLogInit()
else:
LogInit()
LOGGER = AppLogInit()
InitErrorLogsMonitoring() | Initialize the server logging configuration. | ServerLoggingStartupInit | python | google/grr | grr/server/grr_response_server/server_logging.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py | Apache-2.0 |
def Run(self):
"""The actual cron job logic goes into this method.""" | The actual cron job logic goes into this method. | Run | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
def StartRun(self, wait_for_start_event, signal_event, wait_for_write_event):
"""Starts a new run for the given cron job."""
# Signal that the cron thread has started. This way the cron scheduler
# will know that the task is not sitting in a threadpool queue, but is
# actually executing.
wait_for_start_event.set()
# Wait until the cron scheduler acknowledges the run. If it doesn't
# acknowledge, just return (it means that the cron scheduler considers
# this task as "not started" and has returned the lease so that another
# worker can pick it up).
if not signal_event.wait(TASK_STARTUP_WAIT):
return
try:
logging.info("Processing cron job: %s", self.job.cron_job_id)
self.run_state.started_at = rdfvalue.RDFDatetime.Now()
self.run_state.status = rdf_cronjobs.CronJobRun.CronJobRunStatus.RUNNING
data_store.REL_DB.WriteCronJobRun(
mig_cronjobs.ToProtoCronJobRun(self.run_state)
)
data_store.REL_DB.UpdateCronJob(
self.job.cron_job_id,
last_run_time=rdfvalue.RDFDatetime.Now(),
current_run_id=self.run_state.run_id,
forced_run_requested=False,
)
finally:
# Notify the cron scheduler that all the DB updates are done. At this
# point the cron scheduler can safely return this job's lease.
wait_for_write_event.set()
try:
self.Run()
self.run_state.status = rdf_cronjobs.CronJobRun.CronJobRunStatus.FINISHED
except LifetimeExceededError:
self.run_state.status = (
rdf_cronjobs.CronJobRun.CronJobRunStatus.LIFETIME_EXCEEDED
)
CRON_JOB_FAILURE.Increment(fields=[self.job.cron_job_id])
except Exception as e: # pylint: disable=broad-except
logging.exception(
"Cronjob %s failed with an error: %s", self.job.cron_job_id, e
)
CRON_JOB_FAILURE.Increment(fields=[self.job.cron_job_id])
self.run_state.status = rdf_cronjobs.CronJobRun.CronJobRunStatus.ERROR
self.run_state.backtrace = "{}\n\n{}".format(e, traceback.format_exc())
finally:
self.run_state.finished_at = rdfvalue.RDFDatetime.Now()
elapsed = self.run_state.finished_at - self.run_state.started_at
CRON_JOB_LATENCY.RecordEvent(
elapsed.ToFractional(rdfvalue.SECONDS), fields=[self.job.cron_job_id]
)
if self.job.lifetime:
expiration_time = self.run_state.started_at + self.job.lifetime
if self.run_state.finished_at > expiration_time:
self.run_state.status = (
rdf_cronjobs.CronJobRun.CronJobRunStatus.LIFETIME_EXCEEDED
)
CRON_JOB_TIMEOUT.Increment(fields=[self.job.cron_job_id])
data_store.REL_DB.WriteCronJobRun(
mig_cronjobs.ToProtoCronJobRun(self.run_state)
)
current_job = data_store.REL_DB.ReadCronJob(self.job.cron_job_id)
# If no other job was started while we were running, update last status
# information.
if current_job.current_run_id == self.run_state.run_id:
data_store.REL_DB.UpdateCronJob(
self.job.cron_job_id,
current_run_id=None,
last_run_status=int(self.run_state.status),
) | Starts a new run for the given cron job. | StartRun | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
def HeartBeat(self):
"""Terminates a cronjob-run if it has exceeded its maximum runtime.
This is a no-op for cronjobs that allow overruns.
Raises:
LifetimeExceededError: If the cronjob has exceeded its maximum runtime.
"""
# In prod, self.job.lifetime is guaranteed to always be set, and is
# always equal to self.__class__.lifetime. Some tests, however, do not
# set the job lifetime, which isn't great.
if self.allow_overruns or not self.job.lifetime:
return
runtime = rdfvalue.RDFDatetime.Now() - self.run_state.started_at
if runtime > self.lifetime:
raise LifetimeExceededError(
"Cronjob run has exceeded the maximum runtime of %s." % self.lifetime
) | Terminates a cronjob-run if it has exceeded its maximum runtime.
This is a no-op for cronjobs that allow overruns.
Raises:
LifetimeExceededError: If the cronjob has exceeded its maximum runtime. | HeartBeat | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
def CreateJob(self, cron_args=None, job_id=None, enabled=True):
"""Creates a cron job that runs given flow with a given frequency.
Args:
cron_args: A protobuf of type rdf_cronjobs.CreateCronJobArgs.
job_id: Use this job_id instead of an autogenerated unique name (used for
system cron jobs - we want them to have well-defined persistent name).
enabled: If False, the job object will be created, but will be disabled.
Returns:
URN of the cron job created.
Raises:
ValueError: This function expects an arg protobuf that starts a
CreateAndRunGenericHuntFlow flow. If the args specify
something else, ValueError is raised.
"""
if not cron_args.flow_name:
raise ValueError("Unspecified flow name")
if not job_id:
# TODO: UInt16 is too small for randomly generated IDs.
uid = random.UInt16()
job_id = "%s_%s" % (cron_args.flow_name, uid)
args = rdf_cronjobs.CronJobAction(
action_type=rdf_cronjobs.CronJobAction.ActionType.HUNT_CRON_ACTION,
hunt_cron_action=rdf_cronjobs.HuntCronAction(
flow_name=cron_args.flow_name,
flow_args=cron_args.flow_args,
hunt_runner_args=cron_args.hunt_runner_args,
),
)
# TODO: Refactor to proto-only.
rdf_job = rdf_cronjobs.CronJob(
cron_job_id=job_id,
description=cron_args.description,
frequency=cron_args.frequency,
lifetime=cron_args.lifetime,
allow_overruns=cron_args.allow_overruns,
args=args,
enabled=enabled,
created_at=rdfvalue.RDFDatetime.Now(),
)
proto_job = mig_cronjobs.ToProtoCronJob(rdf_job)
data_store.REL_DB.WriteCronJob(proto_job)
return job_id | Creates a cron job that runs given flow with a given frequency.
Args:
cron_args: A protobuf of type rdf_cronjobs.CreateCronJobArgs.
job_id: Use this job_id instead of an autogenerated unique name (used for
system cron jobs - we want them to have well-defined persistent name).
enabled: If False, the job object will be created, but will be disabled.
Returns:
URN of the cron job created.
Raises:
ValueError: This function expects an arg protobuf that starts a
CreateAndRunGenericHuntFlow flow. If the args specify
something else, ValueError is raised. | CreateJob | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
def ListJobs(self) -> Sequence[str]:
"""Returns a list of ids of all currently running cron jobs."""
return [job.cron_job_id for job in data_store.REL_DB.ReadCronJobs()] | Returns a list of ids of all currently running cron jobs. | ListJobs | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
def ReadJobs(self) -> Sequence[rdf_cronjobs.CronJob]:
"""Returns a list of all currently running cron jobs."""
protos = data_store.REL_DB.ReadCronJobs()
return [mig_cronjobs.ToRDFCronJob(job) for job in protos] | Returns a list of all currently running cron jobs. | ReadJobs | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
def EnableJob(self, job_id: str) -> None:
"""Enable cron job with the given id."""
return data_store.REL_DB.EnableCronJob(job_id) | Enable cron job with the given id. | EnableJob | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
def DisableJob(self, job_id: str) -> None:
"""Disable cron job with the given id."""
return data_store.REL_DB.DisableCronJob(job_id) | Disable cron job with the given id. | DisableJob | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
def DeleteJob(self, job_id: str) -> None:
"""Deletes cron job with the given URN."""
return data_store.REL_DB.DeleteCronJob(job_id) | Deletes cron job with the given URN. | DeleteJob | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
def RunOnce(self, names: Optional[Sequence[str]] = None) -> None:
"""Tries to lock and run cron jobs.
Args:
names: List of cron jobs to run. If unset, run them all.
Raises:
OneOrMoreCronJobsFailedError: if one or more individual cron jobs fail.
Note: a failure of a single cron job doesn't preclude other cron jobs
from running.
"""
proto_leased_jobs = data_store.REL_DB.LeaseCronJobs(
cronjob_ids=names,
lease_time=rdfvalue.Duration.From(10, rdfvalue.MINUTES),
)
logging.info("Leased %d cron jobs for processing.", len(proto_leased_jobs))
if not proto_leased_jobs:
return
rdf_leased_jobs = [
mig_cronjobs.ToRDFCronJob(job) for job in proto_leased_jobs
]
errors = {}
processed_count = 0
for job in sorted(rdf_leased_jobs, key=lambda j: j.cron_job_id):
if self.TerminateStuckRunIfNeeded(job):
continue
if not self.JobDueToRun(job):
continue
try:
if self.RunJob(job):
processed_count += 1
else:
logging.info(
"Can't schedule cron job %s on a thread pool "
"(all threads are busy or CPU load is high)",
job.cron_job_id,
)
break
except Exception as e: # pylint: disable=broad-except
logging.exception("Cron job %s has failed: %s", job.cron_job_id, e)
errors[job.cron_job_id] = e
logging.info("Processed %d cron jobs.", processed_count)
updated_proto_leased_jobs = [
mig_cronjobs.ToProtoCronJob(job) for job in rdf_leased_jobs
]
data_store.REL_DB.ReturnLeasedCronJobs(updated_proto_leased_jobs)
if errors:
raise OneOrMoreCronJobsFailedError(errors) | Tries to lock and run cron jobs.
Args:
names: List of cron jobs to run. If unset, run them all.
Raises:
OneOrMoreCronJobsFailedError: if one or more individual cron jobs fail.
Note: a failure of a single cron job doesn't preclude other cron jobs
from running. | RunOnce | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
def TerminateStuckRunIfNeeded(self, job: rdf_cronjobs.CronJob) -> bool:
"""Cleans up job state if the last run is stuck."""
if job.current_run_id and job.last_run_time and job.lifetime:
now = rdfvalue.RDFDatetime.Now()
# We add an additional 10 minutes to give the job run a chance to kill itself
# during one of the HeartBeat calls (HeartBeat checks if a cron job run has
# been running too long and raises if it has).
expiration_time = (
job.last_run_time
+ job.lifetime
+ rdfvalue.Duration.From(10, rdfvalue.MINUTES)
)
if now > expiration_time:
proto_run = data_store.REL_DB.ReadCronJobRun(
job.cron_job_id, job.current_run_id
)
proto_run.status = (
flows_pb2.CronJobRun.CronJobRunStatus.LIFETIME_EXCEEDED
)
proto_run.finished_at = now.AsMicrosecondsSinceEpoch()
data_store.REL_DB.WriteCronJobRun(proto_run)
data_store.REL_DB.UpdateCronJob(
job.cron_job_id,
current_run_id=None,
last_run_status=proto_run.status,
)
CRON_JOB_LATENCY.RecordEvent(
(now - job.last_run_time).ToFractional(rdfvalue.SECONDS),
fields=[job.cron_job_id],
)
CRON_JOB_TIMEOUT.Increment(fields=[job.cron_job_id])
return True
return False | Cleans up job state if the last run is stuck. | TerminateStuckRunIfNeeded | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
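The stuck-run check above treats a run as stuck once `last_run_time + lifetime + 10 minutes` is in the past. A worked example of that cutoff with plain `datetime`, purely as an illustration of the arithmetic:

```python
from datetime import datetime, timedelta, timezone

last_run_time = datetime(2024, 1, 1, 12, 0, tzinfo=timezone.utc)
lifetime = timedelta(hours=1)   # job's declared maximum runtime
grace = timedelta(minutes=10)   # extra time for HeartBeat to fire first

expiration_time = last_run_time + lifetime + grace
now = datetime(2024, 1, 1, 13, 30, tzinfo=timezone.utc)

# True -> the run would be marked LIFETIME_EXCEEDED and the job cleaned up.
print(now > expiration_time)
```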
def RunJob(self, job: rdf_cronjobs.CronJob) -> bool:
"""Does the actual work of the Cron, if the job is due to run.
Args:
job: The cronjob rdfvalue that should be run. Must be leased.
Returns:
A boolean indicating if this cron job was started or not. False may
be returned when the threadpool is already full.
Raises:
LockError: if the object is not locked.
ValueError: If the job argument is invalid.
"""
if not job.leased_until:
raise LockError("CronJob must be leased for Run() to be called.")
if job.leased_until < rdfvalue.RDFDatetime.Now():
raise LockError("CronJob lease expired for %s." % job.cron_job_id)
logging.info("Starting cron job: %s", job.cron_job_id)
if job.args.action_type == job.args.ActionType.SYSTEM_CRON_ACTION:
cls_name = job.args.system_cron_action.job_class_name
job_cls = SystemCronJobRegistry.CronJobClassByName(cls_name)
name = "%s runner" % cls_name
elif job.args.action_type == job.args.ActionType.HUNT_CRON_ACTION:
job_cls = CronJobRegistry.CronJobClassByName("RunHunt")
name = "Hunt runner"
else:
raise ValueError(
"CronJob %s doesn't have a valid args type set." % job.cron_job_id
)
run_state = rdf_cronjobs.CronJobRun(
cron_job_id=job.cron_job_id, status="RUNNING"
)
run_state.GenerateRunId()
run_obj = job_cls(run_state, job)
wait_for_start_event, signal_event, wait_for_write_event = (
threading.Event(),
threading.Event(),
threading.Event(),
)
try:
self._GetThreadPool().AddTask(
target=run_obj.StartRun,
args=(wait_for_start_event, signal_event, wait_for_write_event),
name=name,
blocking=False,
inline=False,
)
if not wait_for_start_event.wait(TASK_STARTUP_WAIT):
logging.error(
"Cron job run task for %s is too slow to start.", job.cron_job_id
)
# Most likely the thread pool is full and the task is sitting on the
# queue. Make sure we don't put more things on the queue by returning
# False.
return False
# We know that the cron job task has started, unblock it by setting
# the signal event. If signal_event is not set (this happens if the
# task sits on a ThreadPool's queue doing nothing, see the
# if-statement above) the task will just be a no-op when ThreadPool
# finally gets to it. This way we can ensure that we can safely return
# the lease and let another worker schedule the same job.
signal_event.set()
wait_for_write_event.wait(TASK_STARTUP_WAIT)
return True
except threadpool.Full:
return False | Does the actual work of the Cron, if the job is due to run.
Args:
job: The cronjob rdfvalue that should be run. Must be leased.
Returns:
A boolean indicating if this cron job was started or not. False may
be returned when the threadpool is already full.
Raises:
LockError: if the object is not locked.
ValueError: If the job argument is invalid. | RunJob | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
def JobIsRunning(self, job):
"""Returns True if there's a currently running iteration of this job."""
return bool(job.current_run_id) | Returns True if there's a currently running iteration of this job. | JobIsRunning | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
def JobDueToRun(self, job: rdf_cronjobs.CronJob) -> bool:
"""Determines if the given job is due for another run.
Args:
job: The cron job rdfvalue object.
Returns:
True if it is time to run based on the specified frequency.
"""
if not job.enabled:
return False
if job.forced_run_requested:
return True
now = rdfvalue.RDFDatetime.Now()
if (
job.last_run_time is not None
and job.last_run_time + job.frequency > now
):
return False
# No currently executing job - lets go.
if not job.current_run_id:
return True
# There is a job executing but we allow overruns.
if job.allow_overruns:
return True
return False | Determines if the given job is due for another run.
Args:
job: The cron job rdfvalue object.
Returns:
True if it is time to run based on the specified frequency. | JobDueToRun | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
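The scheduling decision above reduces to a handful of ordered checks. A pure-Python sketch of the same ordering, with a `SimpleNamespace` standing in for the CronJob rdfvalue (field names mirror the ones used above; values are made up):

```python
from datetime import datetime, timedelta
from types import SimpleNamespace

def job_due_to_run(job, now):
    """Mirrors the ordering of checks in JobDueToRun (illustrative only)."""
    if not job.enabled:
        return False
    if job.forced_run_requested:
        return True
    if job.last_run_time is not None and job.last_run_time + job.frequency > now:
        return False
    if not job.current_run_id:       # nothing currently running
        return True
    return bool(job.allow_overruns)  # something is running; overruns decide

job = SimpleNamespace(
    enabled=True, forced_run_requested=False,
    last_run_time=datetime(2024, 1, 1, 12, 0), frequency=timedelta(hours=1),
    current_run_id=None, allow_overruns=False,
)
print(job_due_to_run(job, datetime(2024, 1, 1, 13, 30)))  # True: frequency elapsed
```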
def DeleteOldRuns(
self, cutoff_timestamp: Optional[rdfvalue.RDFDatetime] = None
) -> None:
"""Deletes runs that were started before the timestamp given."""
if cutoff_timestamp is None:
raise ValueError("cutoff_timestamp can't be None")
return data_store.REL_DB.DeleteOldCronJobRuns(
cutoff_timestamp=cutoff_timestamp
) | Deletes runs that were started before the timestamp given. | DeleteOldRuns | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
def ScheduleSystemCronJobs(names: Optional[Sequence[str]] = None) -> None:
"""Schedules all system cron jobs."""
errors = []
disabled_classes = config.CONFIG["Cron.disabled_cron_jobs"]
for name in disabled_classes:
try:
SystemCronJobRegistry.CronJobClassByName(name)
except ValueError:
errors.append("Cron job not found: %s." % name)
continue
if names is None:
names = SystemCronJobRegistry.SYSTEM_CRON_REGISTRY.keys()
for name in names:
cls = SystemCronJobRegistry.CronJobClassByName(name)
enabled = cls.enabled and name not in disabled_classes
system = rdf_cronjobs.CronJobAction.ActionType.SYSTEM_CRON_ACTION
args = rdf_cronjobs.CronJobAction(
action_type=system,
system_cron_action=rdf_cronjobs.SystemCronAction(job_class_name=name),
)
# TODO: Refactor to proto-only.
rdf_job = rdf_cronjobs.CronJob(
cron_job_id=name,
args=args,
enabled=enabled,
frequency=cls.frequency,
lifetime=cls.lifetime,
allow_overruns=cls.allow_overruns,
created_at=rdfvalue.RDFDatetime.Now(),
)
proto_job = mig_cronjobs.ToProtoCronJob(rdf_job)
data_store.REL_DB.WriteCronJob(proto_job)
if errors:
raise ValueError(
"Error(s) while parsing Cron.disabled_cron_jobs: %s" % errors
) | Schedules all system cron jobs. | ScheduleSystemCronJobs | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
def Run(self):
"""Runs a working thread and waits for it to finish."""
self.RunAsync().join() | Runs a working thread and waits for it to finish. | Run | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
def RunAsync(self):
"""Runs a working thread and returns immediately."""
self.running_thread = threading.Thread(
name=self.thread_name, target=self._RunLoop
)
self.running_thread.daemon = True
self.running_thread.start()
return self.running_thread | Runs a working thread and returns immediately. | RunAsync | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
def InitializeCronWorkerOnce():
"""Init hook for cron job worker."""
global _cron_worker
# Start the cron thread if configured to.
if config.CONFIG["Cron.active"]:
_cron_worker = CronWorker()
_cron_worker.RunAsync() | Init hook for cron job worker. | InitializeCronWorkerOnce | python | google/grr | grr/server/grr_response_server/cronjobs.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py | Apache-2.0 |
def SendGrrMessageThroughFleetspeak(
grr_id: str,
grr_msg: rdf_flows.GrrMessage,
) -> None:
"""Sends the given GrrMessage through FS with retrying.
The send operation is retried if a `grpc.RpcError` occurs.
The maximum number of retries corresponds to the config value
`Server.fleetspeak_send_retry_attempts`.
A retry is delayed by the number of seconds specified in the config value
`Server.fleetspeak_send_retry_sleep_time_secs`.
Args:
grr_id: ID of grr client to send message to.
grr_msg: GRR message to send.
"""
fs_msg = fs_common_pb2.Message(
message_type="GrrMessage",
destination=fs_common_pb2.Address(
client_id=GRRIDToFleetspeakID(grr_id), service_name="GRR"
),
)
fs_msg.data.Pack(grr_msg.AsPrimitiveProto())
if grr_msg.session_id is not None:
annotation = fs_msg.annotations.entries.add()
annotation.key, annotation.value = "flow_id", grr_msg.session_id.Basename()
if grr_msg.request_id is not None:
annotation = fs_msg.annotations.entries.add()
annotation.key, annotation.value = "request_id", str(grr_msg.request_id)
fleetspeak_connector.CONN.outgoing.InsertMessage(
fs_msg,
single_try_timeout=WRITE_SINGLE_TRY_TIMEOUT,
timeout=WRITE_TOTAL_TIMEOUT,
) | Sends the given GrrMessage through FS with retrying.
The send operation is retried if a `grpc.RpcError` occurs.
The maximum number of retries corresponds to the config value
`Server.fleetspeak_send_retry_attempts`.
A retry is delayed by the number of seconds specified in the config value
`Server.fleetspeak_send_retry_sleep_time_secs`.
Args:
grr_id: ID of grr client to send message to.
grr_msg: GRR message to send. | SendGrrMessageThroughFleetspeak | python | google/grr | grr/server/grr_response_server/fleetspeak_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_utils.py | Apache-2.0 |
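The senders above wrap the GRR payload in a Fleetspeak `Message` whose `data` field is a `google.protobuf.Any`. The pack/unpack pattern itself is plain protobuf; a minimal self-contained illustration using a `Timestamp` as a stand-in payload (the real code packs a `GrrMessage`):

```python
from google.protobuf import any_pb2
from google.protobuf import timestamp_pb2

payload = timestamp_pb2.Timestamp(seconds=1700000000)  # stand-in for a GrrMessage

# Pack: this is what fs_msg.data.Pack(grr_msg) does on the Fleetspeak message.
data = any_pb2.Any()
data.Pack(payload)
print(data.type_url)  # type.googleapis.com/google.protobuf.Timestamp

# Unpack on the receiving side.
unpacked = timestamp_pb2.Timestamp()
assert data.Unpack(unpacked)
print(unpacked.seconds)  # 1700000000
```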
def SendGrrMessageProtoThroughFleetspeak(
grr_id: str,
grr_msg: jobs_pb2.GrrMessage,
) -> None:
"""Sends the given GrrMessage through FS with retrying.
The send operation is retried if a `grpc.RpcError` occurs.
The maximum number of retries corresponds to the config value
`Server.fleetspeak_send_retry_attempts`.
A retry is delayed by the number of seconds specified in the config value
`Server.fleetspeak_send_retry_sleep_time_secs`.
Args:
grr_id: ID of grr client to send message to.
grr_msg: GRR message to send.
"""
fs_msg = fs_common_pb2.Message(
message_type="GrrMessage",
destination=fs_common_pb2.Address(
client_id=GRRIDToFleetspeakID(grr_id), service_name="GRR"
),
)
fs_msg.data.Pack(grr_msg)
if grr_msg.session_id is not None:
annotation = fs_msg.annotations.entries.add()
annotation.key = "flow_id"
annotation.value = rdfvalue.FlowSessionID(grr_msg.session_id).Basename()
if grr_msg.request_id is not None:
annotation = fs_msg.annotations.entries.add()
annotation.key = "request_id"
annotation.value = str(grr_msg.request_id)
fleetspeak_connector.CONN.outgoing.InsertMessage(
fs_msg,
single_try_timeout=WRITE_SINGLE_TRY_TIMEOUT,
timeout=WRITE_TOTAL_TIMEOUT,
) | Sends the given GrrMessage through FS with retrying.
The send operation is retried if a `grpc.RpcError` occurs.
The maximum number of retries corresponds to the config value
`Server.fleetspeak_send_retry_attempts`.
A retry is delayed by the number of seconds specified in the config value
`Server.fleetspeak_send_retry_sleep_time_secs`.
Args:
grr_id: ID of grr client to send message to.
grr_msg: GRR message to send. | SendGrrMessageProtoThroughFleetspeak | python | google/grr | grr/server/grr_response_server/fleetspeak_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_utils.py | Apache-2.0 |
def SendRrgRequest(
client_id: str,
request: rrg_pb2.Request,
) -> None:
"""Sends a RRG action request to the specified endpoint.
Args:
client_id: A unique endpoint identifier as recognized by GRR.
request: A request to send to the endpoint.
"""
message = fs_common_pb2.Message()
message.message_type = "rrg.Request"
message.destination.service_name = "RRG"
message.destination.client_id = GRRIDToFleetspeakID(client_id)
message.data.Pack(request)
# It is not entirely clear to me why we set these annotations below, but
# messages sent to Python agents do it, so we should do it as well.
message.annotations.entries.add(
key="flow_id",
value=str(request.flow_id),
)
message.annotations.entries.add(
key="request_id",
value=str(request.request_id),
)
fleetspeak_connector.CONN.outgoing.InsertMessage(
message,
single_try_timeout=WRITE_SINGLE_TRY_TIMEOUT,
timeout=WRITE_TOTAL_TIMEOUT,
) | Sends a RRG action request to the specified endpoint.
Args:
client_id: A unique endpoint identifier as recognized by GRR.
request: A request to send to the endpoint. | SendRrgRequest | python | google/grr | grr/server/grr_response_server/fleetspeak_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_utils.py | Apache-2.0 |
def KillFleetspeak(grr_id: str, force: bool) -> None:
"""Kills Fleespeak on the given client."""
die_req = fs_system_pb2.DieRequest(force=force)
fs_msg = fs_common_pb2.Message()
fs_msg.message_type = "Die"
fs_msg.destination.client_id = GRRIDToFleetspeakID(grr_id)
fs_msg.destination.service_name = "system"
fs_msg.data.Pack(die_req)
fleetspeak_connector.CONN.outgoing.InsertMessage(
fs_msg,
single_try_timeout=WRITE_SINGLE_TRY_TIMEOUT,
timeout=WRITE_TOTAL_TIMEOUT,
) | Kills Fleetspeak on the given client. | KillFleetspeak | python | google/grr | grr/server/grr_response_server/fleetspeak_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_utils.py | Apache-2.0 |
def RestartFleetspeakGrrService(grr_id: str) -> None:
"""Restarts the GRR service on the given client."""
restart_req = fs_system_pb2.RestartServiceRequest(name="GRR")
fs_msg = fs_common_pb2.Message()
fs_msg.message_type = "RestartService"
fs_msg.destination.client_id = GRRIDToFleetspeakID(grr_id)
fs_msg.destination.service_name = "system"
fs_msg.data.Pack(restart_req)
fleetspeak_connector.CONN.outgoing.InsertMessage(
fs_msg,
single_try_timeout=WRITE_SINGLE_TRY_TIMEOUT,
timeout=WRITE_TOTAL_TIMEOUT,
) | Restarts the GRR service on the given client. | RestartFleetspeakGrrService | python | google/grr | grr/server/grr_response_server/fleetspeak_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_utils.py | Apache-2.0 |
def DeleteFleetspeakPendingMessages(grr_id: str) -> None:
"""Deletes fleetspeak messages pending for the given client."""
delete_req = admin_pb2.DeletePendingMessagesRequest()
delete_req.client_ids.append(GRRIDToFleetspeakID(grr_id))
fleetspeak_connector.CONN.outgoing.DeletePendingMessages(
delete_req,
single_try_timeout=WRITE_SINGLE_TRY_TIMEOUT,
timeout=WRITE_TOTAL_TIMEOUT,
) | Deletes fleetspeak messages pending for the given client. | DeleteFleetspeakPendingMessages | python | google/grr | grr/server/grr_response_server/fleetspeak_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_utils.py | Apache-2.0 |
def TSToRDFDatetime(ts):
"""Convert a protobuf.Timestamp to an RDFDatetime."""
return rdfvalue.RDFDatetime(ts.seconds * 1000000 + ts.nanos // 1000) | Convert a protobuf.Timestamp to an RDFDatetime. | TSToRDFDatetime | python | google/grr | grr/server/grr_response_server/fleetspeak_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_utils.py | Apache-2.0 |
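The conversion above turns a protobuf `Timestamp` (seconds plus nanos) into microseconds since the Unix epoch. The arithmetic alone, without the RDFDatetime wrapper:

```python
from google.protobuf import timestamp_pb2

ts = timestamp_pb2.Timestamp(seconds=1700000000, nanos=123456789)

# RDFDatetime stores microseconds since the Unix epoch, so:
micros = ts.seconds * 1_000_000 + ts.nanos // 1_000
print(micros)  # 1700000000123456
```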
def GetLabelsFromFleetspeak(client_id):
"""Returns labels for a Fleetspeak-enabled client.
Fleetspeak-enabled clients delegate labeling to Fleetspeak, as opposed to
using labels in the GRR config.
Args:
client_id: Id of the client to fetch Fleetspeak labels for.
Returns:
A list of client labels.
"""
res = fleetspeak_connector.CONN.outgoing.ListClients(
admin_pb2.ListClientsRequest(client_ids=[GRRIDToFleetspeakID(client_id)]),
single_try_timeout=READ_SINGLE_TRY_TIMEOUT,
timeout=READ_TOTAL_TIMEOUT,
)
if not res.clients or not res.clients[0].labels:
return []
grr_labels = []
label_prefix = config.CONFIG["Server.fleetspeak_label_prefix"]
for fs_label in res.clients[0].labels:
if fs_label.service_name != "client" or (
label_prefix and not fs_label.label.startswith(label_prefix)
):
continue
try:
grr_labels.append(fleetspeak_connector.label_map[fs_label.label])
except KeyError:
grr_labels.append(fs_label.label)
return grr_labels | Returns labels for a Fleetspeak-enabled client.
Fleetspeak-enabled clients delegate labeling to Fleetspeak, as opposed to
using labels in the GRR config.
Args:
client_id: Id of the client to fetch Fleetspeak labels for.
Returns:
A list of client labels. | GetLabelsFromFleetspeak | python | google/grr | grr/server/grr_response_server/fleetspeak_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_utils.py | Apache-2.0 |
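The filtering above keeps only labels from the `client` service, optionally restricted to a configured prefix, and maps them through `label_map` with a pass-through fallback. The same logic on plain data, as a sketch; the sample labels, prefix, and map below are made up:

```python
from types import SimpleNamespace

fs_labels = [
    SimpleNamespace(service_name="client", label="corp-linux"),
    SimpleNamespace(service_name="client", label="other"),          # lacks prefix
    SimpleNamespace(service_name="system", label="corp-ignored"),   # wrong service
]
label_prefix = "corp-"               # e.g. Server.fleetspeak_label_prefix
label_map = {"corp-linux": "linux"}  # illustrative rename map

grr_labels = []
for fs_label in fs_labels:
    if fs_label.service_name != "client" or (
        label_prefix and not fs_label.label.startswith(label_prefix)
    ):
        continue
    grr_labels.append(label_map.get(fs_label.label, fs_label.label))

print(grr_labels)  # ['linux']
```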
def FetchClientResourceUsageRecords(
client_id: str,
start_range: timestamp_pb2.Timestamp,
end_range: timestamp_pb2.Timestamp,
) -> List[resource_pb2.ClientResourceUsageRecord]:
"""Returns aggregated resource usage metrics of a client from Fleetspeak.
Args:
client_id: Id of the client to fetch Fleetspeak resource usage records for.
start_range: Start timestamp of range.
end_range: end timestamp of range.
Returns:
A list of client resource usage records retrieved from Fleetspeak.
"""
res = fleetspeak_connector.CONN.outgoing.FetchClientResourceUsageRecords(
admin_pb2.FetchClientResourceUsageRecordsRequest(
client_id=GRRIDToFleetspeakID(client_id),
start_timestamp=start_range,
end_timestamp=end_range,
),
single_try_timeout=READ_SINGLE_TRY_TIMEOUT,
timeout=READ_TOTAL_TIMEOUT,
)
if not res.records:
return []
return list(res.records) | Returns aggregated resource usage metrics of a client from Fleetspeak.
Args:
client_id: Id of the client to fetch Fleetspeak resource usage records for.
start_range: Start timestamp of range.
end_range: end timestamp of range.
Returns:
A list of client resource usage records retrieved from Fleetspeak. | FetchClientResourceUsageRecords | python | google/grr | grr/server/grr_response_server/fleetspeak_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_utils.py | Apache-2.0 |
def __init__(self, certificate=None, private_key=None):
"""Creates a communicator.
Args:
certificate: Our own certificate.
private_key: Our own private key.
"""
self.private_key = private_key
self.certificate = certificate
self._ClearServerCipherCache()
# A cache for encrypted ciphers
self.encrypted_cipher_cache = utils.FastStore(max_size=50000) | Creates a communicator.
Args:
certificate: Our own certificate.
private_key: Our own private key. | __init__ | python | google/grr | grr/server/grr_response_server/communicator.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/communicator.py | Apache-2.0 |
def EncodeMessageList(cls, message_list, packed_message_list):
"""Encode the MessageList into the packed_message_list rdfvalue."""
# By default, leave the message list uncompressed.
uncompressed_data = message_list.SerializeToBytes()
packed_message_list.message_list = uncompressed_data
compressed_data = zlib.compress(uncompressed_data)
# Only compress if it buys us something.
if len(compressed_data) < len(uncompressed_data):
packed_message_list.compression = (
rdf_flows.PackedMessageList.CompressionType.ZCOMPRESSION
)
packed_message_list.message_list = compressed_data | Encode the MessageList into the packed_message_list rdfvalue. | EncodeMessageList | python | google/grr | grr/server/grr_response_server/communicator.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/communicator.py | Apache-2.0 |
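The encoder above compresses opportunistically: the zlib output is kept only when it is actually smaller than the raw serialization. The same decision with plain `zlib`, using a dummy byte string in place of a serialized MessageList:

```python
import zlib

uncompressed = b"serialized MessageList bytes " * 100  # repetitive, so compressible
compressed = zlib.compress(uncompressed)

if len(compressed) < len(uncompressed):
    payload, compression = compressed, "ZCOMPRESSION"
else:
    payload, compression = uncompressed, "UNCOMPRESSED"

# Prints the chosen scheme and the size reduction for this input.
print(compression, len(uncompressed), "->", len(payload))
```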
def _GetServerCipher(self):
"""Returns the cipher for self.server_name."""
if self.server_cipher is not None:
expiry = self.server_cipher_age + rdfvalue.Duration.From(1, rdfvalue.DAYS)
if expiry > rdfvalue.RDFDatetime.Now():
return self.server_cipher
remote_public_key = self._GetRemotePublicKey(self.server_name)
self.server_cipher = communicator.Cipher(
self.common_name, self.private_key, remote_public_key
)
self.server_cipher_age = rdfvalue.RDFDatetime.Now()
return self.server_cipher | Returns the cipher for self.server_name. | _GetServerCipher | python | google/grr | grr/server/grr_response_server/communicator.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/communicator.py | Apache-2.0 |
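`_GetServerCipher` is a lazy, time-bounded cache: it reuses the cipher while it is younger than a day and rebuilds it otherwise. The pattern in isolation, with `time` in place of RDFDatetime and a trivial factory standing in for cipher construction (names are illustrative):

```python
import time

class TTLCachedValue:
    """Rebuilds a value once it is older than ttl_seconds (illustrative)."""

    def __init__(self, factory, ttl_seconds):
        self._factory = factory
        self._ttl = ttl_seconds
        self._value = None
        self._built_at = None

    def get(self):
        now = time.time()
        if self._value is not None and self._built_at + self._ttl > now:
            return self._value         # still fresh, reuse it
        self._value = self._factory()  # stale or absent: rebuild
        self._built_at = now
        return self._value

cache = TTLCachedValue(factory=lambda: object(), ttl_seconds=86400)
assert cache.get() is cache.get()  # the second call hits the cached value
```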
def EncodeMessages(
self,
message_list,
result,
destination=None,
timestamp=None,
api_version=3,
):
"""Accepts a list of messages and encodes for transmission.
This function signs and then encrypts the payload.
Args:
message_list: A MessageList rdfvalue containing a list of GrrMessages.
result: A ClientCommunication rdfvalue which will be filled in.
destination: The CN of the remote system this should go to.
timestamp: A timestamp to use for the signed messages. If None - use the
current time.
api_version: The api version which this should be encoded in.
Returns:
A nonce (based on time) which is inserted to the encrypted payload. The
client can verify that the server is able to decrypt the message and
return the nonce.
Raises:
RuntimeError: If we do not support this api version.
"""
if api_version not in [3]:
raise RuntimeError(
"Unsupported api version: %s, expected 3." % api_version
)
remote_public_key = self._GetRemotePublicKey(destination)
cipher = communicator.Cipher(
self.common_name, self.private_key, remote_public_key
)
# Make a nonce for this transaction
if timestamp is None:
self.timestamp = timestamp = int(time.time() * 1000000)
packed_message_list = rdf_flows.PackedMessageList(timestamp=timestamp)
self.EncodeMessageList(message_list, packed_message_list)
result.encrypted_cipher_metadata = cipher.encrypted_cipher_metadata
# Include the encrypted cipher.
result.encrypted_cipher = cipher.encrypted_cipher
serialized_message_list = packed_message_list.SerializeToBytes()
# Encrypt the message symmetrically.
# New scheme cipher is signed plus hmac over message list.
result.packet_iv, result.encrypted = cipher.Encrypt(serialized_message_list)
# This is to support older endpoints.
result.hmac = cipher.HMAC(result.encrypted)
# Newer endpoints only look at this HMAC. It is recalculated for each packet
# in the session. Note that encrypted_cipher and encrypted_cipher_metadata
# do not change between all packets in this session.
result.full_hmac = cipher.HMAC(
result.encrypted,
result.encrypted_cipher,
result.encrypted_cipher_metadata,
result.packet_iv.SerializeToBytes(),
struct.pack("<I", api_version),
)
result.api_version = api_version
if isinstance(result, rdfvalue.RDFValue):
# Store the number of messages contained.
result.num_messages = len(message_list)
return timestamp | Accepts a list of messages and encodes for transmission.
This function signs and then encrypts the payload.
Args:
message_list: A MessageList rdfvalue containing a list of GrrMessages.
result: A ClientCommunication rdfvalue which will be filled in.
destination: The CN of the remote system this should go to.
timestamp: A timestamp to use for the signed messages. If None - use the
current time.
api_version: The api version which this should be encoded in.
Returns:
A nonce (based on time) which is inserted to the encrypted payload. The
client can verify that the server is able to decrypt the message and
return the nonce.
Raises:
RuntimeError: If we do not support this api version. | EncodeMessages | python | google/grr | grr/server/grr_response_server/communicator.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/communicator.py | Apache-2.0 |
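The `full_hmac` above authenticates several fields at once by feeding them to a single HMAC in a fixed order, with the API version packed as a little-endian uint32. A minimal sketch of that construction with `hmac`/`hashlib`; the key, field values, and digest choice are placeholders, and GRR's actual `Cipher.HMAC` may differ:

```python
import hashlib
import hmac
import struct

hmac_key = b"per-session-hmac-key"  # placeholder for the cipher's HMAC key
encrypted = b"<ciphertext>"
encrypted_cipher = b"<encrypted cipher>"
encrypted_cipher_metadata = b"<encrypted cipher metadata>"
packet_iv = b"<iv bytes>"
api_version = 3

parts = [
    encrypted,
    encrypted_cipher,
    encrypted_cipher_metadata,
    packet_iv,
    struct.pack("<I", api_version),  # little-endian uint32, as in the code above
]
full_hmac = hmac.new(hmac_key, b"".join(parts), hashlib.sha1).digest()
print(full_hmac.hex())
```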
def DecryptMessage(self, encrypted_response):
"""Decrypt the serialized, encrypted string.
Args:
encrypted_response: A serialized and encrypted string.
Returns:
a Packed_Message_List rdfvalue
"""
try:
response_comms = rdf_flows.ClientCommunication.FromSerializedBytes(
encrypted_response
)
return self.DecodeMessages(response_comms)
except (
rdfvalue.DecodeError,
type_info.TypeValueError,
ValueError,
AttributeError,
) as e:
raise DecodingError("Error while decrypting messages: %s" % e) | Decrypt the serialized, encrypted string.
Args:
encrypted_response: A serialized and encrypted string.
Returns:
a Packed_Message_List rdfvalue | DecryptMessage | python | google/grr | grr/server/grr_response_server/communicator.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/communicator.py | Apache-2.0 |
def DecompressMessageList(cls, packed_message_list):
"""Decompress the message data from packed_message_list.
Args:
packed_message_list: A PackedMessageList rdfvalue with some data in it.
Returns:
a MessageList rdfvalue.
Raises:
DecodingError: If decompression fails.
"""
compression = packed_message_list.compression
if compression == rdf_flows.PackedMessageList.CompressionType.UNCOMPRESSED:
data = packed_message_list.message_list
elif (
compression == rdf_flows.PackedMessageList.CompressionType.ZCOMPRESSION
):
try:
data = zlib.decompress(packed_message_list.message_list)
except zlib.error as e:
raise DecodingError("Failed to decompress: %s" % e)
else:
raise DecodingError("Compression scheme not supported")
try:
result = rdf_flows.MessageList.FromSerializedBytes(data)
except rdfvalue.DecodeError:
raise DecodingError("RDFValue parsing failed.")
return result | Decompress the message data from packed_message_list.
Args:
packed_message_list: A PackedMessageList rdfvalue with some data in it.
Returns:
a MessageList rdfvalue.
Raises:
DecodingError: If decompression fails. | DecompressMessageList | python | google/grr | grr/server/grr_response_server/communicator.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/communicator.py | Apache-2.0 |
def DecodeMessages(self, response_comms):
"""Extract and verify server message.
Args:
response_comms: A ClientCommunication rdfvalue
Returns:
list of messages and the CN where they came from.
Raises:
DecryptionError: If the message failed to decrypt properly.
"""
# Have we seen this cipher before?
cipher_verified = False
try:
cipher = self.encrypted_cipher_cache.Get(response_comms.encrypted_cipher)
GRR_ENCRYPTED_CIPHER_CACHE.Increment(fields=["hits"])
# Even though we have seen this encrypted cipher already, we should still
# make sure that all the other fields are sane and verify the HMAC.
cipher.VerifyReceivedHMAC(response_comms)
cipher_verified = True
# If we have the cipher in the cache, we know the source and
# should have a corresponding public key.
source = cipher.GetSource()
remote_public_key = self._GetRemotePublicKey(source)
except KeyError:
GRR_ENCRYPTED_CIPHER_CACHE.Increment(fields=["misses"])
cipher = communicator.ReceivedCipher(response_comms, self.private_key)
source = cipher.GetSource()
try:
remote_public_key = self._GetRemotePublicKey(source)
if cipher.VerifyCipherSignature(remote_public_key):
# At this point we know this cipher is legit, we can cache it.
self.encrypted_cipher_cache.Put(
response_comms.encrypted_cipher, cipher
)
cipher_verified = True
except UnknownClientCertError:
# We don't know who we are talking to.
remote_public_key = None
# Decrypt the message with the per packet IV.
plain = cipher.Decrypt(response_comms.encrypted, response_comms.packet_iv)
try:
packed_message_list = rdf_flows.PackedMessageList.FromSerializedBytes(
plain
)
except rdfvalue.DecodeError as e:
raise DecryptionError(e)
message_list = self.DecompressMessageList(packed_message_list)
# Are these messages authenticated?
# fmt: off
auth_state = self.VerifyMessageSignature(
response_comms,
packed_message_list,
cipher,
cipher_verified,
response_comms.api_version,
remote_public_key)
# fmt: on
# Mark messages as authenticated and where they came from.
for msg in message_list.job:
msg.auth_state = auth_state
msg.source = cipher.cipher_metadata.source
return (
message_list.job,
cipher.cipher_metadata.source,
packed_message_list.timestamp,
) | Extract and verify server message.
Args:
response_comms: A ClientCommunication rdfvalue
Returns:
list of messages and the CN where they came from.
Raises:
DecryptionError: If the message failed to decrypt properly. | DecodeMessages | python | google/grr | grr/server/grr_response_server/communicator.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/communicator.py | Apache-2.0 |
def VerifyMessageSignature(
self,
unused_response_comms,
packed_message_list,
cipher,
cipher_verified,
api_version,
remote_public_key,
):
"""Verify the message list signature.
This is the way the messages are verified in the client.
In the client we also check that the nonce returned by the server is correct
(the timestamp doubles as a nonce). If the nonce fails we deem the response
unauthenticated since it might have resulted from a replay attack.
Args:
packed_message_list: The PackedMessageList rdfvalue from the server.
cipher: The cipher belonging to the remote end.
cipher_verified: If True, the cipher's signature is not verified again.
api_version: The api version we should use.
remote_public_key: The public key of the source.
Returns:
An rdf_flows.GrrMessage.AuthorizationState.
Raises:
DecryptionError: if the message is corrupt.
"""
# This is not used atm since we only support a single api version (3).
_ = api_version
result = rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED
if cipher_verified or cipher.VerifyCipherSignature(remote_public_key):
GRR_AUTHENTICATED_MESSAGES.Increment()
result = rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED
# Check for replay attacks. We expect the server to return the same
# timestamp nonce we sent.
if packed_message_list.timestamp != self.timestamp: # pytype: disable=attribute-error
result = rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED
if not cipher.cipher_metadata:
# Fake the metadata
cipher.cipher_metadata = rdf_flows.CipherMetadata(
source=packed_message_list.source
)
return result | Verify the message list signature.
This is the way the messages are verified in the client.
In the client we also check that the nonce returned by the server is correct
(the timestamp doubles as a nonce). If the nonce fails we deem the response
unauthenticated since it might have resulted from a replay attack.
Args:
packed_message_list: The PackedMessageList rdfvalue from the server.
cipher: The cipher belonging to the remote end.
cipher_verified: If True, the cipher's signature is not verified again.
api_version: The api version we should use.
remote_public_key: The public key of the source.
Returns:
An rdf_flows.GrrMessage.AuthorizationState.
Raises:
DecryptionError: if the message is corrupt. | VerifyMessageSignature | python | google/grr | grr/server/grr_response_server/communicator.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/communicator.py | Apache-2.0 |
def ReceiveFetchedFileStat(
self,
stat_entry: rdf_client_fs.StatEntry,
request_data: Optional[Mapping[str, Any]] = None,
):
"""This method will be called for each new file stat successfully fetched.
Args:
stat_entry: rdf_client_fs.StatEntry object describing the file.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call.
"""
del request_data # Unused.
if self.state.stop_at_stat:
status = rdf_file_finder.CollectFilesByKnownPathResult.Status.COLLECTED
self.state.progress.num_in_progress -= 1
self.state.progress.num_collected += 1
else:
status = rdf_file_finder.CollectFilesByKnownPathResult.Status.IN_PROGRESS
result = rdf_file_finder.CollectFilesByKnownPathResult(
stat=stat_entry, status=status
)
self.SendReply(result) | This method will be called for each new file stat successfully fetched.
Args:
stat_entry: rdf_client_fs.StatEntry object describing the file.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call. | ReceiveFetchedFileStat | python | google/grr | grr/server/grr_response_server/flows/file.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/file.py | Apache-2.0 |
def ReceiveFetchedFileHash(
self,
stat_entry: rdf_client_fs.StatEntry,
file_hash: rdf_crypto.Hash,
request_data: Optional[Mapping[str, Any]] = None,
):
"""This method will be called for each new file hash successfully fetched.
Args:
stat_entry: rdf_client_fs.StatEntry object describing the file.
file_hash: rdf_crypto.Hash object with file hashes.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call.
"""
del request_data # Unused.
if self.state.stop_at_hash:
status = rdf_file_finder.CollectFilesByKnownPathResult.Status.COLLECTED
self.state.progress.num_in_progress -= 1
self.state.progress.num_collected += 1
else:
status = rdf_file_finder.CollectFilesByKnownPathResult.Status.IN_PROGRESS
result = rdf_file_finder.CollectFilesByKnownPathResult(
stat=stat_entry, hash=file_hash, status=status
)
self.SendReply(result) | This method will be called for each new file hash successfully fetched.
Args:
stat_entry: rdf_client_fs.StatEntry object describing the file.
file_hash: rdf_crypto.Hash object with file hashes.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call. | ReceiveFetchedFileHash | python | google/grr | grr/server/grr_response_server/flows/file.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/file.py | Apache-2.0 |
def ReceiveFetchedFile(
self,
stat_entry: rdf_client_fs.StatEntry,
file_hash: rdf_crypto.Hash,
request_data: Optional[Mapping[str, Any]] = None,
is_duplicate: bool = False,
):
"""This method will be called for each new file successfully fetched.
Args:
stat_entry: rdf_client_fs.StatEntry object describing the file.
file_hash: rdf_crypto.Hash object with file hashes.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call.
is_duplicate: If True, the file wasn't actually collected as its hash was
found in the filestore.
"""
del request_data, is_duplicate # Unused.
result = rdf_file_finder.CollectFilesByKnownPathResult(
stat=stat_entry,
hash=file_hash,
status=rdf_file_finder.CollectFilesByKnownPathResult.Status.COLLECTED,
)
self.SendReply(result)
self.state.progress.num_in_progress -= 1
self.state.progress.num_collected += 1 | This method will be called for each new file successfully fetched.
Args:
stat_entry: rdf_client_fs.StatEntry object describing the file.
file_hash: rdf_crypto.Hash object with file hashes.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call.
is_duplicate: If True, the file wasn't actually collected as its hash was
found in the filestore. | ReceiveFetchedFile | python | google/grr | grr/server/grr_response_server/flows/file.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/file.py | Apache-2.0 |
def FileFetchFailed(
self,
pathspec: rdf_paths.PathSpec,
request_data: Optional[Mapping[str, Any]] = None,
status: Optional[rdf_flow_objects.FlowStatus] = None,
):
"""This method will be called when stat or hash requests fail.
Args:
pathspec: Pathspec of a file that failed to be fetched.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call.
status: FlowStatus that contains more error details.
"""
requested_pathspec = request_data["requested_pathspec"]
if (
self.client_os == "Windows"
and pathspec.pathtype == rdf_paths.PathSpec.PathType.OS
):
      # Retry with raw filesystem access on Windows; the file might be locked
# for reads.
raw_pathspec = rdf_paths.PathSpec(
path=requested_pathspec.path,
pathtype=config.CONFIG["Server.raw_filesystem_access_pathtype"],
)
self.StartFileFetch(
raw_pathspec, request_data=dict(requested_pathspec=raw_pathspec)
)
self.state.progress.num_raw_fs_access_retries += 1
else:
if status is not None and status.error_message:
error_description = "{} when fetching {} with {}".format(
status.error_message, pathspec.path, pathspec.pathtype
)
# TODO: This is a really bad hack and should be fixed by
# passing the 'not found' status in a more structured way.
if "File not found" in status.error_message:
file_status = (
rdf_file_finder.CollectFilesByKnownPathResult.Status.NOT_FOUND
)
else:
file_status = (
rdf_file_finder.CollectFilesByKnownPathResult.Status.FAILED
)
else:
error_description = (
"File {} could not be fetched with {} due to an unknown error. "
"Check the flow logs.".format(pathspec.path, pathspec.pathtype)
)
file_status = (
rdf_file_finder.CollectFilesByKnownPathResult.Status.FAILED
)
result = rdf_file_finder.CollectFilesByKnownPathResult(
stat=rdf_client_fs.StatEntry(pathspec=requested_pathspec),
error=error_description,
status=file_status,
)
self.SendReply(result)
self.state.progress.num_in_progress -= 1
self.state.progress.num_failed += 1 | This method will be called when stat or hash requests fail.
Args:
pathspec: Pathspec of a file that failed to be fetched.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call.
status: FlowStatus that contains more error details. | FileFetchFailed | python | google/grr | grr/server/grr_response_server/flows/file.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/file.py | Apache-2.0 |
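The failure handler above retries a locked file on Windows by re-issuing the fetch with a raw filesystem pathtype before reporting failure. Below is a generic sketch of that retry-with-fallback idea; `fetch_with_fallback`, `fetch_os` and `fetch_raw` are hypothetical stand-ins, not GRR functions.

# Sketch of the "retry with a more privileged access method, then report
# failure" pattern used above.
from typing import Callable, Optional


def fetch_with_fallback(
    path: str,
    fetch_os: Callable[[str], Optional[bytes]],
    fetch_raw: Callable[[str], Optional[bytes]],
) -> Optional[bytes]:
  data = fetch_os(path)
  if data is not None:
    return data
  # The first attempt failed (e.g. the file is locked for reads); retry once
  # with raw filesystem access before declaring the fetch failed.
  return fetch_raw(path)


# Example: the OS-level read is simulated to fail, the raw read succeeds.
result = fetch_with_fallback(
    "C:\\pagefile.sys",
    fetch_os=lambda p: None,
    fetch_raw=lambda p: b"raw bytes",
)
print(result)  # b'raw bytes'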
def Start(self): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
"""See base class."""
super().Start(file_size=self.MAX_FILE_SIZE)
self.state.progress = rdf_file_finder.CollectMultipleFilesProgress(
num_found=0,
num_in_progress=0,
num_raw_fs_access_retries=0,
num_collected=0,
num_failed=0,
)
conditions = BuildClientFileFinderConditions(
modification_time=self.args.modification_time
if self.args.HasField("modification_time")
else None,
access_time=self.args.access_time
if self.args.HasField("access_time")
else None,
inode_change_time=self.args.inode_change_time
if self.args.HasField("inode_change_time")
else None,
size=self.args.size if self.args.HasField("size") else None,
ext_flags=self.args.ext_flags
if self.args.HasField("ext_flags")
else None,
contents_regex_match=self.args.contents_regex_match
if self.args.HasField("contents_regex_match")
else None,
contents_literal_match=self.args.contents_literal_match
if self.args.HasField("contents_literal_match")
else None,
)
file_finder_args = rdf_file_finder.FileFinderArgs(
paths=self.args.path_expressions,
pathtype=rdf_paths.PathSpec.PathType.OS,
conditions=conditions,
action=rdf_file_finder.FileFinderAction.Stat(),
)
self.CallFlow(
file_finder.ClientFileFinder.__name__,
flow_args=file_finder_args,
next_state=self.ProcessFiles.__name__,
) | See base class. | Start | python | google/grr | grr/server/grr_response_server/flows/file.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/file.py | Apache-2.0 |
def ReceiveFetchedFile(
self, stat_entry, hash_obj, request_data=None, is_duplicate=False
):
"""See MultiGetFileLogic."""
del request_data, is_duplicate # Unused.
self.state.progress.num_in_progress = max(
0, self.state.progress.num_in_progress - 1
)
self.state.progress.num_collected += 1
result = rdf_file_finder.CollectMultipleFilesResult(
stat=stat_entry,
hash=hash_obj,
status=rdf_file_finder.CollectMultipleFilesResult.Status.COLLECTED,
)
self.SendReply(result) | See MultiGetFileLogic. | ReceiveFetchedFile | python | google/grr | grr/server/grr_response_server/flows/file.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/file.py | Apache-2.0 |
def FileFetchFailed(
self,
pathspec: rdf_paths.PathSpec,
request_data: Any = None,
status: Optional[rdf_flow_objects.FlowStatus] = None,
):
"""See MultiGetFileLogic."""
original_pathspec = pathspec
if request_data is not None and request_data["original_pathspec"]:
original_pathspec = request_data["original_pathspec"]
if (
self.client_os == "Windows"
and pathspec.pathtype == rdf_paths.PathSpec.PathType.OS
):
      # Retry with raw filesystem access on Windows; the file might be
      # locked for reads.
raw_pathspec = rdf_paths.PathSpec(
path=original_pathspec.path,
pathtype=config.CONFIG["Server.raw_filesystem_access_pathtype"],
)
self.StartFileFetch(
raw_pathspec, request_data=dict(original_pathspec=raw_pathspec)
)
self.state.progress.num_raw_fs_access_retries += 1
else:
if status is not None and status.error_message:
error_description = "{} when fetching {} with {}".format(
status.error_message, pathspec.path, pathspec.pathtype
)
else:
error_description = (
"File {} could not be fetched with {} due to an unknown error. "
"Check the flow logs.".format(pathspec.path, pathspec.pathtype)
)
self.state.progress.num_in_progress = max(
0, self.state.progress.num_in_progress - 1
)
self.state.progress.num_failed += 1
result = rdf_file_finder.CollectMultipleFilesResult(
stat=rdf_client_fs.StatEntry(pathspec=original_pathspec),
error=error_description,
status=rdf_file_finder.CollectMultipleFilesResult.Status.FAILED,
)
self.SendReply(result) | See MultiGetFileLogic. | FileFetchFailed | python | google/grr | grr/server/grr_response_server/flows/file.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/file.py | Apache-2.0 |
def Start(self): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
"""See base class."""
super().Start(file_size=self.MAX_FILE_SIZE)
self.state.progress = rdf_file_finder.HashMultipleFilesProgress(
num_found=0,
num_in_progress=0,
num_raw_fs_access_retries=0,
num_hashed=0,
num_failed=0,
)
    # Set the collection level for the MultiGetFileLogic mixin; the default
    # is to collect the full file contents.
self.state.stop_at_hash = True
conditions = BuildClientFileFinderConditions(
modification_time=self.args.modification_time
if self.args.HasField("modification_time")
else None,
access_time=self.args.access_time
if self.args.HasField("access_time")
else None,
inode_change_time=self.args.inode_change_time
if self.args.HasField("inode_change_time")
else None,
size=self.args.size if self.args.HasField("size") else None,
ext_flags=self.args.ext_flags
if self.args.HasField("ext_flags")
else None,
contents_regex_match=self.args.contents_regex_match
if self.args.HasField("contents_regex_match")
else None,
contents_literal_match=self.args.contents_literal_match
if self.args.HasField("contents_literal_match")
else None,
)
file_finder_args = rdf_file_finder.FileFinderArgs(
paths=self.args.path_expressions,
pathtype=rdf_paths.PathSpec.PathType.OS,
conditions=conditions,
action=rdf_file_finder.FileFinderAction.Hash(),
)
self.CallFlow(
file_finder.ClientFileFinder.__name__,
flow_args=file_finder_args,
next_state=self.ProcessResponses.__name__,
) | See base class. | Start | python | google/grr | grr/server/grr_response_server/flows/file.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/file.py | Apache-2.0 |
def FileFetchFailed(
self,
pathspec: rdf_paths.PathSpec,
request_data: Any = None,
status: Optional[rdf_flow_objects.FlowStatus] = None,
):
"""See MultiGetFileLogic."""
original_pathspec = pathspec
if request_data is not None and request_data["original_pathspec"]:
original_pathspec = request_data["original_pathspec"]
if (
self.client_os == "Windows"
and pathspec.pathtype == rdf_paths.PathSpec.PathType.OS
):
      # Retry with raw filesystem access on Windows; the file might be
      # locked for reads.
raw_pathspec = rdf_paths.PathSpec(
path=original_pathspec.path,
pathtype=config.CONFIG["Server.raw_filesystem_access_pathtype"],
)
self.StartFileFetch(
raw_pathspec, request_data=dict(original_pathspec=raw_pathspec)
)
self.state.progress.num_raw_fs_access_retries += 1
else:
if status is not None and status.error_message:
error_description = "{} when fetching {} with {}".format(
status.error_message, pathspec.path, pathspec.pathtype
)
else:
error_description = (
"File {} could not be fetched with {} due to an unknown error. "
"Check the flow logs.".format(pathspec.path, pathspec.pathtype)
)
self.state.progress.num_in_progress = max(
0, self.state.progress.num_in_progress - 1
)
self.state.progress.num_failed += 1
result = rdf_file_finder.CollectMultipleFilesResult(
stat=rdf_client_fs.StatEntry(pathspec=original_pathspec),
error=error_description,
status=rdf_file_finder.CollectMultipleFilesResult.Status.FAILED,
)
self.SendReply(result) | See MultiGetFileLogic. | FileFetchFailed | python | google/grr | grr/server/grr_response_server/flows/file.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/file.py | Apache-2.0 |
def ReceiveFetchedFileHash(
self,
stat_entry: rdf_client_fs.StatEntry,
file_hash: rdf_crypto.Hash,
request_data: Optional[Mapping[str, Any]] = None,
):
"""This method will be called for each new file hash successfully fetched.
Args:
stat_entry: rdf_client_fs.StatEntry object describing the file.
file_hash: rdf_crypto.Hash object with file hashes.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call.
"""
del request_data # Unused.
self.state.progress.num_in_progress -= 1
self.state.progress.num_hashed += 1
result = rdf_file_finder.CollectMultipleFilesResult(
stat=stat_entry,
hash=file_hash,
status=rdf_file_finder.CollectMultipleFilesResult.Status.COLLECTED,
)
self.SendReply(result) | This method will be called for each new file hash successfully fetched.
Args:
stat_entry: rdf_client_fs.StatEntry object describing the file.
file_hash: rdf_crypto.Hash object with file hashes.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call. | ReceiveFetchedFileHash | python | google/grr | grr/server/grr_response_server/flows/file.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/file.py | Apache-2.0 |
def BuildClientFileFinderConditions(
modification_time: Optional[
rdf_file_finder.FileFinderModificationTimeCondition
] = None,
access_time: Optional[rdf_file_finder.FileFinderAccessTimeCondition] = None,
inode_change_time: Optional[
rdf_file_finder.FileFinderInodeChangeTimeCondition
] = None,
size: Optional[rdf_file_finder.FileFinderSizeCondition] = None,
ext_flags: Optional[rdf_file_finder.FileFinderExtFlagsCondition] = None,
contents_regex_match: Optional[
rdf_file_finder.FileFinderContentsRegexMatchCondition
] = None,
contents_literal_match: Optional[
rdf_file_finder.FileFinderContentsLiteralMatchCondition
] = None,
) -> list[rdf_file_finder.FileFinderCondition]:
"""Constructs the list of conditions to be applied to ClientFileFinder flow.
Args:
modification_time: Min/max last modification time of the file(s).
access_time: Min/max last access time of the file(s).
inode_change_time: Min/max last inode time of the file(s).
size: Min/max file size.
ext_flags: Linux and/or macOS file flags.
contents_regex_match: regex rule to match in the file contents.
contents_literal_match: string literal to match in the file contents.
Returns:
List of file conditions for ClientFileFinder flow.
"""
conditions = []
if modification_time is not None:
conditions.append(
rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type.MODIFICATION_TIME,
modification_time=modification_time,
)
)
if access_time is not None:
conditions.append(
rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type.ACCESS_TIME,
access_time=access_time,
)
)
if inode_change_time is not None:
conditions.append(
rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type.INODE_CHANGE_TIME,
inode_change_time=inode_change_time,
)
)
if size is not None:
conditions.append(
rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type.SIZE,
size=size,
)
)
if ext_flags is not None:
conditions.append(
rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type.EXT_FLAGS,
ext_flags=ext_flags,
)
)
if contents_regex_match is not None:
conditions.append(
rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type.CONTENTS_REGEX_MATCH,
contents_regex_match=contents_regex_match,
)
)
if contents_literal_match is not None:
conditions.append(
rdf_file_finder.FileFinderCondition(
condition_type=rdf_file_finder.FileFinderCondition.Type.CONTENTS_LITERAL_MATCH,
contents_literal_match=contents_literal_match,
)
)
return conditions | Constructs the list of conditions to be applied to ClientFileFinder flow.
Args:
modification_time: Min/max last modification time of the file(s).
access_time: Min/max last access time of the file(s).
inode_change_time: Min/max last inode time of the file(s).
size: Min/max file size.
ext_flags: Linux and/or macOS file flags.
contents_regex_match: regex rule to match in the file contents.
contents_literal_match: string literal to match in the file contents.
Returns:
List of file conditions for ClientFileFinder flow. | BuildClientFileFinderConditions | python | google/grr | grr/server/grr_response_server/flows/file.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/file.py | Apache-2.0 |
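The builder above appends one typed condition per optional argument the caller actually supplied. A standalone sketch of the same optional-filter pattern, using plain dataclasses instead of the GRR RDF types:

# Standalone sketch of building a condition list only from parameters that
# were provided (plain dataclasses stand in for the GRR RDF values).
import dataclasses
from typing import List, Optional


@dataclasses.dataclass
class SizeCondition:
  min_size: int = 0
  max_size: int = 2**64 - 1


@dataclasses.dataclass
class RegexCondition:
  regex: str = ""


def build_conditions(
    size: Optional[SizeCondition] = None,
    contents_regex: Optional[RegexCondition] = None,
) -> List[object]:
  conditions: List[object] = []
  if size is not None:
    conditions.append(size)          # only added when the caller set it
  if contents_regex is not None:
    conditions.append(contents_regex)
  return conditions


print(build_conditions(size=SizeCondition(max_size=10_000_000)))
print(build_conditions())  # -> []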
def GetOutputPlugins(self):
"""Returns list of OutputPluginDescriptor objects to be used in the hunt.
This method can be overridden in a subclass in the server/local directory to
apply plugins specific to the local installation.
Returns:
list of rdf_output_plugin.OutputPluginDescriptor objects
"""
return [] | Returns list of OutputPluginDescriptor objects to be used in the hunt.
This method can be overridden in a subclass in the server/local directory to
apply plugins specific to the local installation.
Returns:
list of rdf_output_plugin.OutputPluginDescriptor objects | GetOutputPlugins | python | google/grr | grr/server/grr_response_server/flows/cron/system.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/cron/system.py | Apache-2.0 |
def StartInterrogationHunt(self):
"""Starts an interrogation hunt on all available clients."""
flow_name = flows_discovery.Interrogate.__name__
flow_args = flows_discovery.InterrogateArgs(lightweight=False)
description = "Interrogate run by cron to keep host info fresh."
hunt_id = hunt.CreateAndStartHunt(
flow_name,
flow_args,
self.username,
client_limit=0,
client_rate=config.CONFIG["Cron.interrogate_client_rate"],
crash_limit=config.CONFIG["Cron.interrogate_crash_limit"],
description=description,
duration=config.CONFIG["Cron.interrogate_duration"],
output_plugins=self.GetOutputPlugins())
self.Log("Started hunt %s.", hunt_id) | Starts an interrogation hunt on all available clients. | StartInterrogationHunt | python | google/grr | grr/server/grr_response_server/flows/cron/system.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/cron/system.py | Apache-2.0 |
def Start(self) -> None:
if self.client_os == "Linux":
if self.rrg_support:
signed_command = data_store.REL_DB.ReadSignedCommand(
"dmidecode_q",
operating_system=signed_commands_pb2.SignedCommand.OS.LINUX,
)
self.CallRRG(
action=rrg_pb2.EXECUTE_SIGNED_COMMAND,
args=rrg_execute_signed_command_pb2.Args(
command=signed_command.command,
command_ed25519_signature=signed_command.ed25519_signature,
),
next_state=self._ProcessRRGDmidecodeResults.__name__,
)
else:
dmidecode_args = jobs_pb2.ExecuteRequest()
dmidecode_args.cmd = "/usr/sbin/dmidecode"
dmidecode_args.args.append("-q")
self.CallClientProto(
server_stubs.ExecuteCommand,
dmidecode_args,
next_state=self._ProcessDmidecodeResults.__name__,
)
elif self.client_os == "Windows":
if self.rrg_support:
self.CallRRG(
action=rrg_pb2.QUERY_WMI,
args=rrg_query_wmi_pb2.Args(
query="""
SELECT *
FROM Win32_ComputerSystemProduct
""",
),
next_state=self._ProcessRRGComputerSystemProductResults.__name__,
)
else:
win32_computer_system_product_args = jobs_pb2.WMIRequest()
win32_computer_system_product_args.query = """
SELECT *
FROM Win32_ComputerSystemProduct
""".strip()
self.CallClientProto(
server_stubs.WmiQuery,
win32_computer_system_product_args,
next_state=self._ProcessWin32ComputerSystemProductResults.__name__,
)
elif self.client_os == "Darwin":
if self.rrg_support:
signed_command = data_store.REL_DB.ReadSignedCommand(
"system_profiler_xml_sphardware",
operating_system=signed_commands_pb2.SignedCommand.OS.MACOS,
)
self.CallRRG(
action=rrg_pb2.EXECUTE_SIGNED_COMMAND,
args=rrg_execute_signed_command_pb2.Args(
command=signed_command.command,
command_ed25519_signature=signed_command.ed25519_signature,
),
next_state=self._ProcessRRGSystemProfilerResults.__name__,
)
else:
system_profiler_args = jobs_pb2.ExecuteRequest()
system_profiler_args.cmd = "/usr/sbin/system_profiler"
system_profiler_args.args.append("-xml")
system_profiler_args.args.append("SPHardwareDataType")
self.CallClientProto(
server_stubs.ExecuteCommand,
system_profiler_args,
next_state=self._ProcessSystemProfilerResults.__name__,
)
else:
message = f"Unsupported operating system: {self.client_os}"
      raise flow_base.FlowError(message) |  | Start | python | google/grr | grr/server/grr_response_server/flows/general/hardware.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/hardware.py | Apache-2.0 |
def _ParseDmidecodeStdout(stdout: bytes) -> sysinfo_pb2.HardwareInfo:
"""Parses standard output of the `/usr/bin/dmidecode` command."""
result = sysinfo_pb2.HardwareInfo()
stdout = stdout.decode("utf-8", "backslashreplace")
lines = iter(stdout.splitlines())
for line in lines:
line = line.strip()
if line == "System Information":
for line in lines:
if not line.strip():
# Blank line ends system information section.
break
elif match := re.fullmatch(r"\s*Serial Number:\s*(.*)", line):
result.serial_number = match[1]
elif match := re.fullmatch(r"\s*Manufacturer:\s*(.*)", line):
result.system_manufacturer = match[1]
elif match := re.fullmatch(r"\s*Product Name:\s*(.*)", line):
result.system_product_name = match[1]
elif match := re.fullmatch(r"\s*UUID:\s*(.*)", line):
result.system_uuid = match[1]
elif match := re.fullmatch(r"\s*SKU Number:\s*(.*)", line):
result.system_sku_number = match[1]
elif match := re.fullmatch(r"\s*Family:\s*(.*)", line):
result.system_family = match[1]
elif match := re.fullmatch(r"\s*Asset Tag:\s*(.*)", line):
result.system_assettag = match[1]
elif line == "BIOS Information":
for line in lines:
if not line.strip():
          # Blank line ends BIOS information section.
break
elif match := re.fullmatch(r"^\s*Vendor:\s*(.*)", line):
result.bios_vendor = match[1]
elif match := re.fullmatch(r"^\s*Version:\s*(.*)", line):
result.bios_version = match[1]
elif match := re.fullmatch(r"^\s*Release Date:\s*(.*)", line):
result.bios_release_date = match[1]
elif match := re.fullmatch(r"^\s*ROM Size:\s*(.*)", line):
result.bios_rom_size = match[1]
elif match := re.fullmatch(r"^\s*BIOS Revision:\s*(.*)", line):
result.bios_revision = match[1]
return result | Parses standard output of the `/usr/bin/dmidecode` command. | _ParseDmidecodeStdout | python | google/grr | grr/server/grr_response_server/flows/general/hardware.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/hardware.py | Apache-2.0 |
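The parser above walks `dmidecode -q` output section by section and pulls fields out with `re.fullmatch`. A reduced, runnable sketch of the same approach on made-up sample output (only two fields kept for brevity):

# Reduced sketch of section-based dmidecode parsing with re.fullmatch.
import re

SAMPLE = b"""\
BIOS Information
\tVendor: ExampleVendor
\tVersion: 1.2.3

System Information
\tManufacturer: ExampleCorp
\tSerial Number: ABC123
"""


def parse(stdout: bytes) -> dict:
  result = {}
  lines = iter(stdout.decode("utf-8", "backslashreplace").splitlines())
  for line in lines:
    if line.strip() == "System Information":
      for line in lines:
        if not line.strip():  # blank line ends the section
          break
        if m := re.fullmatch(r"\s*Serial Number:\s*(.*)", line):
          result["serial_number"] = m[1]
        elif m := re.fullmatch(r"\s*Manufacturer:\s*(.*)", line):
          result["system_manufacturer"] = m[1]
  return result


print(parse(SAMPLE))
# {'system_manufacturer': 'ExampleCorp', 'serial_number': 'ABC123'}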
def _ParseSystemProfilerStdout(stdout: bytes) -> sysinfo_pb2.HardwareInfo:
"""Parses standard output of the `/usr/sbin/system_profiler` command."""
try:
plist = plistlib.loads(stdout)
except plistlib.InvalidFileException as error:
raise flow_base.FlowError(
f"Failed to parse system profiler output: {error}",
)
if not isinstance(plist, list):
raise flow_base.FlowError(
f"Unexpected type of system profiler output: {type(plist)}",
)
if len(plist) != 1:
raise flow_base.FlowError(
f"Unexpected length of system profiler output: {len(plist)}",
)
if not (items := plist[0].get("_items")):
raise flow_base.FlowError(
"`_items` property missing in system profiler output",
)
if not isinstance(items, list):
raise flow_base.FlowError(
f"Unexpected type of system profiler items: {type(items)}",
)
if len(items) != 1:
raise flow_base.FlowError(
f"Unexpected number of system profiler items: {len(items)}",
)
item = items[0]
if not isinstance(item, dict):
raise flow_base.FlowError(
f"Unexpected type of system profiler item: {type(item)}",
)
result = sysinfo_pb2.HardwareInfo()
if serial_number := item.get("serial_number"):
result.serial_number = serial_number
if machine_model := item.get("machine_model"):
result.system_product_name = machine_model
if boot_rom_version := item.get("boot_rom_version"):
result.bios_version = boot_rom_version
if platform_uuid := item.get("platform_UUID"):
result.system_uuid = platform_uuid
return result | Parses standard output of the `/usr/sbin/system_profiler` command. | _ParseSystemProfilerStdout | python | google/grr | grr/server/grr_response_server/flows/general/hardware.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/hardware.py | Apache-2.0 |
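On macOS the hardware data comes from `system_profiler -xml`, a property list whose single `_items` entry carries the fields of interest. A small round-trip sketch using `plistlib`; the sample values are made up:

# Round-trip sketch: serialize a system_profiler-like plist, then pull the
# hardware fields out of its single "_items" entry.
import plistlib

sample = plistlib.dumps([{
    "_items": [{
        "serial_number": "C02XXXXXXXX",
        "machine_model": "MacBookPro18,1",
        "platform_UUID": "AAAAAAAA-BBBB-CCCC-DDDD-EEEEEEEEEEEE",
    }],
}])


def parse(stdout: bytes) -> dict:
  plist = plistlib.loads(stdout)
  items = plist[0]["_items"]
  if len(items) != 1:
    raise ValueError(f"expected exactly one item, got {len(items)}")
  item = items[0]
  return {
      "serial_number": item.get("serial_number"),
      "system_product_name": item.get("machine_model"),
      "system_uuid": item.get("platform_UUID"),
  }


print(parse(sample))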
def Start(self):
"""Start processing."""
self.CallClientProto(
server_stubs.ListNetworkConnections,
flows_pb2.ListNetworkConnectionsArgs(
listening_only=self.proto_args.listening_only,
),
next_state=self.StoreNetstat.__name__,
) | Start processing. | Start | python | google/grr | grr/server/grr_response_server/flows/general/network.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/network.py | Apache-2.0 |
def StoreNetstat(
self,
responses: flow_responses.Responses[any_pb2.Any],
) -> None:
"""Collects the connections.
Args:
responses: A list of rdf_client_network.NetworkConnection objects.
Raises:
      flow_base.FlowError: On failure to retrieve the connections.
"""
if not responses.success:
raise flow_base.FlowError(
"Failed to get connections. Err: {0}".format(responses.status)
)
for response_any in responses:
response = sysinfo_pb2.NetworkConnection()
response_any.Unpack(response)
if (
self.proto_args.listening_only
and response.state != sysinfo_pb2.NetworkConnection.State.LISTEN
):
continue
self.SendReplyProto(response)
self.Log("Successfully wrote %d connections.", len(responses)) | Collects the connections.
Args:
responses: A list of rdf_client_network.NetworkConnection objects.
Raises:
    flow_base.FlowError: On failure to retrieve the connections. | StoreNetstat | python | google/grr | grr/server/grr_response_server/flows/general/network.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/network.py | Apache-2.0 |
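Responses arrive packed in `google.protobuf.Any` and are unpacked into the concrete message before filtering. A minimal pack/unpack sketch using the well-known `StringValue` type as a stand-in for the GRR connection message:

# Pack/unpack round trip with google.protobuf.Any; StringValue stands in for
# sysinfo_pb2.NetworkConnection.
from google.protobuf import any_pb2
from google.protobuf import wrappers_pb2

original = wrappers_pb2.StringValue(value="tcp 0.0.0.0:22 LISTEN")

packed = any_pb2.Any()
packed.Pack(original)

unpacked = wrappers_pb2.StringValue()
if packed.Unpack(unpacked):  # Unpack returns False on a type mismatch
  print(unpacked.value)      # 'tcp 0.0.0.0:22 LISTEN'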
def testNewArtifactLoaded(self):
"""Simulate a new artifact being loaded into the store via the UI."""
cmd_artifact = """name: "TestCmdArtifact"
doc: "Test command artifact for dpkg."
sources:
- type: "COMMAND"
attributes:
cmd: "/usr/bin/dpkg"
args: ["--list"]
labels: [ "Software" ]
supported_os: [ "Linux" ]
"""
no_datastore_artifact = """name: "NotInDatastore"
doc: "Test command artifact for dpkg."
sources:
- type: "COMMAND"
attributes:
cmd: "/usr/bin/dpkg"
args: ["--list"]
labels: [ "Software" ]
supported_os: [ "Linux" ]
"""
test_registry = artifact_registry.ArtifactRegistry()
test_registry.ClearRegistry()
test_registry._dirty = False
with mock.patch.object(artifact_registry, "REGISTRY", test_registry):
with self.assertRaises(rdf_artifacts.ArtifactNotRegisteredError):
artifact_registry.REGISTRY.GetArtifact("TestCmdArtifact")
with self.assertRaises(rdf_artifacts.ArtifactNotRegisteredError):
artifact_registry.REGISTRY.GetArtifact("NotInDatastore")
# Add artifact to datastore but not registry
for artifact_val in artifact_registry.REGISTRY.ArtifactsFromYaml(
cmd_artifact
):
data_store.REL_DB.WriteArtifact(
mig_artifacts.ToProtoArtifact(artifact_val)
)
# Add artifact to registry but not datastore
for artifact_val in artifact_registry.REGISTRY.ArtifactsFromYaml(
no_datastore_artifact
):
artifact_registry.REGISTRY.RegisterArtifact(
artifact_val, source="datastore", overwrite_if_exists=False
)
# We need to reload all artifacts from the data store before trying to get
# the artifact.
artifact_registry.REGISTRY.ReloadDatastoreArtifacts()
self.assertTrue(artifact_registry.REGISTRY.GetArtifact("TestCmdArtifact"))
# We registered this artifact with datastore source but didn't
# write it into aff4. This simulates an artifact that was
# uploaded in the UI then later deleted. We expect it to get
# cleared when the artifacts are reloaded from the datastore.
with self.assertRaises(rdf_artifacts.ArtifactNotRegisteredError):
artifact_registry.REGISTRY.GetArtifact("NotInDatastore") | Simulate a new artifact being loaded into the store via the UI. | testNewArtifactLoaded | python | google/grr | grr/server/grr_response_server/flows/general/collectors_interactions_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/collectors_interactions_test.py | Apache-2.0 |
def testCollectRunKeyBinaries(self):
"""Read Run key from the client_fixtures to test parsing and storage."""
client_id = self.SetupClient(0, system="Windows", os_version="6.2")
with vfs_test_lib.VFSOverrider(
rdf_paths.PathSpec.PathType.OS, vfs_test_lib.FakeFullVFSHandler
):
client_mock = action_mocks.InterrogatedClient()
# Get KB initialized
session_id = flow_test_lib.StartAndRunFlow(
artifact.KnowledgeBaseInitializationFlow,
client_mock,
client_id=client_id,
creator=self.test_username,
)
kb = flow_test_lib.GetFlowResults(client_id, session_id)[0]
client = data_store.REL_DB.ReadClientSnapshot(client_id)
client.knowledge_base.CopyFrom(mig_client.ToProtoKnowledgeBase(kb))
data_store.REL_DB.WriteClientSnapshot(client)
with test_lib.Instrument(
transfer.MultiGetFile, "Start"
) as getfile_instrument:
# Run the flow in the emulated way.
flow_test_lib.StartAndRunFlow(
registry.CollectRunKeyBinaries,
client_mock,
client_id=client_id,
creator=self.test_username,
)
# Check MultiGetFile got called for our runkey file
download_requested = False
for pathspec in getfile_instrument.args[0][0].args.pathspecs:
if pathspec.path == "C:\\Windows\\TEMP\\A.exe":
download_requested = True
self.assertTrue(download_requested) | Read Run key from the client_fixtures to test parsing and storage. | testCollectRunKeyBinaries | python | google/grr | grr/server/grr_response_server/flows/general/registry_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/registry_test.py | Apache-2.0 |
def Start(self):
"""Start processing."""
self.CallClientProto(
server_stubs.ListProcesses, next_state=self.IterateProcesses.__name__
) | Start processing. | Start | python | google/grr | grr/server/grr_response_server/flows/general/processes.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/processes.py | Apache-2.0 |
def IterateProcesses(
self, responses_any: flow_responses.Responses[any_pb2.Any]
) -> None:
"""This stores the processes."""
if not responses_any.success:
      # Check for errors, but continue. Errors are common on the client.
raise flow_base.FlowError(
"Error during process listing %s" % responses_any.status
)
responses = []
for response_any in responses_any:
response = sysinfo_pb2.Process()
response_any.Unpack(response)
responses.append(response)
if self.proto_args.pids:
pids = set(self.proto_args.pids)
responses = [p for p in responses if p.pid in pids]
if self.proto_args.fetch_binaries:
      # Filter out process entries without an "exe" attribute and
# deduplicate the list.
paths_to_fetch = set()
for p in responses:
if p.exe and self._FilenameMatch(p) and self._ConnectionStateMatch(p):
paths_to_fetch.add(p.exe)
paths_to_fetch = sorted(paths_to_fetch)
self.Log(
"Got %d processes, fetching binaries for %d...",
len(responses),
len(paths_to_fetch),
)
self.CallFlowProto(
file_finder.ClientFileFinder.__name__,
flow_args=flows_pb2.FileFinderArgs(
paths=paths_to_fetch,
action=flows_pb2.FileFinderAction(
action_type=flows_pb2.FileFinderAction.Action.DOWNLOAD
),
),
next_state=self.HandleDownloadedFiles.__name__,
)
else:
# Only send the list of processes if we don't fetch the binaries
skipped = 0
for p in responses:
# It's normal to have lots of sleeping processes with no executable path
# associated.
if p.exe:
if self._FilenameMatch(p) and self._ConnectionStateMatch(p):
self.SendReplyProto(p)
else:
if self.args.connection_states:
if self._ConnectionStateMatch(p):
self.SendReplyProto(p)
else:
skipped += 1
if skipped:
self.Log("Skipped %s entries, missing path for regex" % skipped) | This stores the processes. | IterateProcesses | python | google/grr | grr/server/grr_response_server/flows/general/processes.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/processes.py | Apache-2.0 |
def HandleDownloadedFiles(
self, responses: flow_responses.Responses[any_pb2.Any]
) -> None:
"""Handle success/failure of the FileFinder flow."""
if not responses.success:
self.Log(
"Download of file %s failed %s",
responses.request_data["path"],
responses.status,
)
for response_any in responses:
response = flows_pb2.FileFinderResult()
response_any.Unpack(response)
self.Log("Downloaded %s", response.stat_entry.pathspec)
self.SendReplyProto(response.stat_entry) | Handle success/failure of the FileFinder flow. | HandleDownloadedFiles | python | google/grr | grr/server/grr_response_server/flows/general/processes.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/processes.py | Apache-2.0 |
def _GenerateResumableUploadURL(
self, bucket_name: str, blob_name: str, sa_email: str, expires_hours: int
) -> str:
"""Generates a v4 signed URL for uploading a blob using HTTP POST."""
auth_request = requests.Request()
credentials, project = google.auth.default()
storage_client = storage.Client(project, credentials)
bucket = storage_client.lookup_bucket(bucket_name)
blob = bucket.blob(blob_name)
expires_at = datetime.datetime.now() + datetime.timedelta(
hours=expires_hours
)
signing_credentials = compute_engine.IDTokenCredentials(
auth_request, "", service_account_email=sa_email
)
signed_url = blob.generate_signed_url(
version="v4",
expiration=expires_at,
method="POST",
content_type="application/octet-stream",
headers={
"X-Goog-Resumable": "start",
"Content-Type": "application/octet-stream",
},
credentials=signing_credentials,
)
return signed_url | Generates a v4 signed URL for uploading a blob using HTTP POST. | _GenerateResumableUploadURL | python | google/grr | grr/server/grr_response_server/flows/general/large_file.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/large_file.py | Apache-2.0 |
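The signed URL above is meant for a resumable upload: the client first POSTs to it with the `X-Goog-Resumable: start` header and then streams data to the session URI returned in the `Location` header. A hedged client-side sketch with `requests`; the URL, file path and single-PUT upload are placeholders and simplifications, not GRR code.

# Hedged sketch of how a client could use the signed URL: start a resumable
# session, then upload the payload to the returned session URI.
import requests

signed_url = "https://storage.googleapis.com/example-bucket/example-object?X-Goog-Signature=..."

# Step 1: initiate the resumable session. The headers must match the ones the
# URL was signed with (X-Goog-Resumable and Content-Type above).
start = requests.post(
    signed_url,
    headers={
        "X-Goog-Resumable": "start",
        "Content-Type": "application/octet-stream",
    },
)
start.raise_for_status()
session_uri = start.headers["Location"]

# Step 2: stream the data to the session URI in a single PUT (large files
# could instead send ranges with Content-Range headers).
with open("/tmp/large_file.bin", "rb") as fd:
  upload = requests.put(session_uri, data=fd)
upload.raise_for_status()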
def setUp(self):
"""Make sure things are initialized."""
super().setUp()
patcher = artifact_test_lib.PatchDefaultArtifactRegistry()
patcher.start()
self.addCleanup(patcher.stop)
artifact_registry.REGISTRY.ClearSources()
artifact_registry.REGISTRY.ClearRegistry()
test_artifacts_file = os.path.join(
config.CONFIG["Test.data_dir"], "artifacts", "test_artifacts.json"
)
artifact_registry.REGISTRY.AddFileSource(test_artifacts_file)
self.fakeartifact = artifact_registry.REGISTRY.GetArtifact("FakeArtifact")
self.fakeartifact2 = artifact_registry.REGISTRY.GetArtifact("FakeArtifact2")
self.output_count = 0 | Make sure things are initialized. | setUp | python | google/grr | grr/server/grr_response_server/flows/general/collectors_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/collectors_test.py | Apache-2.0 |
def testGetArtifact(self):
"""Test we can get a basic artifact."""
# Dynamically add an ArtifactSource specifying the base path.
file_path = os.path.join(self.base_path, "win_hello.exe")
coll1 = rdf_artifacts.ArtifactSource(
type=rdf_artifacts.ArtifactSource.SourceType.FILE,
attributes={"paths": [file_path]},
)
self.fakeartifact.sources.append(coll1)
self._GetArtifact("FakeArtifact") | Test we can get a basic artifact. | testGetArtifact | python | google/grr | grr/server/grr_response_server/flows/general/collectors_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/collectors_test.py | Apache-2.0 |
def testSupportedOS(self):
"""Test supported_os inside the collector object."""
client_id = self.SetupClient(0, system="Linux")
class FileFinderReturnsFoo(actions.ActionPlugin):
in_rdfvalue = rdf_file_finder.FileFinderArgs
out_rdfvalues = [rdf_file_finder.FileFinderResult]
def Run(self, args: rdf_file_finder.FileFinderArgs) -> None:
self.SendReply(
rdf_file_finder.FileFinderResult(
stat_entry=rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/foo",
pathtype=rdf_paths.PathSpec.PathType.OS,
)
)
)
)
client_mock = action_mocks.ActionMock.With({
"FileFinderOS": FileFinderReturnsFoo,
})
coll1 = rdf_artifacts.ArtifactSource(
type=rdf_artifacts.ArtifactSource.SourceType.PATH,
attributes={"paths": ["/foo"]},
supported_os=["Windows"],
)
self.fakeartifact.sources.append(coll1)
results = self._RunPathArtifact(client_id, client_mock, ["FakeArtifact"])
self.assertEmpty(results)
coll1.supported_os = ["Linux", "Windows"]
self.fakeartifact.sources = []
self.fakeartifact.sources.append(coll1)
results = self._RunPathArtifact(client_id, client_mock, ["FakeArtifact"])
self.assertTrue(results)
coll1.supported_os = ["NotTrue"]
self.fakeartifact.sources = []
self.fakeartifact.sources.append(coll1)
results = self._RunPathArtifact(client_id, client_mock, ["FakeArtifact"])
self.assertEmpty(results)
coll1.supported_os = ["Linux", "Windows"]
self.fakeartifact.supported_os = ["Linux"]
results = self._RunPathArtifact(client_id, client_mock, ["FakeArtifact"])
self.assertTrue(results)
self.fakeartifact.supported_os = ["Windows"]
results = self._RunPathArtifact(client_id, client_mock, ["FakeArtifact"])
self.assertEmpty(results) | Test supported_os inside the collector object. | testSupportedOS | python | google/grr | grr/server/grr_response_server/flows/general/collectors_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/collectors_test.py | Apache-2.0 |
def testSplitsResultsByArtifact(self):
"""Test that artifacts get split into separate collections."""
client_id = self.SetupClient(0, system="Linux")
class FileFinderReturnsFooBar(actions.ActionPlugin):
in_rdfvalue = rdf_file_finder.FileFinderArgs
out_rdfvalues = [rdf_file_finder.FileFinderResult]
def Run(self, args: rdf_file_finder.FileFinderArgs) -> None:
self.SendReply(
rdf_file_finder.FileFinderResult(
stat_entry=rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/test/bar",
pathtype=rdf_paths.PathSpec.PathType.OS,
)
)
)
)
coll1 = rdf_artifacts.ArtifactSource(
type=rdf_artifacts.ArtifactSource.SourceType.PATH,
attributes={"paths": ["/foo/bar"]},
)
self.fakeartifact.sources.append(coll1)
self.fakeartifact2.sources.append(coll1)
artifact_list = ["FakeArtifact", "FakeArtifact2"]
flow_id = flow_test_lib.StartAndRunFlow(
collectors.ArtifactCollectorFlow,
action_mocks.ActionMock.With({
"FileFinderOS": FileFinderReturnsFooBar,
}),
creator=self.test_username,
client_id=client_id,
flow_args=rdf_artifacts.ArtifactCollectorFlowArgs(
artifact_list=artifact_list,
split_output_by_artifact=True,
),
)
results_by_tag = flow_test_lib.GetFlowResultsByTag(client_id, flow_id)
self.assertCountEqual(
results_by_tag.keys(),
["artifact:FakeArtifact", "artifact:FakeArtifact2"],
) | Test that artifacts get split into separate collections. | testSplitsResultsByArtifact | python | google/grr | grr/server/grr_response_server/flows/general/collectors_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/collectors_test.py | Apache-2.0 |
def testSourceMeetsOSConditions(self):
"""Test we can get a GRR client artifact with conditions."""
knowledge_base = rdf_client.KnowledgeBase()
knowledge_base.os = "Windows"
# Run with unsupported OS.
source = rdf_artifacts.ArtifactSource(
type=rdf_artifacts.ArtifactSource.SourceType.PATH,
attributes={"paths": ["/test/foo"]},
supported_os=["Linux"],
)
self.assertFalse(collectors.MeetsOSConditions(knowledge_base, source))
# Run with supported OS.
source = rdf_artifacts.ArtifactSource(
type=rdf_artifacts.ArtifactSource.SourceType.PATH,
attributes={"paths": ["/test/foo"]},
supported_os=["Linux", "Windows"],
)
self.assertTrue(collectors.MeetsOSConditions(knowledge_base, source)) | Test we can get a GRR client artifact with conditions. | testSourceMeetsOSConditions | python | google/grr | grr/server/grr_response_server/flows/general/collectors_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/collectors_test.py | Apache-2.0 |
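The test above exercises the `supported_os` filter: a source is used only when the client's OS appears in its list, and (assuming the usual GRR semantics) an empty list means no restriction. A minimal sketch of that check:

# Minimal sketch of a supported_os check: an empty list means "no restriction".
from typing import Sequence


def meets_os_conditions(client_os: str, supported_os: Sequence[str]) -> bool:
  return not supported_os or client_os in supported_os


print(meets_os_conditions("Windows", ["Linux"]))             # False
print(meets_os_conditions("Windows", ["Linux", "Windows"]))  # True
print(meets_os_conditions("Windows", []))                    # True (unrestricted)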
def Start(self):
"""Get information about the file from the client."""
self.state.max_chunk_number = max(
2, self.args.read_length // self.CHUNK_SIZE
)
self.state.current_chunk_number = 0
self.state.file_size = 0
self.state.blobs = []
self.state.stat_entry = None
self.state.num_bytes_collected = 0
self.state.target_pathspec = self.args.pathspec.Copy()
request = rdf_client_action.GetFileStatRequest(
pathspec=self.state.target_pathspec,
follow_symlink=True,
)
self.CallClient(
server_stubs.GetFileStat,
request,
next_state=self.Stat.__name__,
) | Get information about the file from the client. | Start | python | google/grr | grr/server/grr_response_server/flows/general/transfer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/transfer.py | Apache-2.0 |
def Stat(self, responses):
"""Fix up the pathspec of the file."""
response = responses.First()
file_size_known = True
if responses.success and response:
if stat.S_ISDIR(int(response.st_mode)):
raise ValueError("`GetFile` called on a directory")
if not stat.S_ISREG(int(response.st_mode)) and response.st_size == 0:
file_size_known = False
self.state.stat_entry = response
else:
if not self.args.ignore_stat_failure:
raise IOError("Error: %s" % responses.status)
# Just fill up a bogus stat entry.
self.state.stat_entry = rdf_client_fs.StatEntry(
pathspec=self.state.target_pathspec
)
file_size_known = False
# File size is not known, so we have to use user-provided read_length
# or pathspec.file_size_override to limit the amount of bytes we're
# going to try to read.
if not file_size_known:
if (
not self.state.target_pathspec.HasField("file_size_override")
and not self.args.read_length
):
raise ValueError(
"The file couldn't be stat-ed. Its size is not known."
" Either read_length or pathspec.file_size_override"
" has to be provided."
)
# This is not a regular file and the size is 0. Let's use read_length or
# file_size_override as a best guess for the file size.
if self.args.read_length == 0:
self.state.stat_entry.st_size = (
self.state.target_pathspec.file_size_override
)
else:
self.state.stat_entry.st_size = (
self.state.target_pathspec.offset + self.args.read_length
)
# Adjust the size from st_size if read length is not specified.
if self.args.read_length == 0:
self.state.file_size = max(
0,
self.state.stat_entry.st_size - self.state.stat_entry.pathspec.offset,
)
else:
self.state.file_size = self.args.read_length
if not self.state.target_pathspec.HasField("file_size_override"):
self.state.target_pathspec.file_size_override = (
self.state.target_pathspec.offset + self.args.read_length
)
self.state.max_chunk_number = (self.state.file_size // self.CHUNK_SIZE) + 1
self.FetchWindow(
min(
self.WINDOW_SIZE,
self.state.max_chunk_number - self.state["current_chunk_number"],
)
) | Fix up the pathspec of the file. | Stat | python | google/grr | grr/server/grr_response_server/flows/general/transfer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/transfer.py | Apache-2.0 |
def FetchWindow(self, number_of_chunks_to_readahead):
"""Read ahead a number of buffers to fill the window."""
for _ in range(number_of_chunks_to_readahead):
# Do not read past the end of file
next_offset = self.state.current_chunk_number * self.CHUNK_SIZE
if next_offset >= self.state.file_size:
return
request = rdf_client.BufferReference(
pathspec=self.state.target_pathspec,
offset=next_offset,
length=min(self.state.file_size - next_offset, self.CHUNK_SIZE),
)
self.CallClient(
server_stubs.TransferBuffer,
request,
next_state=self.ReadBuffer.__name__,
)
self.state.current_chunk_number += 1 | Read ahead a number of buffers to fill the window. | FetchWindow | python | google/grr | grr/server/grr_response_server/flows/general/transfer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/transfer.py | Apache-2.0 |
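FetchWindow keeps a bounded number of chunk requests in flight and tops the window back up as each buffer arrives. A synchronous, standalone sketch of that sliding-window scheduling; the class is illustrative and the constants are arbitrary example values, not the ones GRR uses.

# Sketch of windowed chunk scheduling: keep up to WINDOW_SIZE requests in
# flight and issue one more each time a response comes back.
CHUNK_SIZE = 512 * 1024
WINDOW_SIZE = 200


class WindowedReader:

  def __init__(self, file_size: int):
    self.file_size = file_size
    self.current_chunk = 0
    self.issued = []  # (offset, length) of requests sent so far

  def fetch_window(self, count: int) -> None:
    for _ in range(count):
      offset = self.current_chunk * CHUNK_SIZE
      if offset >= self.file_size:  # never read past the end of the file
        return
      length = min(self.file_size - offset, CHUNK_SIZE)
      self.issued.append((offset, length))
      self.current_chunk += 1

  def on_response(self) -> None:
    # Each arriving buffer frees one slot in the window.
    self.fetch_window(1)


reader = WindowedReader(file_size=3 * CHUNK_SIZE + 100)
reader.fetch_window(WINDOW_SIZE)  # initial fill, capped by the file size
print(len(reader.issued))         # 4 chunks: 3 full + 1 partial
print(reader.issued[-1])          # (1572864, 100)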
def ReadBuffer(self, responses):
"""Read the buffer and write to the file."""
# Did it work?
if not responses.success:
return
response = responses.First()
if not response:
raise IOError("Missing hash for offset %s missing" % response.offset)
self.state.num_bytes_collected += response.length
if response.offset <= self.state.max_chunk_number * self.CHUNK_SIZE:
# Response.data is the hash of the block (32 bytes) and
# response.length is the length of the block.
self.state.blobs.append((response.data, response.length))
self.Log("Received blob hash %s", text.Hexify(response.data))
# Add one more chunk to the window.
self.FetchWindow(1) | Read the buffer and write to the file. | ReadBuffer | python | google/grr | grr/server/grr_response_server/flows/general/transfer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/transfer.py | Apache-2.0 |
def End(self) -> None:
"""Finalize reading the file."""
if self.state.num_bytes_collected >= 0:
self._AddFileToFileStore()
stat_entry = self.state.stat_entry
if self.state.num_bytes_collected >= self.state.file_size:
self.Log(
"File %s transferred successfully.",
stat_entry.AFF4Path(self.client_urn),
)
else:
self.Log(
"File %s transferred partially (%d bytes out of %d).",
stat_entry.AFF4Path(self.client_urn),
self.state.num_bytes_collected,
self.state.file_size,
)
# Notify any parent flows the file is ready to be used now.
self.SendReply(stat_entry)
else:
self.Log("File transfer failed.") | Finalize reading the file. | End | python | google/grr | grr/server/grr_response_server/flows/general/transfer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/transfer.py | Apache-2.0 |
def Start(
self, file_size=0, maximum_pending_files=1000, use_external_stores=True
):
"""Initialize our state."""
super().Start()
self.state.files_hashed = 0
self.state.use_external_stores = use_external_stores
self.state.file_size = file_size
self.state.files_to_fetch = 0
self.state.files_fetched = 0
self.state.files_skipped = 0
self.state.files_failed = 0
# Controls how far to go on the collection: stat, hash and collect contents.
# By default we go through the whole process (collecting file contents), but
# we can stop when we finish getting the stat or hash.
self.state.stop_at_stat = False
self.state.stop_at_hash = False
# Counter to batch up hash checking in the filestore
self.state.files_hashed_since_check = 0
# A dict of file trackers which are waiting to be stat'd.
    # Keys are vfs urns and values are FileTracker instances. Values are
# copied to pending_hashes for download if not present in FileStore.
self.state.pending_stats: Mapping[int, Mapping[str, Any]] = {}
# A dict of file trackers which are waiting to be checked by the file
    # store. Keys are vfs urns and values are FileTracker instances. Values are
# copied to pending_files for download if not present in FileStore.
self.state.pending_hashes = {}
# A dict of file trackers currently being fetched. Keys are vfs urns and
# values are FileTracker instances.
self.state.pending_files = {}
# The maximum number of files we are allowed to download concurrently.
self.state.maximum_pending_files = maximum_pending_files
# As pathspecs are added to the flow they are appended to this array. We
# then simply pass their index in this array as a surrogate for the full
# pathspec. This allows us to use integers to track pathspecs in dicts etc.
self.state.indexed_pathspecs = []
self.state.request_data_list = []
# The index of the next pathspec to start. Pathspecs are added to
# indexed_pathspecs and wait there until there are free trackers for
# them. When the number of pending_files falls below the
# "maximum_pending_files" count] = we increment this index and start of
# downloading another pathspec.
self.state.next_pathspec_to_start = 0
# Number of blob hashes we have received but not yet scheduled for download.
self.state.blob_hashes_pending = 0 | Initialize our state. | Start | python | google/grr | grr/server/grr_response_server/flows/general/transfer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/transfer.py | Apache-2.0 |
def StartFileFetch(self, pathspec, request_data=None):
"""The entry point for this flow mixin - Schedules new file transfer."""
# Create an index so we can find this pathspec later.
self.state.indexed_pathspecs.append(pathspec)
self.state.request_data_list.append(request_data)
self._TryToStartNextPathspec() | The entry point for this flow mixin - Schedules new file transfer. | StartFileFetch | python | google/grr | grr/server/grr_response_server/flows/general/transfer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/transfer.py | Apache-2.0 |
def _TryToStartNextPathspec(self):
"""Try to schedule the next pathspec if there is enough capacity."""
# If there's no capacity, there's nothing to do here.
if not self._HasEnoughCapacity():
return
try:
index = self.state.next_pathspec_to_start
pathspec = self.state.indexed_pathspecs[index]
self.state.next_pathspec_to_start = index + 1
except IndexError:
# We did all the pathspecs, nothing left to do here.
return
# First stat the file, then hash the file if needed.
self._ScheduleStatFile(index, pathspec)
if self.state.stop_at_stat:
return
self._ScheduleHashFile(index, pathspec) | Try to schedule the next pathspec if there is enough capacity. | _TryToStartNextPathspec | python | google/grr | grr/server/grr_response_server/flows/general/transfer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/transfer.py | Apache-2.0 |
def _HasEnoughCapacity(self) -> bool:
"""Checks whether there is enough capacity to schedule next pathspec."""
if self.state.maximum_pending_files <= len(self.state.pending_files):
return False
if self.state.maximum_pending_files <= len(self.state.pending_hashes):
return False
if self.state.maximum_pending_files <= len(self.state.pending_stats):
return False
return True | Checks whether there is enough capacity to schedule next pathspec. | _HasEnoughCapacity | python | google/grr | grr/server/grr_response_server/flows/general/transfer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/transfer.py | Apache-2.0 |
def _ScheduleStatFile(self, index: int, pathspec: rdf_paths.PathSpec) -> None:
"""Schedules the appropriate Stat File Client Action.
Args:
index: Index of the current file to get Stat for.
pathspec: Pathspec of the current file to get Stat for.
"""
# Add the file tracker to the pending stats list where it waits until the
# stat comes back.
self.state.pending_stats[index] = {"index": index}
request = rdf_client_action.GetFileStatRequest(
pathspec=pathspec,
follow_symlink=True,
)
self.CallClient(
server_stubs.GetFileStat,
request,
next_state=self._ReceiveFileStat.__name__,
request_data=dict(index=index, request_name="GetFileStat"),
) | Schedules the appropriate Stat File Client Action.
Args:
index: Index of the current file to get Stat for.
pathspec: Pathspec of the current file to get Stat for. | _ScheduleStatFile | python | google/grr | grr/server/grr_response_server/flows/general/transfer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/transfer.py | Apache-2.0 |
def _ScheduleHashFile(self, index: int, pathspec: rdf_paths.PathSpec) -> None:
"""Schedules the HashFile Client Action.
Args:
index: Index of the current file to be hashed.
pathspec: Pathspec of the current file to be hashed.
"""
# Add the file tracker to the pending hashes list where it waits until the
# hash comes back.
self.state.pending_hashes[index] = {"index": index}
request = rdf_client_action.FingerprintRequest(
pathspec=pathspec, max_filesize=self.state.file_size
)
request.AddRequest(
fp_type=rdf_client_action.FingerprintTuple.Type.FPT_GENERIC,
hashers=[
rdf_client_action.FingerprintTuple.HashType.MD5,
rdf_client_action.FingerprintTuple.HashType.SHA1,
rdf_client_action.FingerprintTuple.HashType.SHA256,
],
)
self.CallClient(
server_stubs.HashFile,
request,
next_state=self._ReceiveFileHash.__name__,
request_data=dict(index=index),
) | Schedules the HashFile Client Action.
Args:
index: Index of the current file to be hashed.
pathspec: Pathspec of the current file to be hashed. | _ScheduleHashFile | python | google/grr | grr/server/grr_response_server/flows/general/transfer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/transfer.py | Apache-2.0 |
def _RemoveCompletedPathspec(self, index):
"""Removes a pathspec from the list of pathspecs."""
pathspec = self.state.indexed_pathspecs[index]
request_data = self.state.request_data_list[index]
self.state.indexed_pathspecs[index] = None
self.state.request_data_list[index] = None
self.state.pending_stats.pop(index, None)
self.state.pending_hashes.pop(index, None)
self.state.pending_files.pop(index, None)
# We have a bit more room in the pending_hashes so we try to schedule
# another pathspec.
self._TryToStartNextPathspec()
return pathspec, request_data | Removes a pathspec from the list of pathspecs. | _RemoveCompletedPathspec | python | google/grr | grr/server/grr_response_server/flows/general/transfer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/transfer.py | Apache-2.0 |
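Completed entries are tombstoned with `None` rather than removed, so indices recorded elsewhere keep pointing at the right slot. A small sketch of that index-stable bookkeeping:

# Sketch of index-stable tracking: finished slots are set to None instead of
# being removed, so outstanding requests can keep referring to their index.
indexed_pathspecs = ["/etc/passwd", "/etc/shadow", "/var/log/syslog"]
pending = {0: {}, 1: {}, 2: {}}  # index -> per-file tracker


def complete(index: int) -> str:
  pathspec = indexed_pathspecs[index]
  indexed_pathspecs[index] = None  # tombstone, do not shift later indices
  pending.pop(index, None)
  return pathspec


print(complete(1))        # '/etc/shadow'
print(indexed_pathspecs)  # ['/etc/passwd', None, '/var/log/syslog']
print(sorted(pending))    # [0, 2] -- index 2 still points at the same file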
def _ReceiveFetchedFile(self, tracker, is_duplicate=False):
"""Remove pathspec for this index and call the ReceiveFetchedFile method."""
index = tracker["index"]
_, request_data = self._RemoveCompletedPathspec(index)
# Report the request_data for this flow's caller.
self.ReceiveFetchedFile(
tracker["stat_entry"],
tracker["hash_obj"],
request_data=request_data,
is_duplicate=is_duplicate,
) | Remove pathspec for this index and call the ReceiveFetchedFile method. | _ReceiveFetchedFile | python | google/grr | grr/server/grr_response_server/flows/general/transfer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/transfer.py | Apache-2.0 |
def ReceiveFetchedFile(
self,
stat_entry,
file_hash,
request_data=None,
is_duplicate=False,
):
"""This method will be called for each new file successfully fetched.
Args:
stat_entry: rdf_client_fs.StatEntry object describing the file.
file_hash: rdf_crypto.Hash object with file hashes.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call.
is_duplicate: If True, the file wasn't actually collected as its hash was
found in the filestore.
""" | This method will be called for each new file successfully fetched.
Args:
stat_entry: rdf_client_fs.StatEntry object describing the file.
file_hash: rdf_crypto.Hash object with file hashes.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call.
is_duplicate: If True, the file wasn't actually collected as its hash was
found in the filestore. | ReceiveFetchedFile | python | google/grr | grr/server/grr_response_server/flows/general/transfer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/transfer.py | Apache-2.0 |
def _FileFetchFailed(
self,
index: int,
status: Optional[rdf_flow_objects.FlowStatus],
):
"""Remove pathspec for this index and call the FileFetchFailed method."""
pathspec, request_data = self._RemoveCompletedPathspec(index)
if pathspec is None:
# This was already reported as failed.
return
self.state.files_failed += 1
# Report the request_data for this flow's caller.
self.FileFetchFailed(pathspec, request_data=request_data, status=status) | Remove pathspec for this index and call the FileFetchFailed method. | _FileFetchFailed | python | google/grr | grr/server/grr_response_server/flows/general/transfer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/transfer.py | Apache-2.0 |
def FileFetchFailed(
self,
pathspec: rdf_paths.PathSpec,
request_data: Any = None,
status: Optional[rdf_flow_objects.FlowStatus] = None,
):
"""This method will be called when stat or hash requests fail.
Args:
pathspec: Pathspec of a file that failed to be fetched.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call.
status: FlowStatus that contains more error details.
""" | This method will be called when stat or hash requests fail.
Args:
pathspec: Pathspec of a file that failed to be fetched.
request_data: Arbitrary dictionary that was passed to the corresponding
StartFileFetch call.
status: FlowStatus that contains more error details. | FileFetchFailed | python | google/grr | grr/server/grr_response_server/flows/general/transfer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/transfer.py | Apache-2.0 |
def _ReceiveFileStat(self, responses):
"""Stores stat entry in the flow's state."""
index = responses.request_data["index"]
if not responses.success:
self.Log("Failed to stat file: %s", responses.status)
self.state.pending_stats.pop(index, None)
# Report failure.
self._FileFetchFailed(index, status=responses.status)
return
stat_entry = responses.First()
# This stat is no longer pending, so we free the tracker.
self.state.pending_stats.pop(index, None)
request_data = self.state.request_data_list[index]
self.ReceiveFetchedFileStat(stat_entry, request_data)
if self.state.stop_at_stat:
self._RemoveCompletedPathspec(index)
return
# Propagate stat information to hash queue (same index is used across).
hash_tracker = self.state.pending_hashes[index]
hash_tracker["stat_entry"] = stat_entry | Stores stat entry in the flow's state. | _ReceiveFileStat | python | google/grr | grr/server/grr_response_server/flows/general/transfer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/general/transfer.py | Apache-2.0 |