code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def testCPULimitExceeded(self):
"""This tests that the cpu limit for flows is working."""
client_mock = action_mocks.CPULimitClientMock(
user_cpu_usage=[10], system_cpu_usage=[10], network_usage=[1000]
)
with test_lib.SuppressLogs():
flow_id = flow_test_lib.StartAndRunFlow(
flow_test_lib.CPULimitFlow,
client_mock=client_mock,
client_id=self.client_id,
cpu_limit=30,
network_bytes_limit=10000,
check_flow_errors=False,
)
rdf_flow = data_store.REL_DB.ReadFlowObject(self.client_id, flow_id)
self.assertEqual(rdf_flow.flow_state, flows_pb2.Flow.FlowState.ERROR)
self.assertIn("CPU limit exceeded", rdf_flow.error_message) | This tests that the cpu limit for flows is working. | testCPULimitExceeded | python | google/grr | grr/server/grr_response_server/flow_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_test.py | Apache-2.0 |
def testNetworkLimitExceeded(self):
"""This tests that the network limit for flows is working."""
client_mock = action_mocks.CPULimitClientMock(
user_cpu_usage=[10], system_cpu_usage=[10], network_usage=[1000]
)
with test_lib.SuppressLogs():
flow_id = flow_test_lib.StartAndRunFlow(
flow_test_lib.CPULimitFlow,
client_mock=client_mock,
client_id=self.client_id,
cpu_limit=1000,
network_bytes_limit=1500,
check_flow_errors=False,
)
rdf_flow = data_store.REL_DB.ReadFlowObject(self.client_id, flow_id)
self.assertEqual(rdf_flow.flow_state, flows_pb2.Flow.FlowState.ERROR)
self.assertIn("bytes limit exceeded", rdf_flow.error_message) | This tests that the network limit for flows is working. | testNetworkLimitExceeded | python | google/grr | grr/server/grr_response_server/flow_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_test.py | Apache-2.0 |
def testUserGetsNotificationWithNumberOfResultsProto(self):
username = "notification_test_user"
self.CreateUser(username)
class FlowWithMultipleResultTypesProto(flow_base.FlowBase):
"""Flow with multiple result types."""
proto_result_types = (
jobs_pb2.LogMessage,
jobs_pb2.PathSpec,
jobs_pb2.ClientInformation,
)
def Start(self):
self.SendReplyProto(jobs_pb2.LogMessage(data="foo"))
self.SendReplyProto(jobs_pb2.PathSpec(path="bar.txt"))
self.SendReplyProto(jobs_pb2.PathSpec(path="baz.txt"))
self.SendReplyProto(jobs_pb2.ClientInformation(client_name="foo"))
self.SendReplyProto(jobs_pb2.ClientInformation(client_name="bar"))
self.SendReplyProto(jobs_pb2.ClientInformation(client_name="baz"))
flow_test_lib.StartAndRunFlow(
FlowWithMultipleResultTypesProto,
client_id=self.client_id,
creator=username,
)
notifications = self.GetUserNotifications(username)
self.assertIn(
"FlowWithMultipleResultTypesProto completed with 6 results",
notifications[0].message,
) | Flow with multiple result types. | testUserGetsNotificationWithNumberOfResultsProto | python | google/grr | grr/server/grr_response_server/flow_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_test.py | Apache-2.0 |
def testOutputPluginsOnlyRunInParentFlow_DoesNotForwardProto(self):
class ChildFlowProtoIgnored(flow_base.FlowBase):
proto_result_types = (jobs_pb2.LogMessage,)
def Start(self):
self.SendReplyProto(jobs_pb2.LogMessage(data="IgnoredInParent"))
class ParentFlowWithoutForwardingOutputPluginsProto(flow_base.FlowBase):
"""This flow creates a Child without forwarding OutputPlugins."""
proto_result_types = (jobs_pb2.LogMessage,)
def Start(self):
# Call the child flow WITHOUT output plugins.
self.CallFlowProto(
ChildFlowProtoIgnored.__name__, next_state="IgnoreChildReplies"
)
def IgnoreChildReplies(self, responses):
del responses # Unused
self.SendReplyProto(jobs_pb2.LogMessage(data="Parent received"))
self.RunFlow(
flow_cls=ParentFlowWithoutForwardingOutputPluginsProto,
client_mock=ClientMock(),
output_plugins=[
rdf_output_plugin.OutputPluginDescriptor(
plugin_name="DummyFlowOutputPlugin"
)
],
)
# Parent calls once, and child doesn't call.
self.assertEqual(test_output_plugins.DummyFlowOutputPlugin.num_calls, 1)
# Only the parent's single response reaches the output plugin; the child's reply is not forwarded.
self.assertEqual(test_output_plugins.DummyFlowOutputPlugin.num_responses, 1) | This flow creates a Child without forwarding OutputPlugins. | testOutputPluginsOnlyRunInParentFlow_DoesNotForwardProto | python | google/grr | grr/server/grr_response_server/flow_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_test.py | Apache-2.0 |
def testAllConfigs(self):
"""Go through all our config files looking for errors."""
# Test the current loaded configuration.
configs = [config.CONFIG]
# Test all the other configs in the server config dir (/etc/grr by default)
glob_path = os.path.join(config.CONFIG["Config.directory"], "*.yaml")
for cfg_file in glob.glob(glob_path):
if os.access(cfg_file, os.R_OK):
configs.append(cfg_file)
else:
logging.info(
"Skipping checking %s, you probably need to be root", cfg_file
)
self.ValidateConfigs(configs) | Go through all our config files looking for errors. | testAllConfigs | python | google/grr | grr/server/grr_response_server/config_validation_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/config_validation_test.py | Apache-2.0 |
def setupAndRun(self, cls: type[flow_base.FlowBase]) -> flows_pb2.Flow:
"""Sets up and runs a flow of the given type."""
assert data_store.REL_DB is not None
db = data_store.REL_DB
client_id = db_test_utils.InitializeClient(db)
test_username = db_test_utils.InitializeUser(db)
flow_id = flow_test_lib.StartAndRunFlow(
cls,
action_mocks.ActionMock(action_mocks.Store),
client_id=client_id,
creator=test_username,
)
return db.ReadFlowObject(client_id, flow_id) | Sets up and runs a flow of the given type. | setupAndRun | python | google/grr | grr/server/grr_response_server/flow_base_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base_test.py | Apache-2.0 |
def testStorePersists_CallState(self):
class StoreCallStateFlow(
flow_base.FlowBase[flows_pb2.EmptyFlowArgs, tests_pb2.DummyFlowStore]
):
"""Dummy flow that uses store."""
proto_store_type = tests_pb2.DummyFlowStore
def Start(self) -> None:
self.store.msg = "Hello from Start!"
self.CallState(next_state="AfterCallState")
def AfterCallState(self, responses=None):
del responses
assert self.store.msg == "Hello from Start!"
self.store.msg = "Hello from AfterCallState!"
flow = self.setupAndRun(StoreCallStateFlow)
self.assertTrue(flow.HasField("store"))
store = tests_pb2.DummyFlowStore()
flow.store.Unpack(store)
self.assertEqual(store.msg, "Hello from AfterCallState!") | Dummy flow that uses store. | testStorePersists_CallState | python | google/grr | grr/server/grr_response_server/flow_base_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base_test.py | Apache-2.0 |
def ProcessMessageHandlerRequests(
requests: Sequence[objects_pb2.MessageHandlerRequest],
) -> None:
"""Processes message handler requests."""
logging.info(
"Leased message handler request ids: %s",
",".join(str(r.request_id) for r in requests),
)
grouped_requests = collection.Group(requests, lambda r: r.handler_name)
for handler_name, requests_for_handler in grouped_requests.items():
requests_for_handler = [
mig_objects.ToRDFMessageHandlerRequest(r) for r in requests_for_handler
]
handler_cls = handler_registry.handler_name_map.get(handler_name)
if not handler_cls:
logging.error("Unknown message handler: %s", handler_name)
continue
num_requests = len(requests_for_handler)
WELL_KNOWN_FLOW_REQUESTS.Increment(
fields=[handler_name], delta=num_requests
)
try:
logging.debug(
"Running %d messages for handler %s", num_requests, handler_name
)
handler_cls().ProcessMessages(requests_for_handler)
except Exception as e: # pylint: disable=broad-except
logging.exception(
"Exception while processing message handler %s: %s", handler_name, e
)
logging.info(
"Deleting message handler request ids: %s",
",".join(str(r.request_id) for r in requests),
)
data_store.REL_DB.DeleteMessageHandlerRequests(requests) | Processes message handler requests. | ProcessMessageHandlerRequests | python | google/grr | grr/server/grr_response_server/worker_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/worker_lib.py | Apache-2.0 |
def __init__(self, disabled: bool = False):
"""Constructor."""
self.disabled = disabled
logging.info("Started GRR worker.") | Constructor. | __init__ | python | google/grr | grr/server/grr_response_server/worker_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/worker_lib.py | Apache-2.0 |
def Run(self) -> None:
"""Event loop."""
if not self.disabled:
data_store.REL_DB.RegisterMessageHandler(
ProcessMessageHandlerRequests,
self.message_handler_lease_time,
limit=100,
)
data_store.REL_DB.RegisterFlowProcessingHandler(self.ProcessFlow)
try:
# The main thread just keeps sleeping and listens to keyboard interrupt
# events in case the server is running from a console.
while True:
time.sleep(3600)
except KeyboardInterrupt:
logging.info("Caught interrupt, exiting.")
self.Shutdown() | Event loop. | Run | python | google/grr | grr/server/grr_response_server/worker_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/worker_lib.py | Apache-2.0 |
def _ReleaseProcessedFlow(self, flow_obj: rdf_flow_objects.Flow) -> bool:
"""Release a processed flow if the processing deadline is not exceeded."""
rdf_flow = flow_obj.rdf_flow
if rdf_flow.processing_deadline < rdfvalue.RDFDatetime.Now():
raise flow_base.FlowError(
"Lease expired for flow %s on %s (%s)."
% (
rdf_flow.flow_id,
rdf_flow.client_id,
rdf_flow.processing_deadline,
),
)
flow_obj.FlushQueuedMessages()
proto_flow = mig_flow_objects.ToProtoFlow(rdf_flow)
return data_store.REL_DB.ReleaseProcessedFlow(proto_flow) | Release a processed flow if the processing deadline is not exceeded. | _ReleaseProcessedFlow | python | google/grr | grr/server/grr_response_server/worker_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/worker_lib.py | Apache-2.0 |
def ProcessFlow(
self, flow_processing_request: flows_pb2.FlowProcessingRequest
) -> None:
"""The callback for the flow processing queue."""
client_id = flow_processing_request.client_id
flow_id = flow_processing_request.flow_id
data_store.REL_DB.AckFlowProcessingRequests([flow_processing_request])
try:
flow = data_store.REL_DB.LeaseFlowForProcessing(
client_id,
flow_id,
processing_time=rdfvalue.Duration.From(6, rdfvalue.HOURS),
)
except db.ParentHuntIsNotRunningError:
flow_base.TerminateFlow(client_id, flow_id, "Parent hunt stopped.")
return
rdf_flow = mig_flow_objects.ToRDFFlow(flow)
first_request_to_process = rdf_flow.next_request_to_process
logging.info(
"Processing Flow %s/%s/%d (%s).",
client_id,
flow_id,
first_request_to_process,
rdf_flow.flow_class_name,
)
flow_cls = registry.FlowRegistry.FlowClassByName(rdf_flow.flow_class_name)
flow_obj = flow_cls(rdf_flow)
if not flow_obj.IsRunning():
logging.info(
"Received a request to process flow %s on client %s that is not "
"running.",
flow_id,
client_id,
)
return
processed, incrementally_processed = flow_obj.ProcessAllReadyRequests()
if processed == 0 and incrementally_processed == 0:
raise FlowHasNothingToProcessError(
"Unable to process any requests for flow %s on client %s."
% (flow_id, client_id)
)
while not self._ReleaseProcessedFlow(flow_obj):
processed, incrementally_processed = flow_obj.ProcessAllReadyRequests()
if processed == 0 and incrementally_processed == 0:
raise FlowHasNothingToProcessError(
"%s/%s: ReleaseProcessedFlow returned false but no "
"request could be processed (next req: %d)."
% (client_id, flow_id, flow_obj.rdf_flow.next_request_to_process)
)
if flow_obj.IsRunning():
logging.info(
"Processing Flow %s/%s/%d (%s) done, next request to process: %d.",
client_id,
flow_id,
first_request_to_process,
rdf_flow.flow_class_name,
rdf_flow.next_request_to_process,
)
else:
logging.info(
"Processing Flow %s/%s/%d (%s) done, flow is done.",
client_id,
flow_id,
first_request_to_process,
rdf_flow.flow_class_name,
) | The callback for the flow processing queue. | ProcessFlow | python | google/grr | grr/server/grr_response_server/worker_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/worker_lib.py | Apache-2.0 |
def CreateBlobStore(self):
"""Create a test blob store.
Returns:
A tuple (blob_store, cleanup), where blob_store is an instance of
blob_store.BlobStore to be tested and cleanup is a function which
destroys blob_store, releasing any resources held by it.
""" | Create a test blob store.
Returns:
A tuple (blob_store, cleanup), where blob_store is an instance of
blob_store.BlobStore to be tested and cleanup is a function which
destroys blob_store, releasing any resources held by it. | CreateBlobStore | python | google/grr | grr/server/grr_response_server/blob_store_test_mixin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store_test_mixin.py | Apache-2.0 |
def FromResponses(cls, request=None, responses=None) -> "Responses":
"""Creates a Responses object from new style flow request and responses."""
res = cls()
res.request = request
if request:
res.request_data = request.request_data
for r in responses or []:
if isinstance(r, rdf_flow_objects.FlowResponse):
res.responses.append(r.payload)
elif isinstance(r, rdf_flow_objects.FlowStatus):
res.status = r
res.success = r.status == "OK"
elif isinstance(r, rdf_flow_objects.FlowIterator):
pass
else:
raise TypeError("Got unexpected response type: %s" % type(r))
return res | Creates a Responses object from new style flow request and responses. | FromResponses | python | google/grr | grr/server/grr_response_server/flow_responses.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_responses.py | Apache-2.0 |
def FromResponsesProto2Any(
cls,
responses: Sequence[
Union[
rdf_flow_objects.FlowResponse,
rdf_flow_objects.FlowStatus,
rdf_flow_objects.FlowIterator,
],
],
request: Optional[rdf_flow_objects.FlowRequest] = None,
) -> "Responses[any_pb2.Any]":
# pytype: enable=name-error
"""Creates a `Response` object from raw flow responses.
Unlike the `Responses.FromResponses` method, this method does not use any
RDF-value magic to deserialize `Any` messages on the fly. Instead, it just
passes raw `Any` message as it is stored in the `any_payload` field of the
`FlowResponse` message.
Args:
responses: Flow responses from which to construct this object.
request: Flow request to which these responses belong.
Returns:
Wrapped flow responses.
"""
result = Responses()
if request is not None:
result.request = request
result.request_data = request.request_data
for response in responses:
if isinstance(response, rdf_flow_objects.FlowStatus):
if result.status is not None:
raise ValueError(f"Duplicated status response: {response}")
result.success = (
response.status == rdf_flow_objects.FlowStatus.Status.OK
)
result.status = response
elif isinstance(response, rdf_flow_objects.FlowResponse):
result.responses.append(response.any_payload.AsPrimitiveProto())
else:
# Note that this also covers `FlowIterator`—it is a legacy class that
# should no longer be used and new state methods (that are expected to
# trigger this code path) should not rely on it.
raise TypeError(f"Unexpected response: {response}")
if result.status is None:
raise ValueError("Missing status response")
return result | Creates a `Response` object from raw flow responses.
Unlike the `Responses.FromResponses` method, this method does not use any
RDF-value magic to deserialize `Any` messages on the fly. Instead, it just
passes raw `Any` message as it is stored in the `any_payload` field of the
`FlowResponse` message.
Args:
responses: Flow responses from which to construct this object.
request: Flow request to which these responses belong.
Returns:
Wrapped flow responses. | FromResponsesProto2Any | python | google/grr | grr/server/grr_response_server/flow_responses.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_responses.py | Apache-2.0 |
def First(self) -> Optional[T]:
"""A convenience method to return the first response."""
for x in self:
return x | A convenience method to return the first response. | First | python | google/grr | grr/server/grr_response_server/flow_responses.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_responses.py | Apache-2.0 |
def Last(self) -> Optional[T]:
"""A convenience method to return the last response."""
*_, last = self
return last | A convenience method to return the last response. | Last | python | google/grr | grr/server/grr_response_server/flow_responses.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_responses.py | Apache-2.0 |
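The `Responses` wrapper above is what flow state methods receive: it is iterable over reply payloads and exposes `success`, `status`, `First()` and `Last()`. A minimal sketch of a state method consuming it, assuming a hypothetical `ProcessReplies` state and using only attributes shown in this file (`flow_base.FlowError` is the error type raised elsewhere in this section):

```python
def ProcessReplies(self, responses):
  """Hypothetical state method consuming a flow_responses.Responses object."""
  if not responses.success:
    # responses.status carries the status of the failed request.
    raise flow_base.FlowError("Child request failed: %s" % responses.status)

  first = responses.First()   # None when there are no responses.
  payloads = list(responses)  # Iteration yields the reply payloads.
  return first, payloads
```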
def GetBigQueryClient(
service_account_json=None, project_id=None, dataset_id=None
):
"""Create a BigQueryClient."""
service_account_data = (
service_account_json or config.CONFIG["BigQuery.service_acct_json"]
)
project_id = project_id or config.CONFIG["BigQuery.project_id"]
dataset_id = dataset_id or config.CONFIG["BigQuery.dataset_id"]
if not (service_account_data and project_id and dataset_id):
raise RuntimeError(
"BigQuery.service_account_json, "
"BigQuery.project_id and BigQuery.dataset_id "
"must be defined."
)
creds = ServiceAccountCredentials.from_json_keyfile_dict(
json.loads(service_account_data), scopes=BIGQUERY_SCOPE
)
http_obj = httplib2.Http()
http_obj = creds.authorize(http_obj)
service = discovery.build("bigquery", "v2", http=http_obj)
return BigQueryClient(
project_id=project_id, bq_service=service, dataset_id=dataset_id
) | Create a BigQueryClient. | GetBigQueryClient | python | google/grr | grr/server/grr_response_server/bigquery.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/bigquery.py | Apache-2.0 |
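A usage sketch for the client factory above. The service-account path, project id and dataset id are placeholders; any argument left out falls back to the `BigQuery.*` config options checked inside the function:

```python
from grr_response_server import bigquery

# Placeholders only; omit arguments to fall back to the server config.
with open("/path/to/service_account.json") as fd:
  service_account_json = fd.read()

client = bigquery.GetBigQueryClient(
    service_account_json=service_account_json,
    project_id="my-gcp-project",
    dataset_id="grr_export",
)

# Datasets are created lazily during the first upload, but can also be
# ensured up front.
if client.GetDataset("grr_export") is None:
  client.CreateDataset()
```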
def CreateDataset(self):
"""Create a dataset."""
body = {
"datasetReference": {
"datasetId": self.dataset_id,
"description": "Data exported from GRR",
"friendlyName": "GRRExportData",
"projectId": self.project_id,
}
}
result = (
self.service.datasets()
.insert(projectId=self.project_id, body=body)
.execute()
)
self.datasets[self.dataset_id] = result
return result | Create a dataset. | CreateDataset | python | google/grr | grr/server/grr_response_server/bigquery.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/bigquery.py | Apache-2.0 |
def GetDataset(self, dataset_id):
"""Get a dataset."""
if dataset_id not in self.datasets:
try:
result = (
self.service.datasets()
.get(projectId=self.project_id, datasetId=dataset_id)
.execute()
)
self.datasets[dataset_id] = result
except errors.HttpError:
return None
return self.datasets[dataset_id] | Get a dataset. | GetDataset | python | google/grr | grr/server/grr_response_server/bigquery.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/bigquery.py | Apache-2.0 |
def IsErrorRetryable(self, e):
"""Return true if we should retry on this error.
Default status codes come from this advice:
https://developers.google.com/api-client-library/python/guide/media_upload
Args:
e: errors.HttpError object.
Returns:
boolean
"""
return e.resp.status in config.CONFIG["BigQuery.retry_status_codes"] | Return true if we should retry on this error.
Default status codes come from this advice:
https://developers.google.com/api-client-library/python/guide/media_upload
Args:
e: errors.HttpError object.
Returns:
boolean | IsErrorRetryable | python | google/grr | grr/server/grr_response_server/bigquery.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/bigquery.py | Apache-2.0 |
def InsertData(self, table_id, fd, schema, job_id):
"""Insert data into a bigquery table.
If the table specified doesn't exist, it will be created with the specified
schema.
Args:
table_id: string table id
fd: open file descriptor containing the newline separated JSON
schema: BigQuery schema dict
job_id: string job id
Returns:
API response object on success, None on failure
"""
configuration = {
"schema": {
"fields": schema,
},
"destinationTable": {
"projectId": self.project_id,
"tableId": table_id,
"datasetId": self.dataset_id,
},
"sourceFormat": "NEWLINE_DELIMITED_JSON",
}
body = {
"configuration": {
"load": configuration,
},
"jobReference": {
"projectId": self.project_id,
"jobId": job_id,
},
}
# File content can be gzipped for bandwidth efficiency. The server handles
# it correctly without any changes to the request.
mediafile = http.MediaFileUpload(
fd.name, mimetype="application/octet-stream"
)
job = self.service.jobs().insert(
projectId=self.project_id, body=body, media_body=mediafile
)
first_try = True
@retry.When(
errors.HttpError,
self.IsErrorRetryable,
opts=retry.Opts(
attempts=config.CONFIG["BigQuery.retry_max_attempts"],
init_delay=config.CONFIG["BigQuery.retry_interval"].AsTimedelta(),
backoff=config.CONFIG["BigQuery.retry_multiplier"],
),
)
def Execute() -> None:
nonlocal first_try
try:
job.execute()
except errors.HttpError:
if first_try:
first_try = False
if self.GetDataset(self.dataset_id):
logging.exception("Error with job: %s", job_id)
else:
# If this is our first export ever, we need to create the dataset.
logging.info("Attempting to create dataset: %s", self.dataset_id)
self.CreateDataset()
raise
try:
Execute()
except errors.HttpError as error:
raise BigQueryJobUploadError(f"Failed job '{job_id}'") from error | Insert data into a bigquery table.
If the table specified doesn't exist, it will be created with the specified
schema.
Args:
table_id: string table id
fd: open file descriptor containing the newline separated JSON
schema: BigQuery schema dict
job_id: string job id
Returns:
API response object on success, None on failure | InsertData | python | google/grr | grr/server/grr_response_server/bigquery.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/bigquery.py | Apache-2.0 |
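A sketch of feeding `InsertData` a newline-delimited JSON file, following the docstring above. The table id, schema and job id are illustrative, and `client` is assumed to be the object returned by `GetBigQueryClient`:

```python
import json
import tempfile
import time

schema = [
    {"name": "client_id", "type": "STRING"},
    {"name": "timestamp", "type": "TIMESTAMP"},
]
rows = [{"client_id": "C.1234567890abcdef", "timestamp": time.time()}]

with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as fd:
  for row in rows:
    fd.write(json.dumps(row) + "\n")  # NEWLINE_DELIMITED_JSON, one row per line.
  fd.flush()
  # Job ids must be unique per load job.
  client.InsertData("export_table", fd, schema, "grr_export_%d" % time.time())
```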
def ProcessEvents(self, msgs=None, publisher_username=None):
"""Processes a message for the event.""" | Processes a message for the event. | ProcessEvents | python | google/grr | grr/server/grr_response_server/events.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/events.py | Apache-2.0 |
def PublishEvent(cls, event_name, event, username=None):
"""Publish the message into all listeners of the event.
We send the message to all event handlers which contain this
string in their EVENT static member. This allows the event to be
sent to multiple interested listeners.
Args:
event_name: An event name.
event: The message to send to the event handler.
username: Username of the publisher of the message.
Raises:
ValueError: If the message is invalid. The message must be a Semantic
Value (instance of RDFValue) or a full GrrMessage.
"""
cls.PublishMultipleEvents({event_name: [event]}, username=username) | Publish the message into all listeners of the event.
We send the message to all event handlers which contain this
string in their EVENT static member. This allows the event to be
sent to multiple interested listeners.
Args:
event_name: An event name.
event: The message to send to the event handler.
username: Username of the publisher of the message.
Raises:
ValueError: If the message is invalid. The message must be a Semantic
Value (instance of RDFValue) or a full GrrMessage. | PublishEvent | python | google/grr | grr/server/grr_response_server/events.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/events.py | Apache-2.0 |
def PublishMultipleEvents(cls, events, username=None):
"""Publishes multiple messages at once.
Args:
events: A dict with keys being event names and values being lists of
messages.
username: Username of the publisher of the messages.
Raises:
ValueError: If the message is invalid. The message must be a Semantic
Value (instance of RDFValue) or a full GrrMessage.
"""
event_name_map = EventRegistry.EVENT_NAME_MAP
for event_name, messages in events.items():
if not isinstance(event_name, str):
raise ValueError(
"Event names should be string, got: %s" % type(event_name)
)
for msg in messages:
if not isinstance(msg, rdfvalue.RDFValue):
raise ValueError("Can only publish RDFValue instances.")
for event_cls in event_name_map.get(event_name, []):
event_cls().ProcessEvents(messages, publisher_username=username) | Publishes multiple messages at once.
Args:
events: A dict with keys being event names and values being lists of
messages.
username: Username of the publisher of the messages.
Raises:
ValueError: If the message is invalid. The message must be a Semantic
Value (instance of RDFValue) or a full GrrMessage. | PublishMultipleEvents | python | google/grr | grr/server/grr_response_server/events.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/events.py | Apache-2.0 |
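A publishing sketch for the two methods above, assuming the enclosing class is the module's `Events` registry (as in GRR's `events.py`) and that some listener declares the illustrative event name among the events it handles:

```python
from grr_response_core.lib import rdfvalue
from grr_response_server import events

# "ExampleEvent" is a hypothetical event name; only listeners registered for
# it will have ProcessEvents() called.
events.Events.PublishEvent(
    "ExampleEvent", rdfvalue.RDFString("something happened"), username="editor"
)

# Several events at once; every value must be an RDFValue instance, otherwise
# ValueError is raised.
events.Events.PublishMultipleEvents(
    {"ExampleEvent": [rdfvalue.RDFString("a"), rdfvalue.RDFString("b")]},
    username="editor",
)
```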
def GetClientIDsForHostnames(
hostnames: Iterable[str],
) -> Mapping[str, Sequence[str]]:
"""Gets all client_ids for a given list of hostnames or FQDNS.
Args:
hostnames: A list of hostnames / FQDNs.
Returns:
A dict with a list of all known GRR client_ids for each hostname.
"""
index = ClientIndex()
keywords = set()
for hostname in hostnames:
if hostname.startswith("host:"):
keywords.add(hostname)
else:
keywords.add("host:%s" % hostname)
results = index.ReadClientPostingLists(keywords)
result = {}
for keyword, hits in results.items():
result[keyword[len("host:") :]] = hits
return result | Gets all client_ids for a given list of hostnames or FQDNs.
Args:
hostnames: A list of hostnames / FQDNs.
Returns:
A dict with a list of all known GRR client_ids for each hostname. | GetClientIDsForHostnames | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
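A lookup sketch for the module-level helper above; the hostnames are placeholders:

```python
from grr_response_server import client_index

hits = client_index.GetClientIDsForHostnames(["web-01.example.com", "db-01"])
# Maps each looked-up hostname (the suffix after the "host:" keyword) to the
# GRR client ids known for it.
for hostname, client_ids in hits.items():
  print(hostname, client_ids)
```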
def _AnalyzeKeywords(self, keywords):
"""Extracts a start time from a list of keywords if present."""
start_time = max(
rdfvalue.RDFDatetime.Now() - rdfvalue.Duration.From(180, rdfvalue.DAYS),
data_store.REL_DB.MinTimestamp(),
)
filtered_keywords = []
for k in keywords:
if k.startswith(self.START_TIME_PREFIX):
try:
start_time = rdfvalue.RDFDatetime.FromHumanReadable(
k[self.START_TIME_PREFIX_LEN :]
)
except ValueError:
pass
else:
filtered_keywords.append(k)
if not filtered_keywords:
filtered_keywords.append(".")
return start_time, filtered_keywords | Extracts a start time from a list of keywords if present. | _AnalyzeKeywords | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
def LookupClients(self, keywords: Iterable[str]) -> Sequence[str]:
"""Returns a list of client URNs associated with keywords.
Args:
keywords: The list of keywords to search by.
Returns:
A list of client URNs.
Raises:
ValueError: A string (single keyword) was passed instead of an iterable.
"""
if isinstance(keywords, str):
raise ValueError(
"Keywords should be an iterable, not a string (got %s)." % keywords
)
start_time, filtered_keywords = self._AnalyzeKeywords(keywords)
keyword_map = data_store.REL_DB.ListClientsForKeywords(
list(map(self._NormalizeKeyword, filtered_keywords)),
start_time=start_time,
)
relevant_set = functools.reduce(
operator.and_, map(set, keyword_map.values())
)
return sorted(relevant_set) | Returns a list of client URNs associated with keywords.
Args:
keywords: The list of keywords to search by.
Returns:
A list of client URNs.
Raises:
ValueError: A string (single keyword) was passed instead of an iterable. | LookupClients | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
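`LookupClients` intersects the client sets matching every keyword, so the result only contains clients matching all of them, and passing a bare string raises `ValueError`. A sketch using the `host:` and `label:` prefixes generated elsewhere in this module:

```python
index = client_index.ClientIndex()

# Clients that match both keywords (set intersection).
client_ids = index.LookupClients(["host:web-01", "label:production"])

# index.LookupClients("host:web-01")  # Would raise ValueError: a single
# string is not accepted, only an iterable of keywords.
```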
def ReadClientPostingLists(
self, keywords: Iterable[str]
) -> Mapping[str, Sequence[str]]:
"""Looks up all clients associated with any of the given keywords.
Args:
keywords: A list of keywords we are interested in.
Returns:
A dict mapping each keyword to a list of matching clients.
"""
start_time, filtered_keywords = self._AnalyzeKeywords(keywords)
return data_store.REL_DB.ListClientsForKeywords(
filtered_keywords, start_time=start_time
) | Looks up all clients associated with any of the given keywords.
Args:
keywords: A list of keywords we are interested in.
Returns:
A dict mapping each keyword to a list of matching clients. | ReadClientPostingLists | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
def AnalyzeClient(self, client: rdf_objects.ClientSnapshot) -> Sequence[str]:
"""Finds the client_id and keywords for a client.
Args:
client: A ClientSnapshot object record to find keywords for.
Returns:
A Sequence of keywords related to client.
"""
# Start with a universal keyword, used to find all clients.
#
# TODO(user): Remove the universal keyword once we have a better way
# to do this, i.e., once we have a storage library which can list all
# clients directly.
keywords = set(["."])
def TryAppend(prefix, keyword):
precondition.AssertType(prefix, str)
precondition.AssertType(keyword, str)
if keyword:
keyword_string = self._NormalizeKeyword(keyword)
keywords.add(keyword_string)
if prefix:
keywords.add(prefix + ":" + keyword_string)
def TryAppendPrefixes(prefix, keyword, delimiter):
TryAppend(prefix, keyword)
segments = keyword.split(delimiter)
for i in range(1, len(segments)):
TryAppend(prefix, delimiter.join(segments[0:i]))
return len(segments)
def TryAppendIP(ip):
TryAppend("ip", ip)
# IPv4?
if TryAppendPrefixes("ip", str(ip), ".") == 4:
return
# IPv6?
TryAppendPrefixes("ip", str(ip), ":")
def TryAppendMac(mac):
TryAppend("mac", mac)
if len(mac) == 12:
# If looks like a mac address without ":" symbols, also add the keyword
# with them.
TryAppend("mac", ":".join([mac[i : i + 2] for i in range(0, 12, 2)]))
TryAppend("host", client.knowledge_base.fqdn)
host = client.knowledge_base.fqdn.split(".", 1)[0]
TryAppendPrefixes("host", host, "-")
TryAppendPrefixes("host", client.knowledge_base.fqdn, ".")
TryAppend("", client.knowledge_base.os)
TryAppend("", client.os_release)
TryAppend("", client.os_version)
TryAppend("", client.kernel)
TryAppend("", client.arch)
TryAppend("serial_number", client.hardware_info.serial_number)
TryAppend("system_uuid", client.hardware_info.system_uuid)
kb = client.knowledge_base
if kb:
for user in kb.users:
TryAppend("user", user.username)
TryAppend("", user.full_name)
if user.full_name:
for name in user.full_name.split():
# full_name often includes nicknames and similar, wrapped in
# punctuation, e.g. "Thomas 'TJ' Jones". We remove the most common
# wrapping characters.
TryAppend("", name.strip("\"'()"))
for ip in client.GetIPAddresses():
TryAppendIP(ip)
for mac in client.GetMacAddresses():
TryAppendMac(mac)
client_info = client.startup_info.client_info
if client_info:
TryAppend("client", client_info.client_name)
TryAppend("client", str(client_info.client_version))
if client_info.labels:
for label in client_info.labels:
TryAppend("label", label)
return keywords | Finds the client_id and keywords for a client.
Args:
client: A ClientSnapshot object record to find keywords for.
Returns:
A Sequence of keywords related to client. | AnalyzeClient | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
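To make the prefix expansion above concrete, here is a small standalone reproduction of the hostname handling in `AnalyzeClient` for an illustrative FQDN (already lower-cased, since keywords are assumed to be normalized to lower case):

```python
def expand(prefix, keyword, delimiter):
  """Mirrors TryAppendPrefixes: the keyword plus every leading partial."""
  out = {keyword, "%s:%s" % (prefix, keyword)}
  segments = keyword.split(delimiter)
  for i in range(1, len(segments)):
    partial = delimiter.join(segments[:i])
    out.update({partial, "%s:%s" % (prefix, partial)})
  return out

fqdn = "web-01.example.com"
keywords = {fqdn, "host:" + fqdn}                        # TryAppend("host", fqdn)
keywords |= expand("host", fqdn.split(".", 1)[0], "-")   # adds "host:web-01", "host:web"
keywords |= expand("host", fqdn, ".")                    # adds "host:web-01.example", ...
```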
def AddClient(self, client: rdf_objects.ClientSnapshot):
"""Adds a client to the index.
Args:
client: A ClientSnapshot object record.
"""
keywords = self.AnalyzeClient(client)
keywords.add(self._NormalizeKeyword(client.client_id))
data_store.REL_DB.AddClientKeywords(client.client_id, keywords) | Adds a client to the index.
Args:
client: A ClientSnapshot object record. | AddClient | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
def MultiAddClientLabels(
self,
client_ids: Collection[str],
labels: Collection[str],
) -> None:
"""Associates given labels with the specified clients.
Args:
client_ids: Client identifiers of clients to annotate with the labels.
labels: Labels to use for annotating the clients.
"""
precondition.AssertIterableType(labels, str)
keywords = set()
for label in labels:
keyword_string = self._NormalizeKeyword(label)
keywords.add(keyword_string)
keywords.add("label:" + keyword_string)
data_store.REL_DB.MultiAddClientKeywords(client_ids, keywords) | Associates given labels with the specified clients.
Args:
client_ids: Client identifiers of clients to annotate with the labels.
labels: Labels to use for annotating the clients. | MultiAddClientLabels | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
def RemoveAllClientLabels(self, client_id: str):
"""Removes all labels for a given client.
Args:
client_id: The client_id.
"""
labels_to_remove = set(
[l.name for l in data_store.REL_DB.ReadClientLabels(client_id)]
)
self.RemoveClientLabels(client_id, labels_to_remove) | Removes all labels for a given client.
Args:
client_id: The client_id. | RemoveAllClientLabels | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
def RemoveClientLabels(self, client_id: str, labels: Iterable[str]):
"""Removes all labels for a given client.
Args:
client_id: The client_id.
labels: A list of labels to remove.
"""
for label in labels:
keyword = self._NormalizeKeyword(label)
# This might actually delete a keyword with the same name as the label (if
# there is one).
data_store.REL_DB.RemoveClientKeyword(client_id, keyword)
data_store.REL_DB.RemoveClientKeyword(client_id, "label:%s" % keyword) | Removes all labels for a given client.
Args:
client_id: The client_id.
labels: A list of labels to remove. | RemoveClientLabels | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
def Evaluate(self, client_info):
"""Evaluates the rule represented by this object.
Args:
client_info: A `db.ClientFullInfo` instance.
Returns:
A bool value of the evaluation.
"""
raise NotImplementedError | Evaluates the rule represented by this object.
Args:
client_info: A `db.ClientFullInfo` instance.
Returns:
A bool value of the evaluation. | Evaluate | python | google/grr | grr/server/grr_response_server/foreman_rules.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/foreman_rules.py | Apache-2.0 |
def Evaluate(self, client_info):
"""Evaluates rules held in the rule set.
Args:
client_info: A client_info dict as returned by ReadFullInfoClient.
Returns:
A bool value of the evaluation.
Raises:
ValueError: The match mode is of unknown value.
"""
if self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ALL:
quantifier = all
elif self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ANY:
quantifier = any
else:
raise ValueError("Unexpected match mode value: %s" % self.match_mode)
return quantifier(rule.Evaluate(client_info) for rule in self.rules) | Evaluates rules held in the rule set.
Args:
client_info: A client_info dict as returned by ReadFullInfoClient.
Returns:
A bool value of the evaluation.
Raises:
ValueError: The match mode is of unknown value. | Evaluate | python | google/grr | grr/server/grr_response_server/foreman_rules.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/foreman_rules.py | Apache-2.0 |
def hunt_id(self):
"""Returns hunt id of this rule's actions or None if there's none."""
for action in self.actions or []:
if action.hunt_id is not None:
return action.hunt_id | Returns hunt id of this rule's actions or None if there's none. | hunt_id | python | google/grr | grr/server/grr_response_server/foreman_rules.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/foreman_rules.py | Apache-2.0 |
def __init__(self, address, port):
"""Instantiates a new StatsServer.
Args:
address: The IP address of the server to bind.
port: The TCP port that the server should listen to.
"""
super().__init__(address, port)
self._http_server = None
self._server_thread = None | Instantiates a new StatsServer.
Args:
address: The IP address of the server to bind.
port: The TCP port that the server should listen to. | __init__ | python | google/grr | grr/server/grr_response_server/stats_server.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/stats_server.py | Apache-2.0 |
def Start(self):
"""Start HTTPServer."""
ip = ipaddress.ip_address(self.address)
if ip.version == 4:
server_cls = http_server.HTTPServer
else:
server_cls = IPv6HTTPServer
try:
self._http_server = server_cls(
(self.address, self.port),
StatsServerHandler,
)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
raise base_stats_server.PortInUseError(self.port)
else:
raise
self._server_thread = threading.Thread(
target=self._http_server.serve_forever
)
self._server_thread.daemon = True
self._server_thread.start() | Start HTTPServer. | Start | python | google/grr | grr/server/grr_response_server/stats_server.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/stats_server.py | Apache-2.0 |
def Stop(self):
"""Stops serving statistics."""
self._http_server.shutdown()
self._server_thread.join() | Stops serving statistics. | Stop | python | google/grr | grr/server/grr_response_server/stats_server.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/stats_server.py | Apache-2.0 |
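A lifecycle sketch for the stats server above; the bind address and port are placeholders:

```python
from grr_response_server import stats_server

server = stats_server.StatsServer("127.0.0.1", 44449)
try:
  server.Start()  # Serves metrics from a daemon thread.
  ...             # Do the real work while metrics are being served.
finally:
  server.Stop()   # Shuts the HTTP server down and joins its thread.
```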
def InitializeStatsServerOnce():
"""Starts up a varz server after everything is registered.
StatsServer implementation may be overridden. If there's a "stats_server"
module present in grr/local directory then
grr.local.stats_server.StatsServer implementation will be used instead of
a default one.
"""
address = config.CONFIG["Monitoring.http_address"]
# Figure out which port to use.
port = config.CONFIG["Monitoring.http_port"]
if not port:
logging.info("Monitoring server disabled.")
return
max_port = config.CONFIG.Get("Monitoring.http_port_max", None)
if max_port is None:
# Use the same number of available ports as the adminui is using. If we
# have 10 available for adminui we will need 10 for the stats server.
adminui_max_port = config.CONFIG.Get(
"AdminUI.port_max", config.CONFIG["AdminUI.port"]
)
max_port = port + adminui_max_port - config.CONFIG["AdminUI.port"]
try:
# pylint: disable=g-import-not-at-top
from grr_response_server.local import stats_server
# pylint: enable=g-import-not-at-top
server_cls = stats_server.StatsServer
logging.debug("Using local StatsServer")
except ImportError:
logging.debug("Using default StatsServer")
server_cls = StatsServer
for port in range(port, max_port + 1):
try:
logging.info(
"Starting monitoring server on address %s and port %d.", address, port
)
server_obj = server_cls(address, port)
server_obj.Start()
return
except base_stats_server.PortInUseError as e:
if e.port < max_port:
logging.info(e)
continue
raise | Starts up a varz server after everything is registered.
StatsServer implementation may be overridden. If there's a "stats_server"
module present in grr/local directory then
grr.local.stats_server.StatsServer implementation will be used instead of
a default one. | InitializeStatsServerOnce | python | google/grr | grr/server/grr_response_server/stats_server.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/stats_server.py | Apache-2.0 |
def __init__(self, port):
"""Instantiates a new PortInUseError.
Args:
port: The port being used.
"""
super().__init__("Port {} is already in use.".format(port))
self.port = port | Instantiates a new PortInUseError.
Args:
port: The port being used. | __init__ | python | google/grr | grr/server/grr_response_server/base_stats_server.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/base_stats_server.py | Apache-2.0 |
def __init__(self, address, port):
"""Instantiates a new BaseStatsServer.
Args:
address: The IP address of the server to bind.
port: The TCP port that the server should listen to.
"""
self.address = address
self.port = port | Instantiates a new BaseStatsServer.
Args:
address: The IP address of the server to bind.
port: The TCP port that the server should listen to. | __init__ | python | google/grr | grr/server/grr_response_server/base_stats_server.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/base_stats_server.py | Apache-2.0 |
def Start(self):
"""Starts serving statistics.
Raises:
PortInUseError: The given port is already used.
"""
raise NotImplementedError() | Starts serving statistics.
Raises:
PortInUseError: The given port is already used. | Start | python | google/grr | grr/server/grr_response_server/base_stats_server.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/base_stats_server.py | Apache-2.0 |
def __init__(self, message_queue, pool, name):
# fmt: off
"""Initializer.
This creates a new worker object for the ThreadPool class.
Args:
message_queue: A queue.Queue object used by the ThreadPool class to
communicate with the workers. When a new task arrives, the ThreadPool
notifies the workers by putting a message into this queue that has the
format (target, args, name, queueing_time).
target - A callable, the function to call.
args - A tuple of positional arguments to target. Keyword arguments
are not supported.
name - A name for this task. If None, it will be unique generated by
the threading library.
queueing_time - The timestamp when this task was queued as returned by
time.time().
Or, alternatively, the message in the queue can be STOP_MESSAGE
which indicates that the worker should terminate.
pool: The thread pool this worker belongs to.
name: A name for this worker thread.
"""
# fmt: on
super().__init__(name=name)
self.pool = pool
self._queue = message_queue
self.daemon = True
self.idle = True
self.started = time.time() | Initializer.
This creates a new worker object for the ThreadPool class.
Args:
message_queue: A queue.Queue object used by the ThreadPool class to
communicate with the workers. When a new task arrives, the ThreadPool
notifies the workers by putting a message into this queue that has the
format (target, args, name, queueing_time).
target - A callable, the function to call.
args - A tuple of positional arguments to target. Keyword arguments
are not supported.
name - A name for this task. If None, it will be unique generated by
the threading library.
queueing_time - The timestamp when this task was queued as returned by
time.time().
Or, alternatively, the message in the queue can be STOP_MESSAGE
which indicates that the worker should terminate.
pool: The thread pool this worker belongs to.
name: A name for this worker thread. | __init__ | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
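The initializer docstring above fixes the queue protocol: each task is a `(target, args, name, queueing_time)` tuple, and `STOP_MESSAGE` asks a worker to exit. A sketch of the messages the pool enqueues; the target function and client id are illustrative:

```python
import queue
import time

from grr_response_server import threadpool

message_queue = queue.Queue(maxsize=10)

def Reconnect(client_id):
  """Illustrative task body."""
  print("reconnecting", client_id)

# One task message, as described in the docstring above. Keyword arguments
# are not supported, only the positional args tuple.
message_queue.put((Reconnect, ("C.1234567890abcdef",), "reconnect", time.time()))

# The sentinel that asks a worker to terminate.
message_queue.put(threadpool.STOP_MESSAGE)
```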
def ProcessTask(self, target, args, name, queueing_time):
"""Processes the tasks."""
if self.pool.name:
time_in_queue = time.time() - queueing_time
THREADPOOL_QUEUEING_TIME.RecordEvent(
time_in_queue, fields=[self.pool.name]
)
start_time = time.time()
try:
target(*args)
# We can't let a worker die because one of the tasks it has to process
# throws an exception. Therefore, we catch every error that is
# raised in the call to target().
except Exception: # pylint: disable=broad-except
if self.pool.name:
THREADPOOL_TASK_EXCEPTIONS.Increment(fields=[self.pool.name])
logging.exception("Caught exception in worker thread (%s)", name)
if self.pool.name:
total_time = time.time() - start_time
THREADPOOL_WORKING_TIME.RecordEvent(total_time, fields=[self.pool.name]) | Processes the tasks. | ProcessTask | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
def _RemoveFromPool(self):
"""Remove ourselves from the pool.
Returns:
True if removal was possible, and False if it was not possible.
"""
with self.pool.lock:
# Pool is shutting down, we can't interfere.
if not self.pool.started:
return False
# Keep a minimum number of threads in the pool.
if len(self.pool) <= self.pool.min_threads:
return False
# Remove us from our pool.
self.pool._RemoveWorker(self.name) # pylint: disable=protected-access
return True | Remove ourselves from the pool.
Returns:
True if removal was possible, and False if it was not possible. | _RemoveFromPool | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
def run(self):
"""This overrides the Thread.run method.
This method checks in an endless loop if new tasks are available
in the queue and processes them.
"""
while True:
if self.pool.name:
self.idle = True
try:
# Wait 60 seconds for a message, otherwise exit. This ensures that the
# threadpool will be trimmed down when load is light.
task = self._queue.get(timeout=60)
if self.pool.name:
self.idle = False
try:
# The pool told us to quit, likely because it is stopping.
if task == STOP_MESSAGE:
return
self.ProcessTask(*task)
finally:
self._queue.task_done()
except queue.Empty:
if self._RemoveFromPool():
return
# Try to trim old threads down when they get too old. This helps the
# thread pool size to shrink, even when it is not idle (e.g. if it is CPU
# bound) since threads are forced to exit, but new threads will not be
# created if the utilization is too high - resulting in a reduction of
# threadpool size under CPU load.
if time.time() - self.started > 600 and self._RemoveFromPool():
return | This overrides the Thread.run method.
This method checks in an endless loop if new tasks are available
in the queue and processes them. | run | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
def Factory(cls, name, min_threads, max_threads=None):
"""Creates a new thread pool with the given name.
If the thread pool of this name already exist, we just return the existing
one. This allows us to have different pools with different characteristics
used by different parts of the code, at the same time.
Args:
name: The name of the required pool.
min_threads: The number of threads in the pool.
max_threads: The maximum number of threads to grow the pool to. If not set
we do not grow the pool.
Returns:
A threadpool instance.
"""
with cls.factory_lock:
result = cls.POOLS.get(name)
if result is None:
cls.POOLS[name] = result = cls(
name, min_threads, max_threads=max_threads
)
return result | Creates a new thread pool with the given name.
If the thread pool of this name already exist, we just return the existing
one. This allows us to have different pools with different characteristics
used by different parts of the code, at the same time.
Args:
name: The name of the required pool.
min_threads: The number of threads in the pool.
max_threads: The maximum number of threads to grow the pool to. If not set
we do not grow the pool.
Returns:
A threadpool instance. | Factory | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
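A usage sketch for the pool factory above. The pool name and tasks are illustrative; `AddTask` is called with the same keyword arguments that `BatchProcessor.Convert` uses later in this module:

```python
from grr_response_server import threadpool

pool = threadpool.ThreadPool.Factory("example_pool", min_threads=2, max_threads=10)
pool.Start()
try:
  for i in range(5):
    pool.AddTask(
        target=print, args=("task %d" % i,), name="task_%d" % i, inline=False
    )
  pool.Join()   # Block until the queue is drained and no thread is busy.
finally:
  pool.Stop()   # Also joins outstanding work before tearing the workers down.
```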
def __init__(self, name, min_threads, max_threads=None):
"""This creates a new thread pool using min_threads workers.
Args:
name: A prefix to identify this thread pool in the exported stats.
min_threads: The minimum number of worker threads this pool should have.
max_threads: The maximum number of threads to grow the pool to. If not set
we do not grow the pool.
Raises:
threading.ThreadError: If no threads can be spawned at all, ThreadError
will be raised.
DuplicateThreadpoolError: This exception is raised if a thread pool with
the desired name already exists.
"""
self.min_threads = min_threads
if max_threads is None or max_threads < min_threads:
max_threads = min_threads
self.max_threads = max_threads
self._queue = queue.Queue(maxsize=max_threads)
self.name = name
self.started = False
self.process = psutil.Process()
# A reference for all our workers. Keys are thread names, and values are the
# _WorkerThread instance.
self._workers = {}
# Read-only copy of self._workers that is thread-safe for reading.
self._workers_ro_copy = {}
self.lock = threading.RLock()
if not self.name:
raise ValueError("Unnamed thread pools not allowed.")
if self.name in self.POOLS:
raise DuplicateThreadpoolError(
"A thread pool with the name %s already exists." % name
)
THREADPOOL_OUTSTANDING_TASKS.SetCallback(
self._queue.qsize, fields=[self.name]
)
THREADPOOL_THREADS.SetCallback(lambda: len(self), fields=[self.name])
THREADPOOL_CPU_USE.SetCallback(self.CPUUsage, fields=[self.name]) | This creates a new thread pool using min_threads workers.
Args:
name: A prefix to identify this thread pool in the exported stats.
min_threads: The minimum number of worker threads this pool should have.
max_threads: The maximum number of threads to grow the pool to. If not set
we do not grow the pool.
Raises:
threading.ThreadError: If no threads can be spawned at all, ThreadError
will be raised.
DuplicateThreadpoolError: This exception is raised if a thread pool with
the desired name already exists. | __init__ | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
def Start(self):
"""This starts the worker threads."""
if not self.started:
self.started = True
for _ in range(self.min_threads):
self._AddWorker() | This starts the worker threads. | Start | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
def Stop(self, join_timeout=600):
"""This stops all the worker threads."""
if not self.started:
logging.warning("Tried to stop a thread pool that was not running.")
return
# Remove all workers from the pool.
workers = list(self._workers.values())
self._workers = {}
self._workers_ro_copy = {}
# Send a stop message to all the workers. We need to be careful here to not
# send messages while we are still counting. If threads that haven't been
# counted yet pick up a message and exit, the count will be off and the
# shutdown process will deadlock.
stop_messages_needed = 0
for worker in workers:
if worker.is_alive():
stop_messages_needed += 1
for _ in range(stop_messages_needed):
self._queue.put(STOP_MESSAGE)
self.started = False
self.Join()
# Wait for the threads to all exit now.
for worker in workers:
worker.join(join_timeout)
if worker.is_alive():
raise RuntimeError("Threadpool worker did not finish in time.") | This stops all the worker threads. | Stop | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
def Join(self):
"""Waits until all outstanding tasks are completed."""
for _ in range(self.JOIN_TIMEOUT_DECISECONDS):
if self._queue.empty() and not self.busy_threads:
return
time.sleep(0.1)
raise ValueError("Timeout during Join() for threadpool %s." % self.name) | Waits until all outstanding tasks are completed. | Join | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
def __init__(
self,
batch_size=1000,
threadpool_prefix="batch_processor",
threadpool_size=10,
):
"""BatchProcessor constructor.
Args:
batch_size: All the values will be processed in batches of this size.
threadpool_prefix: Prefix that will be used in thread pool's threads
names.
threadpool_size: Size of a thread pool that will be used. If
threadpool_size is 0, no threads will be used and all conversions will
be done in the current thread.
"""
super().__init__()
self.batch_size = batch_size
self.threadpool_prefix = threadpool_prefix
self.threadpool_size = threadpool_size | BatchProcessor constructor.
Args:
batch_size: All the values will be processed in batches of this size.
threadpool_prefix: Prefix that will be used in thread pool's threads
names.
threadpool_size: Size of a thread pool that will be used. If
threadpool_size is 0, no threads will be used and all conversions will
be done in the current thread. | __init__ | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
def ConvertBatch(self, batch):
"""ConvertBatch is called for every batch to do the conversion.
Args:
batch: Batch to convert.
Returns:
List with converted values.
"""
raise NotImplementedError() | ConvertBatch is called for every batch to do the conversion.
Args:
batch: Batch to convert.
Returns:
List with converted values. | ConvertBatch | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
def Convert(self, values, start_index=0, end_index=None):
"""Converts given collection to exported values.
This method uses a threadpool to do the conversion in parallel. It
blocks for up to one hour until everything is converted.
Args:
values: Iterable object with values to convert.
start_index: Start from this index in the collection.
end_index: Finish processing on the (index - 1) element of the collection.
If None, work till the end of the collection.
Returns:
Nothing. ConvertedBatch() should handle the results.
"""
if not values:
return
try:
total_batch_count = len(values) // self.batch_size
except TypeError:
total_batch_count = -1
pool = ThreadPool.Factory(self.threadpool_prefix, self.threadpool_size)
val_iterator = itertools.islice(values, start_index, end_index)
pool.Start()
try:
for batch_index, batch in enumerate(
collection.Batch(val_iterator, self.batch_size)
):
logging.debug(
"Processing batch %d out of %d", batch_index, total_batch_count
)
pool.AddTask(
target=self.ConvertBatch,
args=(batch,),
name="batch_%d" % batch_index,
inline=False,
)
finally:
pool.Stop(join_timeout=3600) | Converts given collection to exported values.
This method uses a threadpool to do the conversion in parallel. It
blocks for up to one hour until everything is converted.
Args:
values: Iterable object with values to convert.
start_index: Start from this index in the collection.
end_index: Finish processing on the (index - 1) element of the collection.
If None, work till the end of the collection.
Returns:
Nothing. ConvertedBatch() should handle the results. | Convert | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
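A subclassing sketch for `BatchProcessor`: only `ConvertBatch` has to be provided, and `Convert` takes care of batching and the thread pool. The processor below is purely illustrative:

```python
from grr_response_server import threadpool

class UpperCaseProcessor(threadpool.BatchProcessor):
  """Illustrative processor that upper-cases string values batch by batch."""

  def __init__(self, results, **kwargs):
    super().__init__(**kwargs)
    self.results = results  # Shared list collecting converted values.

  def ConvertBatch(self, batch):
    converted = [value.upper() for value in batch]
    self.results.extend(converted)  # Good enough for a sketch under the GIL.
    return converted

results = []
processor = UpperCaseProcessor(results, batch_size=100, threadpool_size=5)
processor.Convert(["value_%d" % i for i in range(1000)])
# results now holds "VALUE_0" ... "VALUE_999"; ordering depends on the workers.
```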
def _TestHeartBeat(self, cron_class, cron_started_event, heartbeat_event):
"""Helper for heartbeat tests."""
cron_name = cron_class.__name__
cronjobs.ScheduleSystemCronJobs(names=[cron_name])
cron_manager = cronjobs.CronManager()
jobs = cronjobs.CronManager().ListJobs()
self.assertIn(cron_name, jobs)
try:
cron_manager.RunOnce()
cron_started_event.wait()
runs = cron_manager.ReadJobRuns(cron_name)
self.assertLen(runs, 1)
self.assertEqual(
runs[0].status, rdf_cronjobs.CronJobRun.CronJobRunStatus.RUNNING
)
finally:
heartbeat_event.set()
cron_manager._GetThreadPool().Join()
runs = cron_manager.ReadJobRuns(cron_name)
self.assertLen(runs, 1)
if cron_class.allow_overruns:
expected_status = rdf_cronjobs.CronJobRun.CronJobRunStatus.FINISHED
else:
expected_status = (
rdf_cronjobs.CronJobRun.CronJobRunStatus.LIFETIME_EXCEEDED
)
self.assertEqual(runs[0].status, expected_status) | Helper for heartbeat tests. | _TestHeartBeat | python | google/grr | grr/server/grr_response_server/cronjobs_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs_test.py | Apache-2.0 |
def GetMetadata(client_id, client_full_info):
"""Builds ExportedMetadata object for a given client id and ClientFullInfo."""
metadata = base.ExportedMetadata()
last_snapshot = None
if client_full_info.HasField("last_snapshot"):
last_snapshot = client_full_info.last_snapshot
metadata.client_urn = client_id
metadata.client_age = client_full_info.metadata.first_seen
if last_snapshot is not None:
kb = client_full_info.last_snapshot.knowledge_base
os_release = last_snapshot.os_release
os_version = last_snapshot.os_version
metadata.hostname = kb.fqdn
metadata.os = kb.os
metadata.os_release = os_release
metadata.os_version = os_version
metadata.usernames = ",".join(user.username for user in kb.users)
addresses = last_snapshot.GetMacAddresses()
if addresses:
metadata.mac_address = "\n".join(last_snapshot.GetMacAddresses())
metadata.hardware_info = last_snapshot.hardware_info
metadata.kernel_version = last_snapshot.kernel
ci = last_snapshot.cloud_instance
if ci is not None:
if ci.cloud_type == ci.InstanceType.AMAZON:
metadata.cloud_instance_type = metadata.CloudInstanceType.AMAZON
metadata.cloud_instance_id = ci.amazon.instance_id
elif ci.cloud_type == ci.InstanceType.GOOGLE:
metadata.cloud_instance_type = metadata.CloudInstanceType.GOOGLE
metadata.cloud_instance_id = ci.google.unique_id
system_labels = set()
user_labels = set()
for l in client_full_info.labels:
if l.owner == "GRR":
system_labels.add(l.name)
else:
user_labels.add(l.name)
metadata.labels = ",".join(sorted(system_labels | user_labels))
metadata.system_labels = ",".join(sorted(system_labels))
metadata.user_labels = ",".join(sorted(user_labels))
return metadata | Builds ExportedMetadata object for a given client id and ClientFullInfo. | GetMetadata | python | google/grr | grr/server/grr_response_server/export.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/export.py | Apache-2.0 |
def ConvertValuesWithMetadata(metadata_value_pairs, options=None):
"""Converts a set of RDFValues into a set of export-friendly RDFValues.
Args:
metadata_value_pairs: Tuples of (metadata, rdf_value), where metadata is an
instance of ExportedMetadata and rdf_value is an RDFValue subclass
instance to be exported.
options: rdfvalue.ExportOptions instance that will be passed to
ExportConverters.
Yields:
Converted values. Converted values may be of different types.
Raises:
NoConverterFound: in case no suitable converters were found for a value in
metadata_value_pairs. This error is only raised after
all values in metadata_value_pairs are attempted to be
converted. If there are multiple value types that could
not be converted because of the lack of corresponding
converters, only the last one will be specified in the
exception message.
"""
no_converter_found_error = None
metadata_value_groups = collection.Group(
metadata_value_pairs, lambda pair: pair[1].__class__.__name__
)
for metadata_values_group in metadata_value_groups.values():
_, first_value = metadata_values_group[0]
converters_classes = export_converters_registry.GetConvertersByValue(
first_value
)
if not converters_classes:
no_converter_found_error = "No converters found for value: %s" % str(
first_value
)
continue
converters = [cls(options) for cls in converters_classes]
for converter in converters:
for result in converter.BatchConvert(metadata_values_group):
yield result
if no_converter_found_error is not None:
raise NoConverterFound(no_converter_found_error) | Converts a set of RDFValues into a set of export-friendly RDFValues.
Args:
metadata_value_pairs: Tuples of (metadata, rdf_value), where metadata is an
instance of ExportedMetadata and rdf_value is an RDFValue subclass
instance to be exported.
options: rdfvalue.ExportOptions instance that will be passed to
ExportConverters.
Yields:
Converted values. Converted values may be of different types.
Raises:
NoConverterFound: in case no suitable converters were found for a value in
metadata_value_pairs. This error is only raised after
all values in metadata_value_pairs are attempted to be
converted. If there are multiple value types that could
not be converted because of the lack of corresponding
converters, only the last one will be specified in the
exception message. | ConvertValuesWithMetadata | python | google/grr | grr/server/grr_response_server/export.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/export.py | Apache-2.0 |
def ConvertValues(default_metadata, values, options=None):
"""Converts a set of RDFValues into a set of export-friendly RDFValues.
Args:
default_metadata: base.ExportedMetadata instance with basic information
about where the values come from. This metadata will be passed to
exporters.
values: Values to convert. They should be of the same type.
options: rdfvalue.ExportOptions instance that will be passed to
ExportConverters.
Returns:
Converted values. Converted values may be of different types
(unlike the source values which are all of the same type). This is due to
the fact that multiple ExportConverters may be applied to the same value
thus generating multiple converted values of different types.
Raises:
NoConverterFound: in case no suitable converters were found for the values.
"""
batch_data = [(default_metadata, obj) for obj in values]
return ConvertValuesWithMetadata(batch_data, options=options) | Converts a set of RDFValues into a set of export-friendly RDFValues.
Args:
default_metadata: base.ExportedMetadata instance with basic information
about where the values come from. This metadata will be passed to
exporters.
values: Values to convert. They should be of the same type.
options: rdfvalue.ExportOptions instance that will be passed to
ExportConverters.
Returns:
Converted values. Converted values may be of different types
(unlike the source values which are all of the same type). This is due to
the fact that multiple ExportConverters may be applied to the same value
thus generating multiple converted values of different types.
Raises:
NoConverterFound: in case no suitable converters were found for the values. | ConvertValues | python | google/grr | grr/server/grr_response_server/export.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/export.py | Apache-2.0 |
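A hedged usage sketch for ConvertValues(): export a homogeneous batch of values under one ExportedMetadata. It assumes the export converters have already been registered at server start-up; the import paths, metadata fields and Process values are assumptions for illustration.
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_server.export_converters import base

metadata = base.ExportedMetadata(hostname="host1")  # illustrative fields
processes = [
    rdf_client.Process(pid=1, exe="/sbin/init"),
    rdf_client.Process(pid=42, exe="/usr/bin/python3"),
]
for exported in ConvertValues(metadata, processes):
  # Each input value may fan out into several export-friendly values.
  print(type(exported).__name__)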
def SplitEmailsAndAppendEmailDomain(self, address_list):
"""Splits a string of comma-separated emails, appending default domain."""
result = []
# Process email addresses, and build up a list.
if isinstance(address_list, rdf_standard.DomainEmailAddress):
address_list = [str(address_list)]
elif isinstance(address_list, str):
address_list = [address for address in address_list.split(",") if address]
for address in address_list:
result.append(self.AddEmailDomain(address))
return result | Splits a string of comma-separated emails, appending default domain. | SplitEmailsAndAppendEmailDomain | python | google/grr | grr/server/grr_response_server/email_alerts.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/email_alerts.py | Apache-2.0 |
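A standalone illustration (no GRR config involved) of what the splitting and domain-appending above is meant to do; the default domain and the AddEmailDomain stand-in are assumptions for the example, not the repository's implementation.
DEFAULT_DOMAIN = "example.com"  # stands in for the configured email domain


def AddEmailDomain(address):
  # Bare usernames get the default domain appended; full addresses pass through.
  return address if "@" in address else address + "@" + DEFAULT_DOMAIN


address_list = "admin,[email protected]"
result = [AddEmailDomain(a) for a in address_list.split(",") if a]
print(result)  # ['[email protected]', '[email protected]']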
def SendEmail(
self,
to_addresses,
from_address,
subject,
message,
attachments=None,
is_html=True,
cc_addresses=None,
message_id=None,
headers=None,
):
"""Sends an email."""
raise NotImplementedError() | Sends an email. | SendEmail | python | google/grr | grr/server/grr_response_server/email_alerts.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/email_alerts.py | Apache-2.0 |
def SendEmail(
self,
to_addresses,
from_address,
subject,
message,
attachments=None,
is_html=True,
cc_addresses=None,
message_id=None,
headers=None,
):
"""This method sends an email notification.
Args:
to_addresses: [email protected] string, list of addresses as csv string,
or rdf_standard.DomainEmailAddress
from_address: [email protected] string
subject: email subject string
message: message contents string, as HTML or plain text
attachments: mapping of filename strings to file data, e.g.
{"/file/name/string": filedata}
is_html: true if message is in HTML format
cc_addresses: [email protected] string, or list of addresses as csv
string
message_id: smtp message_id. Used to enable conversation threading
headers: dict of str-> str, headers to set
Raises:
EmailNotSentError: for problems connecting to smtp server.
"""
headers = headers or {}
msg = MIMEMultipart("alternative")
if is_html:
text = self.RemoveHtmlTags(message)
part1 = MIMEText(text, "plain")
msg.attach(part1)
part2 = MIMEText(message, "html")
msg.attach(part2)
else:
part1 = MIMEText(message, "plain")
msg.attach(part1)
if attachments:
for file_name, file_data in attachments.items():
part = MIMEBase("application", "octet-stream")
part.set_payload(file_data)
encoders.encode_base64(part)
part.add_header(
"Content-Disposition", 'attachment; filename="%s"' % file_name
)
msg.attach(part)
msg["Subject"] = subject
from_address = self.AddEmailDomain(from_address)
to_addresses = self.SplitEmailsAndAppendEmailDomain(to_addresses)
cc_addresses = self.SplitEmailsAndAppendEmailDomain(cc_addresses or "")
msg["From"] = from_address
msg["To"] = ",".join(to_addresses)
if cc_addresses:
msg["CC"] = ",".join(cc_addresses)
if message_id:
msg.add_header("Message-ID", message_id)
for header, value in headers.items():
msg.add_header(header, value)
try:
s = smtplib.SMTP(
config.CONFIG["Worker.smtp_server"],
int(config.CONFIG["Worker.smtp_port"]),
)
s.ehlo()
if config.CONFIG["Worker.smtp_starttls"]:
s.starttls()
s.ehlo()
if (
config.CONFIG["Worker.smtp_user"]
and config.CONFIG["Worker.smtp_password"]
):
s.login(
config.CONFIG["Worker.smtp_user"],
config.CONFIG["Worker.smtp_password"],
)
s.sendmail(from_address, to_addresses + cc_addresses, msg.as_string())
s.quit()
except (socket.error, smtplib.SMTPException) as e:
smtp_server = config.CONFIG["Worker.smtp_server"]
raise EmailNotSentError(
"Could not connect to SMTP server to send email. "
"Please check config option Worker.smtp_server. "
f"Currently set to {smtp_server}."
) from e | This method sends an email notification.
Args:
to_addresses: [email protected] string, list of addresses as csv string,
or rdf_standard.DomainEmailAddress
from_address: [email protected] string
subject: email subject string
message: message contents string, as HTML or plain text
attachments: mapping of filename strings to file data, e.g.
{"/file/name/string": filedata}
is_html: true if message is in HTML format
cc_addresses: [email protected] string, or list of addresses as csv
string
message_id: smtp message_id. Used to enable conversation threading
headers: dict of str-> str, headers to set
Raises:
EmailNotSentError: for problems connecting to smtp server. | SendEmail | python | google/grr | grr/server/grr_response_server/email_alerts.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/email_alerts.py | Apache-2.0 |
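A sketch of sending a notification through the module-level alerter (populated by InitializeEmailAlerterOnce() below). It assumes a loaded server configuration with working SMTP settings; addresses and content are illustrative.
from grr_response_server import email_alerts

email_alerts.EMAIL_ALERTER.SendEmail(
    to_addresses="[email protected],oncall",  # bare names get the default domain appended
    from_address="grr-server",
    subject="Hunt completed",
    message="<p>Hunt <b>H:12345678</b> finished with 10 results.</p>",
    is_html=True,
)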
def InitializeEmailAlerterOnce():
"""Initializes e-mail alerts."""
global EMAIL_ALERTER
email_alerter_cls_name = config.CONFIG["Server.email_alerter_class"]
logging.debug("Using email alerter: %s", email_alerter_cls_name)
cls = EmailAlerterBase.GetPlugin(email_alerter_cls_name)
EMAIL_ALERTER = cls() | Initializes e-mail alerts. | InitializeEmailAlerterOnce | python | google/grr | grr/server/grr_response_server/email_alerts.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/email_alerts.py | Apache-2.0 |
def WriteBlobsWithUnknownHashes(
self,
blobs_data: Iterable[bytes],
) -> List[models_blobs.BlobID]:
"""Writes the contents of the given blobs, using their hash as BlobID.
Args:
blobs_data: An iterable of bytes objects.
Returns:
A list of blob identifiers corresponding to each written blob.
"""
blobs_ids = [models_blobs.BlobID.Of(d) for d in blobs_data]
self.WriteBlobs(dict(zip(blobs_ids, blobs_data)))
return blobs_ids | Writes the contents of the given blobs, using their hash as BlobID.
Args:
blobs_data: An iterable of bytes objects.
Returns:
A list of blob identifiers corresponding to each written blob. | WriteBlobsWithUnknownHashes | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
def WriteBlobWithUnknownHash(
self,
blob_data: bytes,
) -> models_blobs.BlobID:
"""Writes the content of the given blob, using its hash as BlobID.
Args:
blob_data: Blob contents as bytes.
Returns:
A blob identifier corresponding to the written blob.
"""
return self.WriteBlobsWithUnknownHashes([blob_data])[0] | Writes the content of the given blob, using its hash as BlobID.
Args:
blob_data: Blob contents as bytes.
Returns:
A blob identifier corresponding to the written blob. | WriteBlobWithUnknownHash | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
def ReadBlob(
self,
blob_id: models_blobs.BlobID,
) -> Optional[bytes]:
"""Reads the blob contents, identified by the given BlobID.
Args:
blob_id: An identifier of the blob to read.
Returns:
Bytes corresponding to a given blob or None if such blob
does not exist.
"""
return self.ReadBlobs([blob_id])[blob_id] | Reads the blob contents, identified by the given BlobID.
Args:
blob_id: An identifier of the blob to read.
Returns:
Bytes corresponding to a given blob or None if such blob
does not exist. | ReadBlob | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
def CheckBlobExists(
self,
blob_id: models_blobs.BlobID,
) -> bool:
"""Checks if a blob with a given BlobID exists.
Args:
blob_id: An identifier of the blob to check for existence.
Returns:
True if the blob exists, False otherwise.
"""
return self.CheckBlobsExist([blob_id])[blob_id] | Checks if a blob with a given BlobID exists.
Args:
blob_id: An identifier of the blob to check for existence.
Returns:
True if the blob exists, False otherwise. | CheckBlobExists | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
def WriteBlobs(
self,
blob_id_data_map: Dict[models_blobs.BlobID, bytes],
) -> None:
"""Creates or overwrites blobs.
Args:
blob_id_data_map: A mapping from blob identifiers to blob data to write.
""" | Creates or overwrites blobs.
Args:
blob_id_data_map: A mapping from blob identifiers to blob data to write. | WriteBlobs | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
def ReadBlobs(
self, blob_ids: Iterable[models_blobs.BlobID]
) -> Dict[models_blobs.BlobID, Optional[bytes]]:
"""Reads all blobs, specified by blob_ids, returning their contents.
Args:
blob_ids: An iterable of BlobIDs.
Returns:
A map of {blob_id: blob_data} where blob_data is blob bytes previously
written with WriteBlobs. If a particular blob_id is not found, the
corresponding blob_data will be None.
""" | Reads all blobs, specified by blob_ids, returning their contents.
Args:
blob_ids: An iterable of BlobIDs.
Returns:
A map of {blob_id: blob_data} where blob_data is blob bytes previously
written with WriteBlobs. If a particular blob_id is not found, the
corresponding blob_data will be None. | ReadBlobs | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
def CheckBlobsExist(
self,
blob_ids: Iterable[models_blobs.BlobID],
) -> Dict[models_blobs.BlobID, bool]:
"""Checks if blobs for the given identifiers already exist.
Args:
blob_ids: An iterable of BlobIDs.
Returns:
A map of {blob_id: status} where status is a boolean (True if blob exists,
False if it doesn't).
""" | Checks if blobs for the given identifiers already exist.
Args:
blob_ids: An iterable of BlobIDs.
Returns:
A map of {blob_id: status} where status is a boolean (True if blob exists,
False if it doesn't). | CheckBlobsExist | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
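A sketch of the single-blob wrappers above together with their batch counterparts, assuming an initialized blob store (data_store.BLOBS on a running server); the payload is illustrative.
from grr_response_server import data_store

payload = b"hello blob store"
blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(payload)

assert data_store.BLOBS.CheckBlobExists(blob_id)
assert data_store.BLOBS.ReadBlob(blob_id) == payload

# The batch variants take and return dicts keyed by BlobID.
contents = data_store.BLOBS.ReadBlobs([blob_id])       # {blob_id: payload}
exists = data_store.BLOBS.CheckBlobsExist([blob_id])   # {blob_id: True}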
def ReadAndWaitForBlobs(
self,
blob_ids: Iterable[models_blobs.BlobID],
timeout: rdfvalue.Duration,
) -> Dict[models_blobs.BlobID, Optional[bytes]]:
"""Reads specified blobs, waiting and retrying if blobs do not exist yet.
Args:
blob_ids: An iterable of BlobIDs.
timeout: A rdfvalue.Duration specifying the maximum time to pass until the
last poll is conducted. The overall runtime of ReadAndWaitForBlobs can
be higher, because `timeout` is a threshold for the start (and not end)
of the last attempt at reading.
Returns:
A map of {blob_id: blob_data} where blob_data is blob bytes previously
written with WriteBlobs. If a particular blob_id is not found, the
corresponding blob_data will be None.
"""
remaining_ids = set(blob_ids)
results = {blob_id: None for blob_id in remaining_ids}
start = rdfvalue.RDFDatetime.Now()
# TODO: Implement truncated exponential backoff.
sleep_dur = rdfvalue.Duration.From(1, rdfvalue.SECONDS)
poll_num = 0
while remaining_ids:
cur_blobs = self.ReadBlobs(list(remaining_ids))
now = rdfvalue.RDFDatetime.Now()
elapsed = now - start
poll_num += 1
for blob_id, blob in cur_blobs.items():
if blob is None:
continue
results[blob_id] = blob
remaining_ids.remove(blob_id)
BLOB_STORE_POLL_HIT_LATENCY.RecordEvent(
elapsed.ToFractional(rdfvalue.SECONDS)
)
BLOB_STORE_POLL_HIT_ITERATION.RecordEvent(poll_num)
if not remaining_ids or elapsed + sleep_dur >= timeout:
break
time.sleep(sleep_dur.ToFractional(rdfvalue.SECONDS))
return results | Reads specified blobs, waiting and retrying if blobs do not exist yet.
Args:
blob_ids: An iterable of BlobIDs.
timeout: A rdfvalue.Duration specifying the maximum time to pass until the
last poll is conducted. The overall runtime of ReadAndWaitForBlobs can
be higher, because `timeout` is a threshold for the start (and not end)
of the last attempt at reading.
Returns:
A map of {blob_id: blob_data} where blob_data is blob bytes previously
written with WriteBlobs. If a particular blob_id is not found, the
corresponding blob_data will be None. | ReadAndWaitForBlobs | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
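A sketch of polling for blobs that may still be in flight (for example, written asynchronously by a client), giving up after a fixed duration. It assumes an initialized blob store; the blob contents and the models import path are assumptions.
from grr_response_core.lib import rdfvalue
from grr_response_server import data_store
from grr_response_server.models import blobs as models_blobs  # import path assumed

expected = [models_blobs.BlobID.Of(b"chunk-1"), models_blobs.BlobID.Of(b"chunk-2")]
timeout = rdfvalue.Duration.From(30, rdfvalue.SECONDS)

results = data_store.BLOBS.ReadAndWaitForBlobs(expected, timeout)
for blob_id, data in results.items():
  if data is None:
    print("blob still missing after timeout:", blob_id)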
def ReadAndWaitForBlob(
self,
blob_id: models_blobs.BlobID,
timeout: rdfvalue.Duration,
) -> Optional[bytes]:
"""Reads the specified blobs waiting until it is available or times out.
Args:
blob_id: An identifier of the blob to read.
timeout: A timeout after which `None` is returned instead.
Returns:
Content of the requested blob or `None` if the timeout was reached.
"""
return self.ReadAndWaitForBlobs([blob_id], timeout)[blob_id] | Reads the specified blob, waiting until it is available or times out.
Args:
blob_id: An identifier of the blob to read.
timeout: A timeout after which `None` is returned instead.
Returns:
Content of the requested blob or `None` if the timeout was reached. | ReadAndWaitForBlob | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
def WaitForBlobs(
self,
blob_ids: Iterable[models_blobs.BlobID],
timeout: rdfvalue.Duration,
) -> None:
"""Waits for specified blobs to appear in the database.
Args:
blob_ids: A collection of blob ids to wait for.
timeout: A duration specifying the maximum amount of time to wait.
Raises:
BlobStoreTimeoutError: If the blobs are still not in the database after
the specified timeout duration has elapsed.
"""
remaining_blob_ids = set(blob_ids)
# TODO: See the TODO comment in `ReadAndWaitForBlobs`.
sleep_duration = rdfvalue.Duration.From(1, rdfvalue.SECONDS)
start_time = rdfvalue.RDFDatetime.Now()
ticks = 0
while True:
blob_id_exists = self.CheckBlobsExist(remaining_blob_ids)
elapsed = rdfvalue.RDFDatetime.Now() - start_time
elapsed_secs = elapsed.ToFractional(rdfvalue.SECONDS)
ticks += 1
for blob_id, exists in blob_id_exists.items():
if not exists:
continue
remaining_blob_ids.remove(blob_id)
BLOB_STORE_POLL_HIT_LATENCY.RecordEvent(elapsed_secs)
BLOB_STORE_POLL_HIT_ITERATION.RecordEvent(ticks)
if not remaining_blob_ids:
break
if elapsed + sleep_duration >= timeout:
raise BlobStoreTimeoutError()
sleep_duration_secs = sleep_duration.ToFractional(rdfvalue.SECONDS)
time.sleep(sleep_duration_secs) | Waits for specified blobs to appear in the database.
Args:
blob_ids: A collection of blob ids to wait for.
timeout: A duration specifying the maximum amount of time to wait.
Raises:
BlobStoreTimeoutError: If the blobs are still not in the database after
the specified timeout duration has elapsed. | WaitForBlobs | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
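WaitForBlobs() raises instead of returning partial results; below is a sketch of turning the timeout into a log line, under the same environment assumptions as the previous sketch.
import logging

from grr_response_core.lib import rdfvalue
from grr_response_server import blob_store
from grr_response_server import data_store
from grr_response_server.models import blobs as models_blobs  # import path assumed

blob_ids = [models_blobs.BlobID.Of(b"chunk-1")]
try:
  data_store.BLOBS.WaitForBlobs(
      blob_ids, timeout=rdfvalue.Duration.From(10, rdfvalue.SECONDS))
except blob_store.BlobStoreTimeoutError:
  logging.warning("Blobs not available yet; will retry later.")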
def __init__(
self,
metadata: rdf_stats.MetricMetadata,
registry: prometheus_client.registry.CollectorRegistry,
):
"""Instantiates a new _Metric.
Args:
metadata: An rdf_stats.MetricMetadata instance describing this _Metric.
registry: A prometheus_client.Registry instance.
Raises:
ValueError: metadata contains an unknown metric_type.
"""
self.metadata = metadata
self.fields = stats_utils.FieldDefinitionTuplesFromProtos(
metadata.fields_defs
)
field_names = [name for name, _ in self.fields]
if metadata.metric_type == rdf_stats.MetricMetadata.MetricType.COUNTER:
self.metric = prometheus_client.Counter(
metadata.varname,
metadata.docstring,
labelnames=field_names,
registry=registry,
)
elif metadata.metric_type == rdf_stats.MetricMetadata.MetricType.EVENT:
bins = metadata.bins or [
0.0,
0.1,
0.2,
0.3,
0.4,
0.5,
0.75,
1,
1.5,
2,
2.5,
3,
4,
5,
6,
7,
8,
9,
10,
15,
20,
50,
100,
]
self.metric = prometheus_client.Histogram(
metadata.varname,
metadata.docstring,
labelnames=field_names,
buckets=bins,
registry=registry,
)
elif metadata.metric_type == rdf_stats.MetricMetadata.MetricType.GAUGE:
self.metric = prometheus_client.Gauge(
metadata.varname,
metadata.docstring,
labelnames=field_names,
registry=registry,
)
else:
raise ValueError("Unknown metric type: {!r}".format(metadata.metric_type)) | Instantiates a new _Metric.
Args:
metadata: An rdf_stats.MetricMetadata instance describing this _Metric.
registry: A prometheus_client.Registry instance.
Raises:
ValueError: metadata contains an unknown metric_type. | __init__ | python | google/grr | grr/server/grr_response_server/prometheus_stats_collector.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/prometheus_stats_collector.py | Apache-2.0 |
def _DistributionFromHistogram(metric, values_by_suffix):
"""Instantiate a rdf_stats.Distribution from a Prometheus Histogram.
Prometheus Histogram uses cumulative "buckets" lower or equal to an upper
bound. At instantiation, +Inf is implicitly appended to the upper bounds.
The delimiters [0.0, 0.1, 0.2 (, +Inf)] produce the following buckets:
Bucket "0.0" : -Inf <= values <= 0.0
Bucket "0.1" : -Inf <= values <= 0.1
Bucket "0.2" : -Inf <= values <= 0.2
Bucket "+Inf": -Inf <= values <= +Inf
Distribution uses exclusive bins greater or equal to a lower bound and
strictly lower than the next lower bound. At instantiation, -Inf is implicitly
prepended. The delimiters [(-Inf,) 0.0, 0.1, 0.2] produce the following bins:
Bin "-Inf": -Inf <= values < 0.0
Bin "0.0" : 0.0 <= values < 0.1
Bin "0.1" : 0.1 <= values < 0.2
Bin "0.2" : 0.2 <= values <= +Inf
Thus, Histogram buckets can be transformed to Distribution bins, by reading
in the same order and subtracting the value of the previous bin to remove the
cumulative sum. There is a slight incompatibility for values equal to bin
boundaries, because boundaries describe the upper bound for Prometheus and
the lower bound for our internal implementation.
Args:
metric: prometheus_stats_collector.Metric
values_by_suffix: dict of metric name suffixes and sample values lists
Returns:
rdf_stats.Distribution
Raises:
ValueError: The Histogram and metadata bin count do not match.
"""
dist = rdf_stats.Distribution(bins=list(metric.metadata.bins))
if metric.metadata.bins and len(dist.heights) != len(
values_by_suffix["_bucket"]
):
raise ValueError(
"Trying to create Distribution with {} bins, but underlying"
"Histogram has {} buckets".format(
len(dist.heights), len(values_by_suffix["_bucket"])
)
)
dist.heights = values_by_suffix["_bucket"]
# Remove cumulative sum by subtracting the value of the previous bin
for i in reversed(range(1, len(dist.heights))):
dist.heights[i] -= dist.heights[i - 1]
dist.count = values_by_suffix["_count"][0]
dist.sum = values_by_suffix["_sum"][0]
return dist | Instantiate a rdf_stats.Distribution from a Prometheus Histogram.
Prometheus Histogram uses cumulative "buckets" lower or equal to an upper
bound. At instantiation, +Inf is implicitly appended to the upper bounds.
The delimiters [0.0, 0.1, 0.2 (, +Inf)] produce the following buckets:
Bucket "0.0" : -Inf <= values <= 0.0
Bucket "0.1" : -Inf <= values <= 0.1
Bucket "0.2" : -Inf <= values <= 0.2
Bucket "+Inf": -Inf <= values <= +Inf
Distribution uses exclusive bins greater or equal to a lower bound and
strictly lower than the next lower bound. At instantiation, -Inf is implicitly
prepended. The delimiters [(-Inf,) 0.0, 0.1, 0.2] produce the following bins:
Bin "-Inf": -Inf <= values < 0.0
Bin "0.0" : 0.0 <= values < 0.1
Bin "0.1" : 0.1 <= values < 0.2
Bin "0.2" : 0.2 <= values <= +Inf
Thus, Histogram buckets can be transformed to Distribution bins, by reading
in the same order and subtracting the value of the previous bin to remove the
cumulative sum. There is a slight incompatibility for values equal to bin
boundaries, because boundaries describe the upper bound for Prometheus and
the lower bound for our internal implementation.
Args:
metric: prometheus_stats_collector.Metric
values_by_suffix: dict of metric name suffixes and sample values lists
Returns:
rdf_stats.Distribution
Raises:
ValueError: The Histogram and metadata bin count do not match. | _DistributionFromHistogram | python | google/grr | grr/server/grr_response_server/prometheus_stats_collector.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/prometheus_stats_collector.py | Apache-2.0 |
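A standalone numeric walk-through of the bucket-to-bin conversion described above (no Prometheus or GRR imports needed); the sample counts are made up.
bins = [0.0, 0.1, 0.2]  # Distribution delimiters, matching metadata.bins
# Cumulative Prometheus bucket counts for these delimiters plus +Inf:
# 3 samples <= 0.0, 7 samples <= 0.1, 9 samples <= 0.2, 10 samples <= +Inf.
cumulative_buckets = [3, 7, 9, 10]

heights = list(cumulative_buckets)
for i in reversed(range(1, len(heights))):
  heights[i] -= heights[i - 1]  # subtract the previous bucket's cumulative count

print(heights)  # [3, 4, 2, 1] -> per-bin counts once the cumulative sum is removed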
def __init__(self, registry=None):
"""Instantiates a new PrometheusStatsCollector.
Args:
registry: An instance of prometheus_client.CollectorRegistry. If None, a
new CollectorRegistry is instantiated. Use prometheus_client.REGISTRY
for the global default registry.
"""
self._metrics: Dict[Text, _Metric] = {}
if registry is None:
self._registry = prometheus_client.CollectorRegistry(auto_describe=True)
else:
self._registry = registry
super().__init__() | Instantiates a new PrometheusStatsCollector.
Args:
registry: An instance of prometheus_client.CollectorRegistry. If None, a
new CollectorRegistry is instantiated. Use prometheus_client.REGISTRY
for the global default registry. | __init__ | python | google/grr | grr/server/grr_response_server/prometheus_stats_collector.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/prometheus_stats_collector.py | Apache-2.0 |
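A small sketch of the two construction modes described above: the default isolated registry (handy for tests) versus the process-global Prometheus registry, so metrics show up on the standard exposition endpoint.
import prometheus_client

isolated = PrometheusStatsCollector()  # own CollectorRegistry, metrics stay local
shared = PrometheusStatsCollector(registry=prometheus_client.REGISTRY)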
def SignedBinaryIDFromURN(
binary_urn: rdfvalue.RDFURN,
) -> objects_pb2.SignedBinaryID:
"""Converts an AFF4 URN for a signed binary to a SignedBinaryID."""
if binary_urn.RelativeName(GetAFF4PythonHackRoot()):
return objects_pb2.SignedBinaryID(
binary_type=objects_pb2.SignedBinaryID.BinaryType.PYTHON_HACK,
path=binary_urn.RelativeName(GetAFF4PythonHackRoot()),
)
elif binary_urn.RelativeName(GetAFF4ExecutablesRoot()):
return objects_pb2.SignedBinaryID(
binary_type=objects_pb2.SignedBinaryID.BinaryType.EXECUTABLE,
path=binary_urn.RelativeName(GetAFF4ExecutablesRoot()),
)
else:
raise ValueError(
"Unable to determine type of signed binary: %s." % binary_urn
) | Converts an AFF4 URN for a signed binary to a SignedBinaryID. | SignedBinaryIDFromURN | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
def _SignedBinaryURNFromID(
binary_id: objects_pb2.SignedBinaryID,
) -> rdfvalue.RDFURN:
"""Converts a SignedBinaryID to the equivalent AFF4 URN."""
binary_type = binary_id.binary_type
if binary_type == objects_pb2.SignedBinaryID.BinaryType.PYTHON_HACK:
return GetAFF4PythonHackRoot().Add(binary_id.path)
elif binary_type == objects_pb2.SignedBinaryID.BinaryType.EXECUTABLE:
return GetAFF4ExecutablesRoot().Add(binary_id.path)
else:
raise ValueError("Unknown binary type %s." % binary_type) | Converts a SignedBinaryID to the equivalent AFF4 URN. | _SignedBinaryURNFromID | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
def WriteSignedBinary(
binary_urn: rdfvalue.RDFURN,
binary_content: bytes,
private_key: rdf_crypto.RSAPrivateKey,
public_key: Optional[rdf_crypto.RSAPublicKey],
chunk_size: int = 1024,
):
"""Signs a binary and saves it to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: URN that should serve as a unique identifier for the binary.
binary_content: Contents of the binary, as raw bytes.
private_key: Key that should be used for signing the binary contents.
public_key: Key that should be used to verify the signature generated using
the private key.
chunk_size: Size, in bytes, of the individual blobs that the binary contents
will be split to before saving to the datastore.
"""
blob_references = objects_pb2.BlobReferences()
for chunk_offset in range(0, len(binary_content), chunk_size):
chunk = binary_content[chunk_offset : chunk_offset + chunk_size]
blob_rdf = rdf_crypto.SignedBlob()
blob_rdf.Sign(chunk, private_key, verify_key=public_key)
blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(
blob_rdf.SerializeToBytes()
)
blob_references.items.append(
objects_pb2.BlobReference(
offset=chunk_offset,
size=len(chunk),
blob_id=bytes(blob_id),
)
)
data_store.REL_DB.WriteSignedBinaryReferences(
SignedBinaryIDFromURN(binary_urn), blob_references
) | Signs a binary and saves it to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: URN that should serve as a unique identifier for the binary.
binary_content: Contents of the binary, as raw bytes.
private_key: Key that should be used for signing the binary contents.
public_key: Key that should be used to verify the signature generated using
the private key.
chunk_size: Size, in bytes, of the individual blobs that the binary contents
will be split to before saving to the datastore. | WriteSignedBinary | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
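A sketch of signing and storing a small python hack. It assumes a loaded server configuration; the config option names for the signing keys and the binary path are assumptions from memory, not verified against this repository.
from grr_response_core import config

private_key = config.CONFIG["PrivateKeys.executable_signing_private_key"]  # name assumed
public_key = config.CONFIG["Client.executable_signing_public_key"]  # name assumed

binary_urn = GetAFF4PythonHackRoot().Add("windows/hello.py")  # illustrative path
WriteSignedBinary(
    binary_urn,
    b"print('hello')",
    private_key=private_key,
    public_key=public_key,
    chunk_size=1024,
)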
def WriteSignedBinaryBlobs(
binary_urn: rdfvalue.RDFURN,
blobs: Iterable[jobs_pb2.SignedBlob],
) -> None:
"""Saves signed blobs to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: RDFURN that should serve as a unique identifier for the binary.
blobs: An Iterable of signed blobs to write to the datastore.
"""
blob_references = objects_pb2.BlobReferences()
current_offset = 0
for blob in blobs:
blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(
blob.SerializeToString()
)
blob_references.items.append(
objects_pb2.BlobReference(
offset=current_offset,
size=len(blob.data),
blob_id=bytes(blob_id),
)
)
current_offset += len(blob.data)
data_store.REL_DB.WriteSignedBinaryReferences(
SignedBinaryIDFromURN(binary_urn), blob_references
) | Saves signed blobs to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: RDFURN that should serve as a unique identifier for the binary.
blobs: An Iterable of signed blobs to write to the datastore. | WriteSignedBinaryBlobs | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
def DeleteSignedBinary(binary_urn: rdfvalue.RDFURN):
"""Deletes the binary with the given urn from the datastore.
Args:
binary_urn: RDFURN that serves as a unique identifier for the binary.
Raises:
SignedBinaryNotFoundError: If the signed binary does not exist.
"""
try:
data_store.REL_DB.ReadSignedBinaryReferences(
SignedBinaryIDFromURN(binary_urn)
)
except db.UnknownSignedBinaryError:
raise SignedBinaryNotFoundError(binary_urn)
data_store.REL_DB.DeleteSignedBinaryReferences(
SignedBinaryIDFromURN(binary_urn)
) | Deletes the binary with the given urn from the datastore.
Args:
binary_urn: RDFURN that serves as a unique identifier for the binary.
Raises:
SignedBinaryNotFoundError: If the signed binary does not exist. | DeleteSignedBinary | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
def FetchURNsForAllSignedBinaries() -> Sequence[rdfvalue.RDFURN]:
"""Returns URNs for all signed binaries in the datastore."""
return [
_SignedBinaryURNFromID(i)
for i in data_store.REL_DB.ReadIDsForAllSignedBinaries()
] | Returns URNs for all signed binaries in the datastore. | FetchURNsForAllSignedBinaries | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
def FetchBlobsForSignedBinaryByID(
binary_id: objects_pb2.SignedBinaryID,
) -> Tuple[Iterator[rdf_crypto.SignedBlob], rdfvalue.RDFDatetime]:
"""Retrieves blobs for the given binary from the datastore.
Args:
binary_id: An ID of the binary to be fetched.
Returns:
A tuple containing an iterator for all the binary's blobs and an
RDFDatetime representing when the binary's contents were saved
to the datastore.
Raises:
SignedBinaryNotFoundError: If no signed binary with the given URN exists.
"""
try:
references, timestamp = data_store.REL_DB.ReadSignedBinaryReferences(
binary_id
)
except db.UnknownSignedBinaryError:
raise SignedBinaryNotFoundError(_SignedBinaryURNFromID(binary_id))
blob_ids = [models_blob.BlobID(r.blob_id) for r in references.items]
raw_blobs = (data_store.BLOBS.ReadBlob(blob_id) for blob_id in blob_ids)
blobs = (
rdf_crypto.SignedBlob.FromSerializedBytes(raw_blob)
for raw_blob in raw_blobs
)
return blobs, timestamp | Retrieves blobs for the given binary from the datastore.
Args:
binary_id: An ID of the binary to be fetched.
Returns:
A tuple containing an iterator for all the binary's blobs and an
RDFDatetime representing when the binary's contents were saved
to the datastore.
Raises:
SignedBinaryNotFoundError: If no signed binary with the given URN exists. | FetchBlobsForSignedBinaryByID | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
def FetchBlobForSignedBinaryByID(
binary_id: objects_pb2.SignedBinaryID,
blob_index: int,
) -> rdf_crypto.SignedBlob:
"""Retrieves a single blob for the given binary from the datastore.
Args:
binary_id: An ID of the binary to be fetched.
blob_index: Index of the blob to read.
Returns:
Signed blob.
Raises:
SignedBinaryNotFoundError: If no signed binary with the given URN exists.
BlobIndexOutOfBoundsError: If requested blob index is too big.
"""
if blob_index < 0:
raise ValueError("blob_index must be >= 0.")
try:
references, _ = data_store.REL_DB.ReadSignedBinaryReferences(binary_id)
except db.UnknownSignedBinaryError:
raise SignedBinaryNotFoundError(_SignedBinaryURNFromID(binary_id))
try:
blob_id_bytes = references.items[blob_index].blob_id
except IndexError:
raise BlobIndexOutOfBoundsError(f"{blob_index} >= {len(references.items)}")
blob_id = models_blob.BlobID(blob_id_bytes)
raw_blob = data_store.BLOBS.ReadBlob(blob_id)
return rdf_crypto.SignedBlob.FromSerializedBytes(raw_blob) | Retrieves a single blob for the given binary from the datastore.
Args:
binary_id: An ID of the binary to be fetched.
blob_index: Index of the blob to read.
Returns:
Signed blob.
Raises:
SignedBinaryNotFoundError: If no signed binary with the given URN exists.
BlobIndexOutOfBoundsError: If requested blob index is too big. | FetchBlobForSignedBinaryByID | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
def FetchBlobsForSignedBinaryByURN(
binary_urn: rdfvalue.RDFURN,
) -> Tuple[Iterator[rdf_crypto.SignedBlob], rdfvalue.RDFDatetime]:
"""Retrieves blobs for the given binary from the datastore.
Args:
binary_urn: RDFURN that uniquely identifies the binary.
Returns:
A tuple containing an iterator for all the binary's blobs and an
RDFDatetime representing when the binary's contents were saved
to the datastore.
Raises:
SignedBinaryNotFoundError: If no signed binary with the given URN exists.
"""
return FetchBlobsForSignedBinaryByID(SignedBinaryIDFromURN(binary_urn)) | Retrieves blobs for the given binary from the datastore.
Args:
binary_urn: RDFURN that uniquely identifies the binary.
Returns:
A tuple containing an iterator for all the binary's blobs and an
RDFDatetime representing when the binary's contents were saved
to the datastore.
Raises:
SignedBinaryNotFoundError: If no signed binary with the given URN exists. | FetchBlobsForSignedBinaryByURN | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
def FetchSizeOfSignedBinary(
binary_id_or_urn: Union[rdf_objects.SignedBinaryID, rdfvalue.RDFURN],
) -> int:
"""Returns the size of the given binary (in bytes).
Args:
binary_id_or_urn: SignedBinaryID or RDFURN that uniquely identifies the
binary.
Raises:
SignedBinaryNotFoundError: If no signed binary with the given URN exists.
"""
if isinstance(binary_id_or_urn, rdfvalue.RDFURN):
binary_id = SignedBinaryIDFromURN(binary_id_or_urn)
else:
binary_id = binary_id_or_urn
try:
references, _ = data_store.REL_DB.ReadSignedBinaryReferences(binary_id)
except db.UnknownSignedBinaryError:
raise SignedBinaryNotFoundError(binary_id)
last_reference = references.items[-1]
return last_reference.offset + last_reference.size | Returns the size of the given binary (in bytes).
Args:
binary_id_or_urn: SignedBinaryID or RDFURN that uniquely identifies the
binary.
Raises:
SignedBinaryNotFoundError: If no signed binary with the given URN exists. | FetchSizeOfSignedBinary | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
def StreamSignedBinaryContents(
blob_iterator: Iterator[rdf_crypto.SignedBlob], chunk_size: int = 1024
) -> Generator[bytes, None, None]:
"""Yields the contents of the given binary in chunks of the given size.
Args:
blob_iterator: An Iterator over all the binary's blobs.
chunk_size: Size, in bytes, of the chunks to yield.
"""
all_blobs_read = False
byte_buffer = io.BytesIO()
while not all_blobs_read or byte_buffer.getvalue():
while not all_blobs_read and byte_buffer.tell() < chunk_size:
try:
blob = next(blob_iterator)
except StopIteration:
all_blobs_read = True
break
byte_buffer.write(blob.data)
if byte_buffer.tell() > 0:
# Yield a chunk of the signed binary and reset the buffer to contain
# only data that hasn't been sent yet.
byte_buffer.seek(0)
yield byte_buffer.read(chunk_size)
byte_buffer = io.BytesIO(byte_buffer.read())
byte_buffer.seek(0, io.SEEK_END) | Yields the contents of the given binary in chunks of the given size.
Args:
blob_iterator: An Iterator over all the binary's blobs.
chunk_size: Size, in bytes, of the chunks to yield. | StreamSignedBinaryContents | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
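A sketch of reading a signed binary back and streaming it in 1 KiB chunks, e.g. for download through the admin UI; it assumes the binary written in the earlier sketch exists.
binary_urn = GetAFF4PythonHackRoot().Add("windows/hello.py")  # illustrative path
blob_iterator, upload_time = FetchBlobsForSignedBinaryByURN(binary_urn)

content = b"".join(StreamSignedBinaryContents(blob_iterator, chunk_size=1024))
print(len(content), "bytes, uploaded at", upload_time)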
def testArtifactsValidate(self, registry):
"""Check each artifact we have passes validation."""
registry.AddFileSource(self.test_artifacts_file)
for artifact in registry.GetArtifacts():
ar.Validate(artifact) | Check each artifact we have passes validation. | testArtifactsValidate | python | google/grr | grr/server/grr_response_server/artifact_utils_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_utils_test.py | Apache-2.0 |
def testArtifactsDependencies(self, registry):
"""Check artifact dependencies work."""
registry.AddFileSource(self.test_artifacts_file)
art_obj = registry.GetArtifact("TestAggregationArtifactDeps")
deps = ar.GetArtifactDependencies(art_obj)
self.assertCountEqual(list(deps), ["TestAggregationArtifact"])
deps = ar.GetArtifactDependencies(art_obj, recursive=True)
self.assertCountEqual(
list(deps),
["TestOSAgnostic", "TestCmdArtifact", "TestAggregationArtifact"],
)
# Test recursive loop.
# Make sure we use the registry registered version of the class.
source = art_obj.sources[0]
backup = source.attributes["names"]
try:
source.attributes["names"] = ["TestAggregationArtifactDeps"]
with self.assertRaises(RuntimeError) as e:
ar.GetArtifactDependencies(art_obj, recursive=True)
self.assertIn("artifact recursion depth", str(e.exception))
finally:
source.attributes["names"] = backup # Restore old source. | Check artifact dependencies work. | testArtifactsDependencies | python | google/grr | grr/server/grr_response_server/artifact_utils_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_utils_test.py | Apache-2.0 |
def testUserMergeWindows(self):
"""Check Windows users are accurately merged."""
kb = rdf_client.KnowledgeBase()
self.assertEmpty(kb.users)
kb.MergeOrAddUser(rdf_client.User(sid="1234"))
self.assertLen(kb.users, 1)
kb.MergeOrAddUser(rdf_client.User(sid="5678", username="test1"))
self.assertLen(kb.users, 2)
_, conflicts = kb.MergeOrAddUser(
rdf_client.User(sid="5678", username="test2")
)
self.assertLen(kb.users, 2)
self.assertEqual(conflicts[0], ("username", "test1", "test2"))
self.assertEqual(kb.GetUser(sid="5678").username, "test2")
# This should merge on user name as we have no other data.
kb.MergeOrAddUser(rdf_client.User(username="test2", homedir="a"))
self.assertLen(kb.users, 2)
# This should create a new user since the sid is different.
new_attrs, conflicts = kb.MergeOrAddUser(
rdf_client.User(username="test2", sid="12345", temp="/blah")
)
self.assertLen(kb.users, 3)
self.assertCountEqual(
new_attrs, ["users.username", "users.temp", "users.sid"]
)
self.assertEqual(conflicts, []) | Check Windows users are accurately merged. | testUserMergeWindows | python | google/grr | grr/server/grr_response_server/artifact_utils_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_utils_test.py | Apache-2.0 |
def testUserMergeLinux(self):
"""Check Linux users are accurately merged."""
kb = rdf_client.KnowledgeBase()
self.assertEmpty(kb.users)
kb.MergeOrAddUser(rdf_client.User(username="blake", last_logon=1111))
self.assertLen(kb.users, 1)
# This should merge since the username is the same.
kb.MergeOrAddUser(rdf_client.User(uid="12", username="blake"))
self.assertLen(kb.users, 1)
# This should create a new record because the uid is different
kb.MergeOrAddUser(
rdf_client.User(
username="blake", uid="13", desktop="/home/blake/Desktop"
)
)
self.assertLen(kb.users, 2)
kb.MergeOrAddUser(
rdf_client.User(
username="newblake", uid="14", desktop="/home/blake/Desktop"
)
)
self.assertLen(kb.users, 3)
# Check merging where we don't specify uid works
new_attrs, conflicts = kb.MergeOrAddUser(
rdf_client.User(username="newblake", desktop="/home/blakey/Desktop")
)
self.assertLen(kb.users, 3)
self.assertCountEqual(new_attrs, ["users.username", "users.desktop"])
self.assertCountEqual(
conflicts, [("desktop", "/home/blake/Desktop", "/home/blakey/Desktop")]
) | Check Linux users are accurately merged. | testUserMergeLinux | python | google/grr | grr/server/grr_response_server/artifact_utils_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_utils_test.py | Apache-2.0 |
def Start(self, process_fn: Callable[[common_pb2.Message], None]) -> None:
"""Start the (asynchronous) subscriber.
Args:
process_fn: message-processing callback; all messages received from
Fleetspeak are passed to this function.
Multiple message-receiving and processing threads will be spawned in the
background, as per the config var `Server.fleetspeak_cps_concurrency.
"""
def _PubsubCallback(cps_msg: pubsub_v1.subscriber.message.Message) -> None:
# Using broad Exception catching here because, at this point, any error
# is unrecoverable. This code is run by some thread spawned by the
# google-cloud lib; any uncaught exception would just crash that thread.
try:
fs_msg = common_pb2.Message.FromString(cps_msg.data)
except Exception as e: # pylint: disable=broad-exception-caught
# Any error in message deserialization is final - we don't know how to
# handle the message. Log the error and drop the message permanently.
logging.exception(
"Dropping malformed CPS message from Fleetspeak: %s", e
)
cps_msg.ack()
return
try:
process_fn(fs_msg)
except Exception as e: # pylint: disable=broad-exception-caught
# A message processing error might be temporary (i.e. may be caused by
# some temporary condition). Mark the message as NACK, so that it will
# be redelivered at a later time.
logging.exception("Exception during CPS message processing: %s", e)
cps_msg.nack()
else:
cps_msg.ack()
self._client = pubsub_v1.SubscriberClient()
sub_path = self._client.subscription_path(self._project, self._subscription)
for i in range(self._concurrency):
logging.info(
"Starting Cloud Pub/Sub subscriber %d/%d", i + 1, self._concurrency
)
fut = self._client.subscribe(sub_path, callback=_PubsubCallback)
self._sub_futures.append(fut) | Start the (asynchronous) subscriber.
Args:
process_fn: message-processing callback; all messages received from
Fleetspeak are passed to this function.
Multiple message-receiving and processing threads will be spawned in the
background, as per the config var `Server.fleetspeak_cps_concurrency`. | Start | python | google/grr | grr/server/grr_response_server/fleetspeak_cps.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_cps.py | Apache-2.0
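The class owning Start() is not shown here, so its constructor is omitted; the sketch below only illustrates the callback contract described above: the callback receives decoded fleetspeak messages, a normal return ACKs the Pub/Sub message, and an exception NACKs it for later redelivery. The import path and message fields are assumptions.
from fleetspeak.src.common.proto.fleetspeak import common_pb2  # import path assumed


def ProcessFleetspeakMessage(fs_msg: common_pb2.Message) -> None:
  # Raising here marks the Pub/Sub message NACKed (temporary failure,
  # redelivered later); returning normally ACKs it.
  print("message for service:", fs_msg.destination.service_name)


# subscriber.Start(ProcessFleetspeakMessage)  # subscriber construction assumed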
def Stop(self) -> None:
"""Stop the (asynchronous) subscriber.
This will block until all message-processing threads shut down.
"""
for fut in self._sub_futures:
fut.cancel()
for fut in self._sub_futures:
fut.result()
self._client = None
self._sub_futures = [] | Stop the (asynchronous) subscriber.
This will block until all message-processing threads shut down. | Stop | python | google/grr | grr/server/grr_response_server/fleetspeak_cps.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_cps.py | Apache-2.0 |
def WaitUntil(self, condition_cb, timeout=5):
"""Wait a fixed time until the condition is true."""
for _ in range(int(timeout / self.sleep_time)):
res = condition_cb()
if res:
return res
time.sleep(self.sleep_time)
raise RuntimeError("Timeout exceeded. Condition not true") | Wait a fixed time until the condition is true. | WaitUntil | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testThreadCreation(self):
"""Ensure the thread pool started the minimum number of threads."""
self.assertEqual(
self.Count("pool-testThreadCreation"), self.NUMBER_OF_THREADS
) | Ensure the thread pool started the minimum number of threads. | testThreadCreation | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testStopping(self):
"""Tests if all worker threads terminate if the thread pool is stopped."""
self.assertEqual(self.Count("pool-testStopping"), self.NUMBER_OF_THREADS)
self.test_pool.Stop()
self.assertEqual(self.Count("pool-testStopping"), 0)
self.test_pool.Start()
self.assertEqual(self.Count("pool-testStopping"), self.NUMBER_OF_THREADS)
self.test_pool.Stop()
self.assertEqual(self.Count("pool-testStopping"), 0) | Tests if all worker threads terminate if the thread pool is stopped. | testStopping | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testRunTasks(self):
"""Test for running jobs on the thread pool.
This runs 1500 tasks on the ThreadPool and waits for them to
complete.
"""
# Tests if calling Join on an empty ThreadPool works.
self.test_pool.Join()
self.lock = threading.Lock()
def Insert(list_obj, element):
with self.lock:
list_obj.append(element)
test_list = []
for i in range(self.NUMBER_OF_TASKS):
self.test_pool.AddTask(Insert, (test_list, i))
self.test_pool.Join()
test_list.sort()
self.assertEqual(list(range(self.NUMBER_OF_TASKS)), test_list) | Test for running jobs on the thread pool.
This runs 1500 tasks on the ThreadPool and waits for them to
complete. | testRunTasks | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def IRaise(some_obj):
"""This method just raises an exception."""
with self.lock:
# This simulates an error by calling a non-existent function.
some_obj.process() | This method just raises an exception. | testRunRaisingTask.IRaise | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testRunRaisingTask(self):
"""Tests the behavior of the pool if a task throws an exception."""
self.lock = threading.Lock()
def IRaise(some_obj):
"""This method just raises an exception."""
with self.lock:
# This simulates an error by calling a non-existent function.
some_obj.process()
self.exception_args = []
def MockException(*args):
self.exception_args = args
with self.assertStatsCounterDelta(
2, threadpool.THREADPOOL_TASK_EXCEPTIONS, fields=[self.test_pool.name]
):
with mock.patch.object(logging, "exception", MockException):
self.test_pool.AddTask(IRaise, (None,), "Raising")
self.test_pool.AddTask(IRaise, (None,), "Raising")
self.test_pool.Join()
# Check that an exception is raised.
self.assertIn("exception in worker thread", self.exception_args[0])
self.assertEqual(self.exception_args[1], "Raising") | Tests the behavior of the pool if a task throws an exception. | testRunRaisingTask | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |