code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---
def run(self):
  """This overrides the Thread.run method.
  This method checks in an endless loop if new tasks are available
  in the queue and processes them.
  """
  while True:
    if self.pool.name:
      self.idle = True
    try:
      # Wait 60 seconds for a message, otherwise exit. This ensures that the
      # threadpool will be trimmed down when load is light.
      task = self._queue.get(timeout=60)
      if self.pool.name:
        self.idle = False
      try:
        # The pool told us to quit, likely because it is stopping.
        if task == STOP_MESSAGE:
          return
        self.ProcessTask(*task)
      finally:
        self._queue.task_done()
    except queue.Empty:
      if self._RemoveFromPool():
        return
    # Try to trim old threads down when they get too old. This helps the
    # thread pool size to shrink, even when it is not idle (e.g. if it is CPU
    # bound) since threads are forced to exit, but new threads will not be
    # created if the utilization is too high - resulting in a reduction of
    # threadpool size under CPU load.
    if time.time() - self.started > 600 and self._RemoveFromPool():
      return | This overrides the Thread.run method.
This method checks in an endless loop if new tasks are available
in the queue and processes them. | run | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
def Factory(cls, name, min_threads, max_threads=None):
  """Creates a new thread pool with the given name.
  If the thread pool of this name already exist, we just return the existing
  one. This allows us to have different pools with different characteristics
  used by different parts of the code, at the same time.
  Args:
    name: The name of the required pool.
    min_threads: The number of threads in the pool.
    max_threads: The maximum number of threads to grow the pool to. If not set
      we do not grow the pool.
  Returns:
    A threadpool instance.
  """
  with cls.factory_lock:
    result = cls.POOLS.get(name)
    if result is None:
      cls.POOLS[name] = result = cls(
          name, min_threads, max_threads=max_threads
      )
    return result | Creates a new thread pool with the given name.
If the thread pool of this name already exist, we just return the existing
one. This allows us to have different pools with different characteristics
used by different parts of the code, at the same time.
Args:
name: The name of the required pool.
min_threads: The number of threads in the pool.
max_threads: The maximum number of threads to grow the pool to. If not set
we do not grow the pool.
Returns:
A threadpool instance. | Factory | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
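A minimal usage sketch for this factory (not taken from the source file): it assumes the `AddTask(target=..., args=..., name=..., inline=...)` signature that the `Convert` method further down in this table relies on, and uses a placeholder pool name and task.

```python
# Hedged sketch: Factory, Start, AddTask and Stop as seen elsewhere in
# threadpool.py; pool name and task are illustrative placeholders.
pool = ThreadPool.Factory("example_pool", min_threads=2, max_threads=10)
pool.Start()
try:
  pool.AddTask(target=print, args=("hello from the pool",),
               name="task_0", inline=False)
finally:
  pool.Stop(join_timeout=60)
```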
def __init__(self, name, min_threads, max_threads=None):
  """This creates a new thread pool using min_threads workers.
  Args:
    name: A prefix to identify this thread pool in the exported stats.
    min_threads: The minimum number of worker threads this pool should have.
    max_threads: The maximum number of threads to grow the pool to. If not set
      we do not grow the pool.
  Raises:
    threading.ThreadError: If no threads can be spawned at all, ThreadError
      will be raised.
    DuplicateThreadpoolError: This exception is raised if a thread pool with
      the desired name already exists.
  """
  self.min_threads = min_threads
  if max_threads is None or max_threads < min_threads:
    max_threads = min_threads
  self.max_threads = max_threads
  self._queue = queue.Queue(maxsize=max_threads)
  self.name = name
  self.started = False
  self.process = psutil.Process()
  # A reference for all our workers. Keys are thread names, and values are the
  # _WorkerThread instance.
  self._workers = {}
  # Read-only copy of self._workers that is thread-safe for reading.
  self._workers_ro_copy = {}
  self.lock = threading.RLock()
  if not self.name:
    raise ValueError("Unnamed thread pools not allowed.")
  if self.name in self.POOLS:
    raise DuplicateThreadpoolError(
        "A thread pool with the name %s already exists." % name
    )
  THREADPOOL_OUTSTANDING_TASKS.SetCallback(
      self._queue.qsize, fields=[self.name]
  )
  THREADPOOL_THREADS.SetCallback(lambda: len(self), fields=[self.name])
  THREADPOOL_CPU_USE.SetCallback(self.CPUUsage, fields=[self.name]) | This creates a new thread pool using min_threads workers.
Args:
name: A prefix to identify this thread pool in the exported stats.
min_threads: The minimum number of worker threads this pool should have.
max_threads: The maximum number of threads to grow the pool to. If not set
we do not grow the pool.
Raises:
threading.ThreadError: If no threads can be spawned at all, ThreadError
will be raised.
DuplicateThreadpoolError: This exception is raised if a thread pool with
the desired name already exists. | __init__ | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
def Start(self):
  """This starts the worker threads."""
  if not self.started:
    self.started = True
    for _ in range(self.min_threads):
      self._AddWorker() | This starts the worker threads.
def Stop(self, join_timeout=600):
  """This stops all the worker threads."""
  if not self.started:
    logging.warning("Tried to stop a thread pool that was not running.")
    return
  # Remove all workers from the pool.
  workers = list(self._workers.values())
  self._workers = {}
  self._workers_ro_copy = {}
  # Send a stop message to all the workers. We need to be careful here to not
  # send messages while we are still counting. If threads that haven't been
  # counted yet pick up a message and exit, the count will be off and the
  # shutdown process will deadlock.
  stop_messages_needed = 0
  for worker in workers:
    if worker.is_alive():
      stop_messages_needed += 1
  for _ in range(stop_messages_needed):
    self._queue.put(STOP_MESSAGE)
  self.started = False
  self.Join()
  # Wait for the threads to all exit now.
  for worker in workers:
    worker.join(join_timeout)
    if worker.is_alive():
      raise RuntimeError("Threadpool worker did not finish in time.") | This stops all the worker threads.
def Join(self):
  """Waits until all outstanding tasks are completed."""
  for _ in range(self.JOIN_TIMEOUT_DECISECONDS):
    if self._queue.empty() and not self.busy_threads:
      return
    time.sleep(0.1)
  raise ValueError("Timeout during Join() for threadpool %s." % self.name) | Waits until all outstanding tasks are completed.
def __init__(
    self,
    batch_size=1000,
    threadpool_prefix="batch_processor",
    threadpool_size=10,
):
  """BatchProcessor constructor.
  Args:
    batch_size: All the values will be processed in batches of this size.
    threadpool_prefix: Prefix that will be used in thread pool's threads
      names.
    threadpool_size: Size of a thread pool that will be used. If
      threadpool_size is 0, no threads will be used and all conversions will
      be done in the current thread.
  """
  super().__init__()
  self.batch_size = batch_size
  self.threadpool_prefix = threadpool_prefix
  self.threadpool_size = threadpool_size | BatchProcessor constructor.
Args:
batch_size: All the values will be processed in batches of this size.
threadpool_prefix: Prefix that will be used in thread pool's threads
names.
threadpool_size: Size of a thread pool that will be used. If
threadpool_size is 0, no threads will be used and all conversions will
be done in the current thread. | __init__ | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
def ConvertBatch(self, batch):
  """ConvertBatch is called for every batch to do the conversion.
  Args:
    batch: Batch to convert.
  Returns:
    List with converted values.
  """
  raise NotImplementedError() | ConvertBatch is called for every batch to do the conversion.
Args:
batch: Batch to convert.
Returns:
List with converted values. | ConvertBatch | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
def Convert(self, values, start_index=0, end_index=None):
  """Converts given collection to exported values.
  This method uses a threadpool to do the conversion in parallel. It
  blocks for up to one hour until everything is converted.
  Args:
    values: Iterable object with values to convert.
    start_index: Start from this index in the collection.
    end_index: Finish processing on the (index - 1) element of the collection.
      If None, work till the end of the collection.
  Returns:
    Nothing. ConvertedBatch() should handle the results.
  """
  if not values:
    return
  try:
    total_batch_count = len(values) // self.batch_size
  except TypeError:
    total_batch_count = -1
  pool = ThreadPool.Factory(self.threadpool_prefix, self.threadpool_size)
  val_iterator = itertools.islice(values, start_index, end_index)
  pool.Start()
  try:
    for batch_index, batch in enumerate(
        collection.Batch(val_iterator, self.batch_size)
    ):
      logging.debug(
          "Processing batch %d out of %d", batch_index, total_batch_count
      )
      pool.AddTask(
          target=self.ConvertBatch,
          args=(batch,),
          name="batch_%d" % batch_index,
          inline=False,
      )
  finally:
    pool.Stop(join_timeout=3600) | Converts given collection to exported values.
This method uses a threadpool to do the conversion in parallel. It
blocks for up to one hour until everything is converted.
Args:
values: Iterable object with values to convert.
start_index: Start from this index in the collection.
end_index: Finish processing on the (index - 1) element of the collection.
If None, work till the end of the collection.
Returns:
Nothing. ConvertedBatch() should handle the results. | Convert | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
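For orientation, a hedged sketch of how `ConvertBatch` and `Convert` are meant to be combined. The subclass name and the result list are illustrative, not from the source; real subclasses write their results wherever `ConvertedBatch` handling requires.

```python
# Hypothetical subclass: ConvertBatch handles each batch and stores results.
class UppercaseBatchProcessor(BatchProcessor):

  def __init__(self, **kwargs):
    super().__init__(**kwargs)
    self.results = []

  def ConvertBatch(self, batch):
    # A plain list append is enough for a sketch; heavier workloads may need
    # explicit locking since batches run on pool threads.
    self.results.extend(value.upper() for value in batch)

processor = UppercaseBatchProcessor(batch_size=2)
processor.Convert(["a", "b", "c"])
```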
def _TestHeartBeat(self, cron_class, cron_started_event, heartbeat_event):
  """Helper for heartbeat tests."""
  cron_name = cron_class.__name__
  cronjobs.ScheduleSystemCronJobs(names=[cron_name])
  cron_manager = cronjobs.CronManager()
  jobs = cronjobs.CronManager().ListJobs()
  self.assertIn(cron_name, jobs)
  try:
    cron_manager.RunOnce()
    cron_started_event.wait()
    runs = cron_manager.ReadJobRuns(cron_name)
    self.assertLen(runs, 1)
    self.assertEqual(
        runs[0].status, rdf_cronjobs.CronJobRun.CronJobRunStatus.RUNNING
    )
  finally:
    heartbeat_event.set()
    cron_manager._GetThreadPool().Join()
  runs = cron_manager.ReadJobRuns(cron_name)
  self.assertLen(runs, 1)
  if cron_class.allow_overruns:
    expected_status = rdf_cronjobs.CronJobRun.CronJobRunStatus.FINISHED
  else:
    expected_status = (
        rdf_cronjobs.CronJobRun.CronJobRunStatus.LIFETIME_EXCEEDED
    )
  self.assertEqual(runs[0].status, expected_status) | Helper for heartbeat tests.
def GetMetadata(client_id, client_full_info):
  """Builds ExportedMetadata object for a given client id and ClientFullInfo."""
  metadata = base.ExportedMetadata()
  last_snapshot = None
  if client_full_info.HasField("last_snapshot"):
    last_snapshot = client_full_info.last_snapshot
  metadata.client_urn = client_id
  metadata.client_age = client_full_info.metadata.first_seen
  if last_snapshot is not None:
    kb = client_full_info.last_snapshot.knowledge_base
    os_release = last_snapshot.os_release
    os_version = last_snapshot.os_version
    metadata.hostname = kb.fqdn
    metadata.os = kb.os
    metadata.os_release = os_release
    metadata.os_version = os_version
    metadata.usernames = ",".join(user.username for user in kb.users)
    addresses = last_snapshot.GetMacAddresses()
    if addresses:
      metadata.mac_address = "\n".join(last_snapshot.GetMacAddresses())
    metadata.hardware_info = last_snapshot.hardware_info
    metadata.kernel_version = last_snapshot.kernel
    ci = last_snapshot.cloud_instance
    if ci is not None:
      if ci.cloud_type == ci.InstanceType.AMAZON:
        metadata.cloud_instance_type = metadata.CloudInstanceType.AMAZON
        metadata.cloud_instance_id = ci.amazon.instance_id
      elif ci.cloud_type == ci.InstanceType.GOOGLE:
        metadata.cloud_instance_type = metadata.CloudInstanceType.GOOGLE
        metadata.cloud_instance_id = ci.google.unique_id
  system_labels = set()
  user_labels = set()
  for l in client_full_info.labels:
    if l.owner == "GRR":
      system_labels.add(l.name)
    else:
      user_labels.add(l.name)
  metadata.labels = ",".join(sorted(system_labels | user_labels))
  metadata.system_labels = ",".join(sorted(system_labels))
  metadata.user_labels = ",".join(sorted(user_labels))
  return metadata | Builds ExportedMetadata object for a given client id and ClientFullInfo.
def ConvertValuesWithMetadata(metadata_value_pairs, options=None):
  """Converts a set of RDFValues into a set of export-friendly RDFValues.
  Args:
    metadata_value_pairs: Tuples of (metadata, rdf_value), where metadata is an
      instance of ExportedMetadata and rdf_value is an RDFValue subclass
      instance to be exported.
    options: rdfvalue.ExportOptions instance that will be passed to
      ExportConverters.
  Yields:
    Converted values. Converted values may be of different types.
  Raises:
    NoConverterFound: in case no suitable converters were found for a value in
      metadata_value_pairs. This error is only raised after
      all values in metadata_value_pairs are attempted to be
      converted. If there are multiple value types that could
      not be converted because of the lack of corresponding
      converters, only the last one will be specified in the
      exception message.
  """
  no_converter_found_error = None
  metadata_value_groups = collection.Group(
      metadata_value_pairs, lambda pair: pair[1].__class__.__name__
  )
  for metadata_values_group in metadata_value_groups.values():
    _, first_value = metadata_values_group[0]
    converters_classes = export_converters_registry.GetConvertersByValue(
        first_value
    )
    if not converters_classes:
      no_converter_found_error = "No converters found for value: %s" % str(
          first_value
      )
      continue
    converters = [cls(options) for cls in converters_classes]
    for converter in converters:
      for result in converter.BatchConvert(metadata_values_group):
        yield result
  if no_converter_found_error is not None:
    raise NoConverterFound(no_converter_found_error) | Converts a set of RDFValues into a set of export-friendly RDFValues.
Args:
metadata_value_pairs: Tuples of (metadata, rdf_value), where metadata is an
instance of ExportedMetadata and rdf_value is an RDFValue subclass
instance to be exported.
options: rdfvalue.ExportOptions instance that will be passed to
ExportConverters.
Yields:
Converted values. Converted values may be of different types.
Raises:
NoConverterFound: in case no suitable converters were found for a value in
metadata_value_pairs. This error is only raised after
all values in metadata_value_pairs are attempted to be
converted. If there are multiple value types that could
not be converted because of the lack of corresponding
converters, only the last one will be specified in the
exception message. | ConvertValuesWithMetadata | python | google/grr | grr/server/grr_response_server/export.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/export.py | Apache-2.0 |
def ConvertValues(default_metadata, values, options=None):
  """Converts a set of RDFValues into a set of export-friendly RDFValues.
  Args:
    default_metadata: base.ExportedMetadata instance with basic information
      about where the values come from. This metadata will be passed to
      exporters.
    values: Values to convert. They should be of the same type.
    options: rdfvalue.ExportOptions instance that will be passed to
      ExportConverters.
  Returns:
    Converted values. Converted values may be of different types
    (unlike the source values which are all of the same type). This is due to
    the fact that multiple ExportConverters may be applied to the same value
    thus generating multiple converted values of different types.
  Raises:
    NoConverterFound: in case no suitable converters were found for the values.
  """
  batch_data = [(default_metadata, obj) for obj in values]
  return ConvertValuesWithMetadata(batch_data, options=options) | Converts a set of RDFValues into a set of export-friendly RDFValues.
Args:
default_metadata: base.ExportedMetadata instance with basic information
about where the values come from. This metadata will be passed to
exporters.
values: Values to convert. They should be of the same type.
options: rdfvalue.ExportOptions instance that will be passed to
ExportConverters.
Returns:
Converted values. Converted values may be of different types
(unlike the source values which are all of the same type). This is due to
the fact that multiple ExportConverters may be applied to the same value
thus generating multiple converted values of different types.
Raises:
NoConverterFound: in case no suitable converters were found for the values. | ConvertValues | python | google/grr | grr/server/grr_response_server/export.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/export.py | Apache-2.0 |
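A hedged sketch of calling `ConvertValues`: the metadata fields and the RDF value are placeholders, it assumes `rdf_client` is importable as in the test code later in this table, and it assumes at least one converter is registered for the value type.

```python
# Illustrative only; values and a registered converter are assumptions.
metadata = base.ExportedMetadata(hostname="host.example.com")
values = [rdf_client.User(username="alice")]
try:
  for exported_value in ConvertValues(metadata, values):
    print(type(exported_value).__name__)
except NoConverterFound as error:
  print("Nothing to export:", error)
```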
def SplitEmailsAndAppendEmailDomain(self, address_list):
  """Splits a string of comma-separated emails, appending default domain."""
  result = []
  # Process email addresses, and build up a list.
  if isinstance(address_list, rdf_standard.DomainEmailAddress):
    address_list = [str(address_list)]
  elif isinstance(address_list, str):
    address_list = [address for address in address_list.split(",") if address]
  for address in address_list:
    result.append(self.AddEmailDomain(address))
  return result | Splits a string of comma-separated emails, appending default domain.
def SendEmail(
    self,
    to_addresses,
    from_address,
    subject,
    message,
    attachments=None,
    is_html=True,
    cc_addresses=None,
    message_id=None,
    headers=None,
):
  """Sends an email."""
  raise NotImplementedError() | Sends an email.
def SendEmail(
    self,
    to_addresses,
    from_address,
    subject,
    message,
    attachments=None,
    is_html=True,
    cc_addresses=None,
    message_id=None,
    headers=None,
):
  """This method sends an email notification.
  Args:
    to_addresses: [email protected] string, list of addresses as csv string,
      or rdf_standard.DomainEmailAddress
    from_address: [email protected] string
    subject: email subject string
    message: message contents string, as HTML or plain text
    attachments: iterable of filename string and file data tuples, e.g.
      {"/file/name/string": filedata}
    is_html: true if message is in HTML format
    cc_addresses: [email protected] string, or list of addresses as csv
      string
    message_id: smtp message_id. Used to enable conversation threading
    headers: dict of str-> str, headers to set
  Raises:
    EmailNotSentError: for problems connecting to smtp server.
  """
  headers = headers or {}
  msg = MIMEMultipart("alternative")
  if is_html:
    text = self.RemoveHtmlTags(message)
    part1 = MIMEText(text, "plain")
    msg.attach(part1)
    part2 = MIMEText(message, "html")
    msg.attach(part2)
  else:
    part1 = MIMEText(message, "plain")
    msg.attach(part1)
  if attachments:
    for file_name, file_data in attachments.items():
      part = MIMEBase("application", "octet-stream")
      part.set_payload(file_data)
      encoders.encode_base64(part)
      part.add_header(
          "Content-Disposition", 'attachment; filename="%s"' % file_name
      )
      msg.attach(part)
  msg["Subject"] = subject
  from_address = self.AddEmailDomain(from_address)
  to_addresses = self.SplitEmailsAndAppendEmailDomain(to_addresses)
  cc_addresses = self.SplitEmailsAndAppendEmailDomain(cc_addresses or "")
  msg["From"] = from_address
  msg["To"] = ",".join(to_addresses)
  if cc_addresses:
    msg["CC"] = ",".join(cc_addresses)
  if message_id:
    msg.add_header("Message-ID", message_id)
  for header, value in headers.items():
    msg.add_header(header, value)
  try:
    s = smtplib.SMTP(
        config.CONFIG["Worker.smtp_server"],
        int(config.CONFIG["Worker.smtp_port"]),
    )
    s.ehlo()
    if config.CONFIG["Worker.smtp_starttls"]:
      s.starttls()
      s.ehlo()
    if (
        config.CONFIG["Worker.smtp_user"]
        and config.CONFIG["Worker.smtp_password"]
    ):
      s.login(
          config.CONFIG["Worker.smtp_user"],
          config.CONFIG["Worker.smtp_password"],
      )
    s.sendmail(from_address, to_addresses + cc_addresses, msg.as_string())
    s.quit()
  except (socket.error, smtplib.SMTPException) as e:
    smtp_server = config.CONFIG["Worker.smtp_server"]
    raise EmailNotSentError(
        "Could not connect to SMTP server to send email. "
        "Please check config option Worker.smtp_server. "
        f"Currently set to {smtp_server}."
    ) from e | This method sends an email notification.
Args:
to_addresses: [email protected] string, list of addresses as csv string,
or rdf_standard.DomainEmailAddress
from_address: [email protected] string
subject: email subject string
message: message contents string, as HTML or plain text
attachments: iterable of filename string and file data tuples, e.g.
{"/file/name/string": filedata}
is_html: true if message is in HTML format
cc_addresses: [email protected] string, or list of addresses as csv
string
message_id: smtp message_id. Used to enable conversation threading
headers: dict of str-> str, headers to set
Raises:
EmailNotSentError: for problems connecting to smtp server. | SendEmail | python | google/grr | grr/server/grr_response_server/email_alerts.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/email_alerts.py | Apache-2.0 |
def InitializeEmailAlerterOnce():
  """Initializes e-mail alerts."""
  global EMAIL_ALERTER
  email_alerter_cls_name = config.CONFIG["Server.email_alerter_class"]
  logging.debug("Using email alerter: %s", email_alerter_cls_name)
  cls = EmailAlerterBase.GetPlugin(email_alerter_cls_name)
  EMAIL_ALERTER = cls() | Initializes e-mail alerts.
def WriteBlobsWithUnknownHashes(
    self,
    blobs_data: Iterable[bytes],
) -> List[models_blobs.BlobID]:
  """Writes the contents of the given blobs, using their hash as BlobID.
  Args:
    blobs_data: An iterable of bytes objects.
  Returns:
    A list of blob identifiers corresponding to each written blob.
  """
  blobs_ids = [models_blobs.BlobID.Of(d) for d in blobs_data]
  self.WriteBlobs(dict(zip(blobs_ids, blobs_data)))
  return blobs_ids | Writes the contents of the given blobs, using their hash as BlobID.
Args:
blobs_data: An iterable of bytes objects.
Returns:
A list of blob identifiers corresponding to each written blob. | WriteBlobsWithUnknownHashes | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
def WriteBlobWithUnknownHash(
    self,
    blob_data: bytes,
) -> models_blobs.BlobID:
  """Writes the content of the given blob, using its hash as BlobID.
  Args:
    blob_data: Blob contents as bytes.
  Returns:
    A blob identifier corresponding to the written blob.
  """
  return self.WriteBlobsWithUnknownHashes([blob_data])[0] | Writes the content of the given blob, using its hash as BlobID.
Args:
blob_data: Blob contents as bytes.
Returns:
A blob identifier corresponding to the written blob. | WriteBlobWithUnknownHash | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
def ReadBlob(
    self,
    blob_id: models_blobs.BlobID,
) -> Optional[bytes]:
  """Reads the blob contents, identified by the given BlobID.
  Args:
    blob_id: An identifier of the blob to read.
  Returns:
    Bytes corresponding to a given blob or None if such blob
    does not exist.
  """
  return self.ReadBlobs([blob_id])[blob_id] | Reads the blob contents, identified by the given BlobID.
Args:
blob_id: An identifier of the blob to read.
Returns:
Bytes corresponding to a given blob or None if such blob
does not exist. | ReadBlob | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
def CheckBlobExists(
    self,
    blob_id: models_blobs.BlobID,
) -> bool:
  """Checks if a blob with a given BlobID exists.
  Args:
    blob_id: An identifier of the blob to check for existence.
  Returns:
    True if the blob exists, False otherwise.
  """
  return self.CheckBlobsExist([blob_id])[blob_id] | Checks if a blob with a given BlobID exists.
Args:
blob_id: An identifier of the blob to check for existence.
Returns:
True if the blob exists, False otherwise. | CheckBlobExists | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
def WriteBlobs(
    self,
    blob_id_data_map: Dict[models_blobs.BlobID, bytes],
) -> None:
  """Creates or overwrites blobs.
  Args:
    blob_id_data_map: A mapping from blob identifiers to blob data to write.
  """ | Creates or overwrites blobs.
Args:
blob_id_data_map: A mapping from blob identifiers to blob data to write. | WriteBlobs | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
def ReadBlobs(
    self, blob_ids: Iterable[models_blobs.BlobID]
) -> Dict[models_blobs.BlobID, Optional[bytes]]:
  """Reads all blobs, specified by blob_ids, returning their contents.
  Args:
    blob_ids: An iterable of BlobIDs.
  Returns:
    A map of {blob_id: blob_data} where blob_data is blob bytes previously
    written with WriteBlobs. If a particular blob_id is not found, the
    corresponding blob_data will be None.
  """ | Reads all blobs, specified by blob_ids, returning their contents.
Args:
blob_ids: An iterable of BlobIDs.
Returns:
A map of {blob_id: blob_data} where blob_data is blob bytes previously
written with WriteBlobs. If a particular blob_id is not found, the
corresponding blob_data will be None. | ReadBlobs | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
def CheckBlobsExist(
    self,
    blob_ids: Iterable[models_blobs.BlobID],
) -> Dict[models_blobs.BlobID, bool]:
  """Checks if blobs for the given identifiers already exist.
  Args:
    blob_ids: An iterable of BlobIDs.
  Returns:
    A map of {blob_id: status} where status is a boolean (True if blob exists,
    False if it doesn't).
  """ | Checks if blobs for the given identifiers already exist.
Args:
blob_ids: An iterable of BlobIDs.
Returns:
A map of {blob_id: status} where status is a boolean (True if blob exists,
False if it doesn't). | CheckBlobsExist | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
def ReadAndWaitForBlobs(
    self,
    blob_ids: Iterable[models_blobs.BlobID],
    timeout: rdfvalue.Duration,
) -> Dict[models_blobs.BlobID, Optional[bytes]]:
  """Reads specified blobs, waiting and retrying if blobs do not exist yet.
  Args:
    blob_ids: An iterable of BlobIDs.
    timeout: A rdfvalue.Duration specifying the maximum time to pass until the
      last poll is conducted. The overall runtime of ReadAndWaitForBlobs can
      be higher, because `timeout` is a threshold for the start (and not end)
      of the last attempt at reading.
  Returns:
    A map of {blob_id: blob_data} where blob_data is blob bytes previously
    written with WriteBlobs. If a particular blob_id is not found, the
    corresponding blob_data will be None.
  """
  remaining_ids = set(blob_ids)
  results = {blob_id: None for blob_id in remaining_ids}
  start = rdfvalue.RDFDatetime.Now()
  # TODO: Implement truncated exponential backoff.
  sleep_dur = rdfvalue.Duration.From(1, rdfvalue.SECONDS)
  poll_num = 0
  while remaining_ids:
    cur_blobs = self.ReadBlobs(list(remaining_ids))
    now = rdfvalue.RDFDatetime.Now()
    elapsed = now - start
    poll_num += 1
    for blob_id, blob in cur_blobs.items():
      if blob is None:
        continue
      results[blob_id] = blob
      remaining_ids.remove(blob_id)
      BLOB_STORE_POLL_HIT_LATENCY.RecordEvent(
          elapsed.ToFractional(rdfvalue.SECONDS)
      )
      BLOB_STORE_POLL_HIT_ITERATION.RecordEvent(poll_num)
    if not remaining_ids or elapsed + sleep_dur >= timeout:
      break
    time.sleep(sleep_dur.ToFractional(rdfvalue.SECONDS))
  return results | Reads specified blobs, waiting and retrying if blobs do not exist yet.
Args:
blob_ids: An iterable of BlobIDs.
timeout: A rdfvalue.Duration specifying the maximum time to pass until the
last poll is conducted. The overall runtime of ReadAndWaitForBlobs can
be higher, because `timeout` is a threshold for the start (and not end)
of the last attempt at reading.
Returns:
A map of {blob_id: blob_data} where blob_data is blob bytes previously
written with WriteBlobs. If a particular blob_id is not found, the
corresponding blob_data will be None. | ReadAndWaitForBlobs | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
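A small sketch of the polling read, assuming an initialized blob store is reachable as `data_store.BLOBS` (as in the signed-binary helpers later in this table); the payload and timeout are placeholders.

```python
# Write a blob, then poll for it with a 30-second budget.
blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(b"example payload")
timeout = rdfvalue.Duration.From(30, rdfvalue.SECONDS)
blobs = data_store.BLOBS.ReadAndWaitForBlobs([blob_id], timeout=timeout)
assert blobs[blob_id] == b"example payload"
```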
def ReadAndWaitForBlob(
    self,
    blob_id: models_blobs.BlobID,
    timeout: rdfvalue.Duration,
) -> Optional[bytes]:
  """Reads the specified blobs waiting until it is available or times out.
  Args:
    blob_id: An identifier of the blob to read.
    timeout: A timeout after which `None` is returned instead.
  Returns:
    Content of the requested blob or `None` if the timeout was reached.
  """
  return self.ReadAndWaitForBlobs([blob_id], timeout)[blob_id] | Reads the specified blobs waiting until it is available or times out.
Args:
blob_id: An identifier of the blob to read.
timeout: A timeout after which `None` is returned instead.
Returns:
Content of the requested blob or `None` if the timeout was reached. | ReadAndWaitForBlob | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
def WaitForBlobs(
    self,
    blob_ids: Iterable[models_blobs.BlobID],
    timeout: rdfvalue.Duration,
) -> None:
  """Waits for specified blobs to appear in the database.
  Args:
    blob_ids: A collection of blob ids to await for.
    timeout: A duration specifying the maximum amount of time to wait.
  Raises:
    BlobStoreTimeoutError: If the blobs are still not in the database after
      the specified timeout duration has elapsed.
  """
  remaining_blob_ids = set(blob_ids)
  # TODO: See a TODO comment in `RunAndWaitForBlobs`.
  sleep_duration = rdfvalue.Duration.From(1, rdfvalue.SECONDS)
  start_time = rdfvalue.RDFDatetime.Now()
  ticks = 0
  while True:
    blob_id_exists = self.CheckBlobsExist(remaining_blob_ids)
    elapsed = rdfvalue.RDFDatetime.Now() - start_time
    elapsed_secs = elapsed.ToFractional(rdfvalue.SECONDS)
    ticks += 1
    for blob_id, exists in blob_id_exists.items():
      if not exists:
        continue
      remaining_blob_ids.remove(blob_id)
      BLOB_STORE_POLL_HIT_LATENCY.RecordEvent(elapsed_secs)
      BLOB_STORE_POLL_HIT_ITERATION.RecordEvent(ticks)
    if not remaining_blob_ids:
      break
    if elapsed + sleep_duration >= timeout:
      raise BlobStoreTimeoutError()
    sleep_duration_secs = sleep_duration.ToFractional(rdfvalue.SECONDS)
    time.sleep(sleep_duration_secs) | Waits for specified blobs to appear in the database.
Args:
blob_ids: A collection of blob ids to await for.
timeout: A duration specifying the maximum amount of time to wait.
Raises:
BlobStoreTimeoutError: If the blobs are still not in the database after
the specified timeout duration has elapsed. | WaitForBlobs | python | google/grr | grr/server/grr_response_server/blob_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store.py | Apache-2.0 |
def __init__(
    self,
    metadata: rdf_stats.MetricMetadata,
    registry: prometheus_client.registry.CollectorRegistry,
):
  """Instantiates a new _Metric.
  Args:
    metadata: An rdf_stats.MetricMetadata instance describing this _Metric.
    registry: A prometheus_client.Registry instance.
  Raises:
    ValueError: metadata contains an unknown metric_type.
  """
  self.metadata = metadata
  self.fields = stats_utils.FieldDefinitionTuplesFromProtos(
      metadata.fields_defs
  )
  field_names = [name for name, _ in self.fields]
  if metadata.metric_type == rdf_stats.MetricMetadata.MetricType.COUNTER:
    self.metric = prometheus_client.Counter(
        metadata.varname,
        metadata.docstring,
        labelnames=field_names,
        registry=registry,
    )
  elif metadata.metric_type == rdf_stats.MetricMetadata.MetricType.EVENT:
    bins = metadata.bins or [
        0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 4, 5,
        6, 7, 8, 9, 10, 15, 20, 50, 100,
    ]
    self.metric = prometheus_client.Histogram(
        metadata.varname,
        metadata.docstring,
        labelnames=field_names,
        buckets=bins,
        registry=registry,
    )
  elif metadata.metric_type == rdf_stats.MetricMetadata.MetricType.GAUGE:
    self.metric = prometheus_client.Gauge(
        metadata.varname,
        metadata.docstring,
        labelnames=field_names,
        registry=registry,
    )
  else:
    raise ValueError("Unknown metric type: {!r}".format(metadata.metric_type)) | Instantiates a new _Metric.
Args:
metadata: An rdf_stats.MetricMetadata instance describing this _Metric.
registry: A prometheus_client.Registry instance.
Raises:
ValueError: metadata contains an unknown metric_type. | __init__ | python | google/grr | grr/server/grr_response_server/prometheus_stats_collector.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/prometheus_stats_collector.py | Apache-2.0 |
def _DistributionFromHistogram(metric, values_by_suffix):
  """Instantiate a rdf_stats.Distribution from a Prometheus Histogram.
  Prometheus Histogram uses cumulative "buckets" lower or equal to an upper
  bound. At instantiation, +Inf is implicitly appended to the upper bounds.
  The delimiters [0.0, 0.1, 0.2 (, +Inf)] produce the following buckets:
    Bucket "0.0" : -Inf <= values <= 0.0
    Bucket "0.1" : -Inf <= values <= 0.1
    Bucket "0.2" : -Inf <= values <= 0.2
    Bucket "+Inf": -Inf <= values <= +Inf
  Distribution uses exclusive bins greater or equal to a lower bound and
  strictly lower than the next lower bound. At instantiation, -Inf is implicitly
  prepended. The delimiters [(-Inf,) 0.0, 0.1, 0.2] produce the following bins:
    Bin "-Inf": -Inf <= values < 0.0
    Bin "0.0" : 0.0 <= values < 0.1
    Bin "0.1" : 0.1 <= values < 0.2
    Bin "0.2" : 0.2 <= values <= +Inf
  Thus, Histogram buckets can be transformed to Distribution bins, by reading
  in the same order and subtracting the value of the previous bin to remove the
  cumulative sum. There is a slight incompatibility for values equal to bin
  boundaries, because boundaries describe the upper bound for Prometheus and
  the lower bound for our internal implementation.
  Args:
    metric: prometheus_stats_collector.Metric
    values_by_suffix: dict of metric name suffixes and sample values lists
  Returns:
    rdf_stats.Distribution
  Raises:
    ValueError: The Histogram and metadata bin count do not match.
  """
  dist = rdf_stats.Distribution(bins=list(metric.metadata.bins))
  if metric.metadata.bins and len(dist.heights) != len(
      values_by_suffix["_bucket"]
  ):
    raise ValueError(
        "Trying to create Distribution with {} bins, but underlying"
        "Histogram has {} buckets".format(
            len(dist.heights), len(values_by_suffix["_bucket"])
        )
    )
  dist.heights = values_by_suffix["_bucket"]
  # Remove cumulative sum by subtracting the value of the previous bin
  for i in reversed(range(1, len(dist.heights))):
    dist.heights[i] -= dist.heights[i - 1]
  dist.count = values_by_suffix["_count"][0]
  dist.sum = values_by_suffix["_sum"][0]
  return dist | Instantiate a rdf_stats.Distribution from a Prometheus Histogram.
Prometheus Histogram uses cumulative "buckets" lower or equal to an upper
bound. At instantiation, +Inf is implicitly appended to the upper bounds.
The delimiters [0.0, 0.1, 0.2 (, +Inf)] produce the following buckets:
Bucket "0.0" : -Inf <= values <= 0.0
Bucket "0.1" : -Inf <= values <= 0.1
Bucket "0.2" : -Inf <= values <= 0.2
Bucket "+Inf": -Inf <= values <= +Inf
Distribution uses exclusive bins greater or equal to a lower bound and
strictly lower than the next lower bound. At instantiation, -Inf is implicitly
prepended. The delimiters [(-Inf,) 0.0, 0.1, 0.2] produce the following bins:
Bin "-Inf": -Inf <= values < 0.0
Bin "0.0" : 0.0 <= values < 0.1
Bin "0.1" : 0.1 <= values < 0.2
Bin "0.2" : 0.2 <= values <= +Inf
Thus, Histogram buckets can be transformed to Distribution bins, by reading
in the same order and subtracting the value of the previous bin to remove the
cumulative sum. There is a slight incompatibility for values equal to bin
boundaries, because boundaries describe the upper bound for Prometheus and
the lower bound for our internal implementation.
Args:
metric: prometheus_stats_collector.Metric
values_by_suffix: dict of metric name suffixes and sample values lists
Returns:
rdf_stats.Distribution
Raises:
ValueError: The Histogram and metadata bin count do not match. | _DistributionFromHistogram | python | google/grr | grr/server/grr_response_server/prometheus_stats_collector.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/prometheus_stats_collector.py | Apache-2.0 |
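The cumulative-to-per-bin conversion described above, worked through on a tiny example in plain Python (no Prometheus or RDF objects involved; the numbers are made up for illustration):

```python
# Histogram buckets for bins [0.0, 0.1, 0.2] plus the implicit +Inf bucket.
cumulative_heights = [2, 5, 7, 7]  # counts for <=0.0, <=0.1, <=0.2, <=+Inf
heights = list(cumulative_heights)
# Walk backwards, subtracting the previous cumulative count.
for i in reversed(range(1, len(heights))):
  heights[i] -= heights[i - 1]
print(heights)  # [2, 3, 2, 0]: per-bin counts for (-Inf,0.0), [0.0,0.1), [0.1,0.2), [0.2,+Inf)
```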
def __init__(self, registry=None):
  """Instantiates a new PrometheusStatsCollector.
  Args:
    registry: An instance of prometheus_client.CollectorRegistry. If None, a
      new CollectorRegistry is instantiated. Use prometheus_client.REGISTRY
      for the global default registry.
  """
  self._metrics: Dict[Text, _Metric] = {}
  if registry is None:
    self._registry = prometheus_client.CollectorRegistry(auto_describe=True)
  else:
    self._registry = registry
  super().__init__() | Instantiates a new PrometheusStatsCollector.
Args:
registry: An instance of prometheus_client.CollectorRegistry. If None, a
new CollectorRegistry is instantiated. Use prometheus_client.REGISTRY
for the global default registry. | __init__ | python | google/grr | grr/server/grr_response_server/prometheus_stats_collector.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/prometheus_stats_collector.py | Apache-2.0 |
def SignedBinaryIDFromURN(
    binary_urn: rdfvalue.RDFURN,
) -> objects_pb2.SignedBinaryID:
  """Converts an AFF4 URN for a signed binary to a SignedBinaryID."""
  if binary_urn.RelativeName(GetAFF4PythonHackRoot()):
    return objects_pb2.SignedBinaryID(
        binary_type=objects_pb2.SignedBinaryID.BinaryType.PYTHON_HACK,
        path=binary_urn.RelativeName(GetAFF4PythonHackRoot()),
    )
  elif binary_urn.RelativeName(GetAFF4ExecutablesRoot()):
    return objects_pb2.SignedBinaryID(
        binary_type=objects_pb2.SignedBinaryID.BinaryType.EXECUTABLE,
        path=binary_urn.RelativeName(GetAFF4ExecutablesRoot()),
    )
  else:
    raise ValueError(
        "Unable to determine type of signed binary: %s." % binary_urn
    ) | Converts an AFF4 URN for a signed binary to a SignedBinaryID.
def _SignedBinaryURNFromID(
    binary_id: objects_pb2.SignedBinaryID,
) -> rdfvalue.RDFURN:
  """Converts a SignedBinaryID to the equivalent AFF4 URN."""
  binary_type = binary_id.binary_type
  if binary_type == objects_pb2.SignedBinaryID.BinaryType.PYTHON_HACK:
    return GetAFF4PythonHackRoot().Add(binary_id.path)
  elif binary_type == objects_pb2.SignedBinaryID.BinaryType.EXECUTABLE:
    return GetAFF4ExecutablesRoot().Add(binary_id.path)
  else:
    raise ValueError("Unknown binary type %s." % binary_type) | Converts a SignedBinaryID to the equivalent AFF4 URN.
def WriteSignedBinary(
    binary_urn: rdfvalue.RDFURN,
    binary_content: bytes,
    private_key: rdf_crypto.RSAPrivateKey,
    public_key: Optional[rdf_crypto.RSAPublicKey],
    chunk_size: int = 1024,
):
  """Signs a binary and saves it to the datastore.
  If a signed binary with the given URN already exists, its contents will get
  overwritten.
  Args:
    binary_urn: URN that should serve as a unique identifier for the binary.
    binary_content: Contents of the binary, as raw bytes.
    private_key: Key that should be used for signing the binary contents.
    public_key: Key that should be used to verify the signature generated using
      the private key.
    chunk_size: Size, in bytes, of the individual blobs that the binary contents
      will be split to before saving to the datastore.
  """
  blob_references = objects_pb2.BlobReferences()
  for chunk_offset in range(0, len(binary_content), chunk_size):
    chunk = binary_content[chunk_offset : chunk_offset + chunk_size]
    blob_rdf = rdf_crypto.SignedBlob()
    blob_rdf.Sign(chunk, private_key, verify_key=public_key)
    blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(
        blob_rdf.SerializeToBytes()
    )
    blob_references.items.append(
        objects_pb2.BlobReference(
            offset=chunk_offset,
            size=len(chunk),
            blob_id=bytes(blob_id),
        )
    )
  data_store.REL_DB.WriteSignedBinaryReferences(
      SignedBinaryIDFromURN(binary_urn), blob_references
  ) | Signs a binary and saves it to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: URN that should serve as a unique identifier for the binary.
binary_content: Contents of the binary, as raw bytes.
private_key: Key that should be used for signing the binary contents.
public_key: Key that should be used to verify the signature generated using
the private key.
chunk_size: Size, in bytes, of the individual blobs that the binary contents
will be split to before saving to the datastore. | WriteSignedBinary | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
def WriteSignedBinaryBlobs(
    binary_urn: rdfvalue.RDFURN,
    blobs: Iterable[jobs_pb2.SignedBlob],
) -> None:
  """Saves signed blobs to the datastore.
  If a signed binary with the given URN already exists, its contents will get
  overwritten.
  Args:
    binary_urn: RDFURN that should serve as a unique identifier for the binary.
    blobs: An Iterable of signed blobs to write to the datastore.
  """
  blob_references = objects_pb2.BlobReferences()
  current_offset = 0
  for blob in blobs:
    blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(
        blob.SerializeToString()
    )
    blob_references.items.append(
        objects_pb2.BlobReference(
            offset=current_offset,
            size=len(blob.data),
            blob_id=bytes(blob_id),
        )
    )
    current_offset += len(blob.data)
  data_store.REL_DB.WriteSignedBinaryReferences(
      SignedBinaryIDFromURN(binary_urn), blob_references
  ) | Saves signed blobs to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: RDFURN that should serve as a unique identifier for the binary.
blobs: An Iterable of signed blobs to write to the datastore. | WriteSignedBinaryBlobs | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
def DeleteSignedBinary(binary_urn: rdfvalue.RDFURN):
  """Deletes the binary with the given urn from the datastore.
  Args:
    binary_urn: RDFURN that serves as a unique identifier for the binary.
  Raises:
    SignedBinaryNotFoundError: If the signed binary does not exist.
  """
  try:
    data_store.REL_DB.ReadSignedBinaryReferences(
        SignedBinaryIDFromURN(binary_urn)
    )
  except db.UnknownSignedBinaryError:
    raise SignedBinaryNotFoundError(binary_urn)
  data_store.REL_DB.DeleteSignedBinaryReferences(
      SignedBinaryIDFromURN(binary_urn)
  ) | Deletes the binary with the given urn from the datastore.
Args:
binary_urn: RDFURN that serves as a unique identifier for the binary.
Raises:
SignedBinaryNotFoundError: If the signed binary does not exist. | DeleteSignedBinary | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
def FetchURNsForAllSignedBinaries() -> Sequence[rdfvalue.RDFURN]:
  """Returns URNs for all signed binaries in the datastore."""
  return [
      _SignedBinaryURNFromID(i)
      for i in data_store.REL_DB.ReadIDsForAllSignedBinaries()
  ] | Returns URNs for all signed binaries in the datastore.
def FetchBlobsForSignedBinaryByID(
    binary_id: objects_pb2.SignedBinaryID,
) -> Tuple[Iterator[rdf_crypto.SignedBlob], rdfvalue.RDFDatetime]:
  """Retrieves blobs for the given binary from the datastore.
  Args:
    binary_id: An ID of the binary to be fetched.
  Returns:
    A tuple containing an iterator for all the binary's blobs and an
    RDFDatetime representing when the binary's contents were saved
    to the datastore.
  Raises:
    SignedBinaryNotFoundError: If no signed binary with the given URN exists.
  """
  try:
    references, timestamp = data_store.REL_DB.ReadSignedBinaryReferences(
        binary_id
    )
  except db.UnknownSignedBinaryError:
    raise SignedBinaryNotFoundError(_SignedBinaryURNFromID(binary_id))
  blob_ids = [models_blob.BlobID(r.blob_id) for r in references.items]
  raw_blobs = (data_store.BLOBS.ReadBlob(blob_id) for blob_id in blob_ids)
  blobs = (
      rdf_crypto.SignedBlob.FromSerializedBytes(raw_blob)
      for raw_blob in raw_blobs
  )
  return blobs, timestamp | Retrieves blobs for the given binary from the datastore.
Args:
binary_id: An ID of the binary to be fetched.
Returns:
A tuple containing an iterator for all the binary's blobs and an
RDFDatetime representing when the binary's contents were saved
to the datastore.
Raises:
SignedBinaryNotFoundError: If no signed binary with the given URN exists. | FetchBlobsForSignedBinaryByID | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
def FetchBlobForSignedBinaryByID(
    binary_id: objects_pb2.SignedBinaryID,
    blob_index: int,
) -> rdf_crypto.SignedBlob:
  """Retrieves a single blob for the given binary from the datastore.
  Args:
    binary_id: An ID of the binary to be fetched.
    blob_index: Index of the blob to read.
  Returns:
    Signed blob.
  Raises:
    SignedBinaryNotFoundError: If no signed binary with the given URN exists.
    BlobIndexOutOfBoundsError: If requested blob index is too big.
  """
  if blob_index < 0:
    raise ValueError("blob_index must be >= 0.")
  try:
    references, _ = data_store.REL_DB.ReadSignedBinaryReferences(binary_id)
  except db.UnknownSignedBinaryError:
    raise SignedBinaryNotFoundError(_SignedBinaryURNFromID(binary_id))
  try:
    blob_id_bytes = references.items[blob_index].blob_id
  except IndexError:
    raise BlobIndexOutOfBoundsError(f"{blob_index} >= {len(references.items)}")
  blob_id = models_blob.BlobID(blob_id_bytes)
  raw_blob = data_store.BLOBS.ReadBlob(blob_id)
  return rdf_crypto.SignedBlob.FromSerializedBytes(raw_blob) | Retrieves a single blob for the given binary from the datastore.
Args:
binary_id: An ID of the binary to be fetched.
blob_index: Index of the blob to read.
Returns:
Signed blob.
Raises:
SignedBinaryNotFoundError: If no signed binary with the given URN exists.
BlobIndexOutOfBoundsError: If requested blob index is too big. | FetchBlobForSignedBinaryByID | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
def FetchBlobsForSignedBinaryByURN(
    binary_urn: rdfvalue.RDFURN,
) -> Tuple[Iterator[rdf_crypto.SignedBlob], rdfvalue.RDFDatetime]:
  """Retrieves blobs for the given binary from the datastore.
  Args:
    binary_urn: RDFURN that uniquely identifies the binary.
  Returns:
    A tuple containing an iterator for all the binary's blobs and an
    RDFDatetime representing when the binary's contents were saved
    to the datastore.
  Raises:
    SignedBinaryNotFoundError: If no signed binary with the given URN exists.
  """
  return FetchBlobsForSignedBinaryByID(SignedBinaryIDFromURN(binary_urn)) | Retrieves blobs for the given binary from the datastore.
Args:
binary_urn: RDFURN that uniquely identifies the binary.
Returns:
A tuple containing an iterator for all the binary's blobs and an
RDFDatetime representing when the binary's contents were saved
to the datastore.
Raises:
SignedBinaryNotFoundError: If no signed binary with the given URN exists. | FetchBlobsForSignedBinaryByURN | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
def FetchSizeOfSignedBinary(
    binary_id_or_urn: Union[rdf_objects.SignedBinaryID, rdfvalue.RDFURN],
) -> int:
  """Returns the size of the given binary (in bytes).
  Args:
    binary_id_or_urn: SignedBinaryID or RDFURN that uniquely identifies the
      binary.
  Raises:
    SignedBinaryNotFoundError: If no signed binary with the given URN exists.
  """
  if isinstance(binary_id_or_urn, rdfvalue.RDFURN):
    binary_id = SignedBinaryIDFromURN(binary_id_or_urn)
  else:
    binary_id = binary_id_or_urn
  try:
    references, _ = data_store.REL_DB.ReadSignedBinaryReferences(binary_id)
  except db.UnknownSignedBinaryError:
    raise SignedBinaryNotFoundError(binary_id)
  last_reference = references.items[-1]
  return last_reference.offset + last_reference.size | Returns the size of the given binary (in bytes).
Args:
binary_id_or_urn: SignedBinaryID or RDFURN that uniquely identifies the
binary.
Raises:
SignedBinaryNotFoundError: If no signed binary with the given URN exists. | FetchSizeOfSignedBinary | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
def StreamSignedBinaryContents(
    blob_iterator: Iterator[rdf_crypto.SignedBlob], chunk_size: int = 1024
) -> Generator[bytes, None, None]:
  """Yields the contents of the given binary in chunks of the given size.
  Args:
    blob_iterator: An Iterator over all the binary's blobs.
    chunk_size: Size, in bytes, of the chunks to yield.
  """
  all_blobs_read = False
  byte_buffer = io.BytesIO()
  while not all_blobs_read or byte_buffer.getvalue():
    while not all_blobs_read and byte_buffer.tell() < chunk_size:
      try:
        blob = next(blob_iterator)
      except StopIteration:
        all_blobs_read = True
        break
      byte_buffer.write(blob.data)
    if byte_buffer.tell() > 0:
      # Yield a chunk of the signed binary and reset the buffer to contain
      # only data that hasn't been sent yet.
      byte_buffer.seek(0)
      yield byte_buffer.read(chunk_size)
      byte_buffer = io.BytesIO(byte_buffer.read())
      byte_buffer.seek(0, io.SEEK_END) | Yields the contents of the given binary in chunks of the given size.
Args:
blob_iterator: An Iterator over all the binary's blobs.
chunk_size: Size, in bytes, of the chunks to yield. | StreamSignedBinaryContents | python | google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/signed_binary_utils.py | Apache-2.0 |
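A hedged sketch chaining two helpers from this file: fetch the signed blobs for a binary by URN and stream them to disk. The binary path and output file are placeholders.

```python
# Illustrative path under the executables root; adjust to a real binary.
binary_urn = GetAFF4ExecutablesRoot().Add("windows/example_tool.exe")
blobs, timestamp = FetchBlobsForSignedBinaryByURN(binary_urn)
with open("/tmp/example_tool.exe", "wb") as out_file:
  for chunk in StreamSignedBinaryContents(blobs, chunk_size=4096):
    out_file.write(chunk)
print("binary written, last updated at", timestamp)
```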
def testArtifactsValidate(self, registry):
  """Check each artifact we have passes validation."""
  registry.AddFileSource(self.test_artifacts_file)
  for artifact in registry.GetArtifacts():
    ar.Validate(artifact) | Check each artifact we have passes validation.
def testArtifactsDependencies(self, registry):
  """Check artifact dependencies work."""
  registry.AddFileSource(self.test_artifacts_file)
  art_obj = registry.GetArtifact("TestAggregationArtifactDeps")
  deps = ar.GetArtifactDependencies(art_obj)
  self.assertCountEqual(list(deps), ["TestAggregationArtifact"])
  deps = ar.GetArtifactDependencies(art_obj, recursive=True)
  self.assertCountEqual(
      list(deps),
      ["TestOSAgnostic", "TestCmdArtifact", "TestAggregationArtifact"],
  )
  # Test recursive loop.
  # Make sure we use the registry registered version of the class.
  source = art_obj.sources[0]
  backup = source.attributes["names"]
  try:
    source.attributes["names"] = ["TestAggregationArtifactDeps"]
    with self.assertRaises(RuntimeError) as e:
      ar.GetArtifactDependencies(art_obj, recursive=True)
    self.assertIn("artifact recursion depth", str(e.exception))
  finally:
    source.attributes["names"] = backup  # Restore old source. | Check artifact dependencies work.
def testUserMergeWindows(self):
  """Check Windows users are accurately merged."""
  kb = rdf_client.KnowledgeBase()
  self.assertEmpty(kb.users)
  kb.MergeOrAddUser(rdf_client.User(sid="1234"))
  self.assertLen(kb.users, 1)
  kb.MergeOrAddUser(rdf_client.User(sid="5678", username="test1"))
  self.assertLen(kb.users, 2)
  _, conflicts = kb.MergeOrAddUser(
      rdf_client.User(sid="5678", username="test2")
  )
  self.assertLen(kb.users, 2)
  self.assertEqual(conflicts[0], ("username", "test1", "test2"))
  self.assertEqual(kb.GetUser(sid="5678").username, "test2")
  # This should merge on user name as we have no other data.
  kb.MergeOrAddUser(rdf_client.User(username="test2", homedir="a"))
  self.assertLen(kb.users, 2)
  # This should create a new user since the sid is different.
  new_attrs, conflicts = kb.MergeOrAddUser(
      rdf_client.User(username="test2", sid="12345", temp="/blah")
  )
  self.assertLen(kb.users, 3)
  self.assertCountEqual(
      new_attrs, ["users.username", "users.temp", "users.sid"]
  )
  self.assertEqual(conflicts, []) | Check Windows users are accurately merged.
def testUserMergeLinux(self):
  """Check Linux users are accurately merged."""
  kb = rdf_client.KnowledgeBase()
  self.assertEmpty(kb.users)
  kb.MergeOrAddUser(rdf_client.User(username="blake", last_logon=1111))
  self.assertLen(kb.users, 1)
  # This should merge since the username is the same.
  kb.MergeOrAddUser(rdf_client.User(uid="12", username="blake"))
  self.assertLen(kb.users, 1)
  # This should create a new record because the uid is different
  kb.MergeOrAddUser(
      rdf_client.User(
          username="blake", uid="13", desktop="/home/blake/Desktop"
      )
  )
  self.assertLen(kb.users, 2)
  kb.MergeOrAddUser(
      rdf_client.User(
          username="newblake", uid="14", desktop="/home/blake/Desktop"
      )
  )
  self.assertLen(kb.users, 3)
  # Check merging where we don't specify uid works
  new_attrs, conflicts = kb.MergeOrAddUser(
      rdf_client.User(username="newblake", desktop="/home/blakey/Desktop")
  )
  self.assertLen(kb.users, 3)
  self.assertCountEqual(new_attrs, ["users.username", "users.desktop"])
  self.assertCountEqual(
      conflicts, [("desktop", "/home/blake/Desktop", "/home/blakey/Desktop")]
  ) | Check Linux users are accurately merged.
def Start(self, process_fn: Callable[[common_pb2.Message], None]) -> None:
  """Start the (asynchronous) subscriber.
  Args:
    process_fn: message-processing callback; all messages received from
      Fleetspeak are passed to this function.
  Multiple message-receiving and processing threads will be spawned in the
  background, as per the config var `Server.fleetspeak_cps_concurrency.
  """

  def _PubsubCallback(cps_msg: pubsub_v1.subscriber.message.Message) -> None:
    # Using broad Exception catching here because, at this point, any error
    # is unrecoverable. This code is run by some thread spawned by the
    # google-cloud lib; any uncaught exception would just crash that thread.
    try:
      fs_msg = common_pb2.Message.FromString(cps_msg.data)
    except Exception as e:  # pylint: disable=broad-exception-caught
      # Any error in message deserialization is final - we don't know how to
      # handle the message. Log the error and drop the message permanently.
      logging.exception(
          "Dropping malformed CPS message from Fleetspeak: %s", e
      )
      cps_msg.ack()
      return
    try:
      process_fn(fs_msg)
    except Exception as e:  # pylint: disable=broad-exception-caught
      # A message processing error might be temporary (i.e. may be caused by
      # some temporary condition). Mark the message as NACK, so that it will
      # be redelivered at a later time.
      logging.exception("Exception during CPS message processing: %s", e)
      cps_msg.nack()
    else:
      cps_msg.ack()

  self._client = pubsub_v1.SubscriberClient()
  sub_path = self._client.subscription_path(self._project, self._subscription)
  for i in range(self._concurrency):
    logging.info(
        "Starting Cloud Pub/Sub subscriber %d/%d", i + 1, self._concurrency
    )
    fut = self._client.subscribe(sub_path, callback=_PubsubCallback)
    self._sub_futures.append(fut) | Start the (asynchronous) subscriber.
Args:
process_fn: message-processing callback; all messages received from
Fleetspeak are passed to this function.
Multiple message-receiving and processing threads will be spawned in the
background, as per the config var `Server.fleetspeak_cps_concurrency`. | Start | python | google/grr | grr/server/grr_response_server/fleetspeak_cps.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_cps.py | Apache-2.0 |
def Stop(self) -> None:
"""Stop the (asynchronous) subscriber.
This will block until all message-processing threads shut down.
"""
for fut in self._sub_futures:
fut.cancel()
for fut in self._sub_futures:
fut.result()
self._client = None
self._sub_futures = [] | Stop the (asynchronous) subscriber.
This will block until all message-processing threads shut down. | Stop | python | google/grr | grr/server/grr_response_server/fleetspeak_cps.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_cps.py | Apache-2.0 |
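The ack/nack split above (permanent ack for undecodable payloads, nack for transient processing failures so Pub/Sub redelivers) plus the cancel()/result() shutdown in Stop is the standard google-cloud-pubsub streaming-pull pattern. A condensed standalone sketch of that lifecycle; the project and subscription names are placeholders and the 30-second timeout is arbitrary:

from concurrent.futures import TimeoutError
from google.cloud import pubsub_v1

def run_subscriber(project_id: str, subscription_id: str, process_fn) -> None:
  client = pubsub_v1.SubscriberClient()
  sub_path = client.subscription_path(project_id, subscription_id)

  def callback(msg: pubsub_v1.subscriber.message.Message) -> None:
    try:
      process_fn(msg.data)
    except Exception:  # assume transient: let Pub/Sub redeliver later
      msg.nack()
    else:
      msg.ack()  # processed successfully: drop it from the subscription

  future = client.subscribe(sub_path, callback=callback)
  with client:
    try:
      future.result(timeout=30)  # block until shutdown or timeout
    except TimeoutError:
      future.cancel()  # stop the streaming pull
      future.result()  # wait for the background threads to wind down

run_subscriber("my-project", "grr-subscription", print)  # placeholder names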
def WaitUntil(self, condition_cb, timeout=5):
"""Wait a fixed time until the condition is true."""
for _ in range(int(timeout / self.sleep_time)):
res = condition_cb()
if res:
return res
time.sleep(self.sleep_time)
raise RuntimeError("Timeout exceeded. Condition not true") | Wait a fixed time until the condition is true. | WaitUntil | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testThreadCreation(self):
"""Ensure the thread pool started the minimum number of threads."""
self.assertEqual(
self.Count("pool-testThreadCreation"), self.NUMBER_OF_THREADS
) | Ensure the thread pool started the minimum number of threads. | testThreadCreation | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testStopping(self):
"""Tests if all worker threads terminate if the thread pool is stopped."""
self.assertEqual(self.Count("pool-testStopping"), self.NUMBER_OF_THREADS)
self.test_pool.Stop()
self.assertEqual(self.Count("pool-testStopping"), 0)
self.test_pool.Start()
self.assertEqual(self.Count("pool-testStopping"), self.NUMBER_OF_THREADS)
self.test_pool.Stop()
self.assertEqual(self.Count("pool-testStopping"), 0) | Tests if all worker threads terminate if the thread pool is stopped. | testStopping | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testRunTasks(self):
"""Test for running jobs on the thread pool.
This runs 1500 tasks on the ThreadPool and waits for them to
complete.
"""
# Tests if calling Join on an empty ThreadPool works.
self.test_pool.Join()
self.lock = threading.Lock()
def Insert(list_obj, element):
with self.lock:
list_obj.append(element)
test_list = []
for i in range(self.NUMBER_OF_TASKS):
self.test_pool.AddTask(Insert, (test_list, i))
self.test_pool.Join()
test_list.sort()
self.assertEqual(list(range(self.NUMBER_OF_TASKS)), test_list) | Test for running jobs on the thread pool.
This runs 1500 tasks on the ThreadPool and waits for them to
complete. | testRunTasks | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
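Between them, these thread pool tests exercise the whole public surface used elsewhere in the codebase: Factory to get-or-create a named pool, Start/Stop for lifecycle, AddTask to queue work and Join to wait for the queue to drain. A minimal usage sketch, assuming the grr_response_server package is importable and configured (the module path is inferred from the path column):

import threading

from grr_response_server import threadpool

results = []
lock = threading.Lock()

def compute(value):
  with lock:  # AddTask may run this from any worker thread
    results.append(value * value)

pool = threadpool.ThreadPool.Factory("example_pool", 5)
pool.Start()
try:
  for i in range(100):
    pool.AddTask(compute, (i,))
  pool.Join()  # block until the queue has drained
finally:
  pool.Stop()

assert sorted(results) == [i * i for i in range(100)]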
def IRaise(some_obj):
"""This method just raises an exception."""
with self.lock:
# This simulates an error by calling a non-existent function.
some_obj.process() | This method just raises an exception. | testRunRaisingTask.IRaise | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testRunRaisingTask(self):
"""Tests the behavior of the pool if a task throws an exception."""
self.lock = threading.Lock()
def IRaise(some_obj):
"""This method just raises an exception."""
with self.lock:
# This simulates an error by calling a non-existent function.
some_obj.process()
self.exception_args = []
def MockException(*args):
self.exception_args = args
with self.assertStatsCounterDelta(
2, threadpool.THREADPOOL_TASK_EXCEPTIONS, fields=[self.test_pool.name]
):
with mock.patch.object(logging, "exception", MockException):
self.test_pool.AddTask(IRaise, (None,), "Raising")
self.test_pool.AddTask(IRaise, (None,), "Raising")
self.test_pool.Join()
# Check that an exception is raised.
self.assertIn("exception in worker thread", self.exception_args[0])
self.assertEqual(self.exception_args[1], "Raising") | Tests the behavior of the pool if a task throws an exception. | testRunRaisingTask | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testFailToCreateThread(self):
"""Test that we handle thread creation problems ok."""
# The pool starts off with the minimum number of threads.
self.assertLen(self.test_pool, self.NUMBER_OF_THREADS)
done_event = threading.Event()
def Block(done):
done.wait()
def RaisingStart(_):
raise threading.ThreadError()
# Now simulate failure of creating threads.
with mock.patch.object(threadpool._WorkerThread, "start", RaisingStart):
# Fill all the existing threads and wait for them to become busy.
self.test_pool.AddTask(Block, (done_event,))
self.WaitUntil(
lambda: self.test_pool.busy_threads == self.NUMBER_OF_THREADS
)
# Now fill the queue completely.
for _ in range(self.MAXIMUM_THREADS):
self.test_pool.AddTask(Block, (done_event,))
# Trying to push this task will overflow the queue, and would normally
# cause a new thread to start. We use non blocking mode to receive the
# exception.
self.assertRaises(
threadpool.Full,
self.test_pool.AddTask,
Block,
(done_event,),
blocking=False,
inline=False,
)
# Release the blocking tasks.
done_event.set()
self.test_pool.Join() | Test that we handle thread creation problems ok. | testFailToCreateThread | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testThreadsReaped(self):
"""Check that threads are reaped when too old."""
self.now = 0
with utils.MultiStubber(
(time, "time", lambda: self.now),
(threading, "_time", lambda: self.now),
(queue, "_time", lambda: self.now),
(self.test_pool, "CPUUsage", lambda: 0),
):
done_event = threading.Event()
res = []
def Block(done, count):
done.wait()
res.append(count)
for i in range(2 * self.MAXIMUM_THREADS):
self.test_pool.AddTask(Block, (done_event, i), "Blocking", inline=False)
self.assertLen(self.test_pool, self.MAXIMUM_THREADS)
# Release the threads. All threads are now idle.
done_event.set()
# Fast forward the time
self.now = 1000
# Threads will now kill themselves off and the threadpool will be reduced
# to the minimum number of threads.
self.WaitUntil(lambda: len(self.test_pool) == self.NUMBER_OF_THREADS)
# Ensure we have the minimum number of threads left now.
self.assertLen(self.test_pool, self.NUMBER_OF_THREADS) | Check that threads are reaped when too old. | testThreadsReaped | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testExportedFunctions(self):
"""Tests if the outstanding tasks variable is exported correctly."""
signal_event, wait_event = threading.Event(), threading.Event()
def RunFn():
signal_event.set()
wait_event.wait()
pool_name = "test_pool3"
pool = threadpool.ThreadPool.Factory(pool_name, 10)
pool.Start()
try:
# First 10 tasks should be scheduled immediately, as we have max_threads
# set to 10.
for _ in range(10):
signal_event.clear()
pool.AddTask(RunFn, ())
signal_event.wait(10)
# Next 5 tasks should sit in the queue.
for _ in range(5):
with self.assertStatsCounterDelta(
1, threadpool.THREADPOOL_OUTSTANDING_TASKS, fields=[pool_name]
):
pool.AddTask(RunFn, ())
finally:
wait_event.set()
pool.Stop() | Tests if the outstanding tasks variable is exported correctly. | testExportedFunctions | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testDuplicateNameError(self):
"""Tests that creating two pools with the same name fails."""
prefix = self.test_pool.name
self.assertRaises(
threadpool.DuplicateThreadpoolError, threadpool.ThreadPool, prefix, 10
) | Tests that creating two pools with the same name fails. | testDuplicateNameError | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testDuplicateName(self):
"""Tests that we can get the same pool again through the factory."""
prefix = "duplicate_name"
pool = threadpool.ThreadPool.Factory(prefix, 10)
try:
self.assertEqual(pool.started, False)
pool.Start()
self.assertEqual(pool.started, True)
# This should return the same pool as before.
pool2 = threadpool.ThreadPool.Factory(prefix, 10)
self.assertEqual(pool2.started, True)
finally:
pool.Stop() | Tests that we can get the same pool again through the factory. | testDuplicateName | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testAnonymousThreadpool(self):
"""Tests that we can't starts anonymous threadpools."""
prefix = None
with self.assertRaises(ValueError):
threadpool.ThreadPool.Factory(prefix, 10) | Tests that we can't start anonymous threadpools. | testAnonymousThreadpool | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def CreatePluginAndDefaultState(cls, source_urn=None, args=None):
"""Creates a plugin and returns its initial state."""
state = rdf_protodict.AttributedDict()
if args is not None:
args.Validate()
state["args"] = args
plugin = cls(source_urn=source_urn, args=args)
plugin.InitializeState(state)
return plugin, state | Creates a plugin and returns its initial state. | CreatePluginAndDefaultState | python | google/grr | grr/server/grr_response_server/output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/output_plugin.py | Apache-2.0 |
def __init__(self, source_urn=None, args=None):
"""OutputPlugin constructor.
Constructor should be overridden to maintain instance-local state - i.e.
state that gets accumulated during the single output plugin run and that
should be used to update the global state via UpdateState method.
Args:
source_urn: URN of the data source to process the results from.
args: This plugin's arguments.
"""
self.source_urn = source_urn
self.args = args
self.lock = threading.RLock() | OutputPlugin constructor.
Constructor should be overridden to maintain instance-local state - i.e.
state that gets accumulated during the single output plugin run and that
should be used to update the global state via UpdateState method.
Args:
source_urn: URN of the data source to process the results from.
args: This plugin's arguments. | __init__ | python | google/grr | grr/server/grr_response_server/output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/output_plugin.py | Apache-2.0 |
def InitializeState(self, state):
"""Initializes the state the output plugin can use later.
InitializeState() is called only once per plugin's lifetime. It
will be called when hunt or flow is created. It should be used to
register state variables. It's called on the worker, so no
security checks apply.
Args:
state: rdf_protodict.AttributedDict to be filled with default values.
""" | Initializes the state the output plugin can use later.
InitializeState() is called only once per plugin's lifetime. It
will be called when hunt or flow is created. It should be used to
register state variables. It's called on the worker, so no
security checks apply.
Args:
state: rdf_protodict.AttributedDict to be filled with default values. | InitializeState | python | google/grr | grr/server/grr_response_server/output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/output_plugin.py | Apache-2.0 |
def ProcessResponses(self, state, responses):
"""Processes bunch of responses.
When responses are processed, multiple ProcessResponses() calls can
be done in a row. ProcessResponses() calls may be parallelized within the
same worker to improve output performance, therefore the ProcessResponses()
implementation should be thread-safe. ProcessResponses() calls are
*always* followed by a single Flush() call on the same worker.
ProcessResponses() is called on the worker, so no security checks apply.
Args:
state: rdf_protodict.AttributedDict with plugin's state. NOTE:
ProcessResponses should not change state object. All such changes should
take place in the UpdateState method (see below).
responses: GrrMessages from the hunt results collection.
""" | Processes bunch of responses.
When responses are processed, multiple ProcessResponses() calls can
be done in a row. ProcessResponses() calls may be parallelized within the
same worker to improve output performance, therefore the ProcessResponses()
implementation should be thread-safe. ProcessResponses() calls are
*always* followed by a single Flush() call on the same worker.
ProcessResponses() is called on the worker, so no security checks apply.
Args:
state: rdf_protodict.AttributedDict with plugin's state. NOTE:
ProcessResponses should not change state object. All such changes should
take place in the UpdateState method (see below).
responses: GrrMessages from the hunt results collection. | ProcessResponses | python | google/grr | grr/server/grr_response_server/output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/output_plugin.py | Apache-2.0 |
def Flush(self, state):
"""Flushes the output plugin's state.
Flush is *always* called after a series of ProcessResponses() calls.
Flush() is called on the worker, so no security checks apply.
NOTE: This method doesn't have to be thread-safe as it's called once
after a series of ProcessResponses() calls is complete.
Args:
state: rdf_protodict.AttributedDict with plugin's state. NOTE:
ProcessResponses should not change state object. All such changes should
take place in the UpdateState method (see below).
""" | Flushes the output plugin's state.
Flush is *always* called after a series of ProcessResponses() calls.
Flush() is called on the worker, so no security checks apply.
NOTE: This method doesn't have to be thread-safe as it's called once
after a series of ProcessResponses() calls is complete.
Args:
state: rdf_protodict.AttributedDict with plugin's state. NOTE:
ProcessResponses should not change state object. All such changes should
take place in the UpdateState method (see below). | Flush | python | google/grr | grr/server/grr_response_server/output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/output_plugin.py | Apache-2.0 |
def UpdateState(self, state):
"""Updates state of the output plugin.
UpdateState is called after a series of ProcessResponses() calls and
after a Flush() call. The implementation of this method should be
lightweight, since it is guaranteed to be called atomically
in the middle of a database transaction.
Args:
state: rdf_protodict.AttributedDict with plugin's state to be updated.
""" | Updates state of the output plugin.
UpdateState is called after a series of ProcessResponses() calls and
after a Flush() call. The implementation of this method should be
lightweight, since it is guaranteed to be called atomically
in the middle of a database transaction.
Args:
state: rdf_protodict.AttributedDict with plugin's state to be updated. | UpdateState | python | google/grr | grr/server/grr_response_server/output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/output_plugin.py | Apache-2.0 |
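The four methods documented above define the plugin lifecycle: InitializeState once when the flow or hunt is created, then batches of ProcessResponses (possibly in parallel) each followed by a single Flush, and finally UpdateState inside a database transaction. A sketch of a trivial counting plugin written against that contract; the plugin itself is invented for illustration and assumes the grr_response_server package is importable:

from grr_response_server import output_plugin

class CountingOutputPlugin(output_plugin.OutputPlugin):
  """Counts processed responses. Purely illustrative."""

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self._seen_in_this_run = 0  # instance-local state, folded into `state` later

  def InitializeState(self, state):
    state["total_responses"] = 0

  def ProcessResponses(self, state, responses):
    with self.lock:  # calls may be parallelized across worker threads
      self._seen_in_this_run += len(responses)

  def Flush(self, state):
    pass  # nothing buffered externally in this toy example

  def UpdateState(self, state):
    # Keep this cheap: it runs inside a database transaction.
    state["total_responses"] += self._seen_in_this_run
    self._seen_in_this_run = 0

CreatePluginAndDefaultState, shown a few rows earlier, would then return a (CountingOutputPlugin, state) pair with total_responses preset to 0.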
def DropPrivileges():
"""Attempt to drop privileges if required."""
if config.CONFIG["Server.username"]:
try:
os.setuid(pwd.getpwnam(config.CONFIG["Server.username"]).pw_uid)
except (KeyError, OSError):
logging.exception(
"Unable to switch to user %s", config.CONFIG["Server.username"]
)
raise | Attempt to drop privileges if required. | DropPrivileges | python | google/grr | grr/server/grr_response_server/server_startup.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_startup.py | Apache-2.0 |
def Init(disabled: bool = False):
"""Run all required startup routines and initialization hooks."""
# Set up a temporary syslog handler so we have somewhere to log problems
# with ConfigInit() which needs to happen before we can start our create our
# proper logging setup.
syslog_logger = logging.getLogger("TempLogger")
if os.path.exists("/dev/log"):
handler = logging.handlers.SysLogHandler(address="/dev/log")
else:
handler = logging.handlers.SysLogHandler()
syslog_logger.addHandler(handler)
# The default behavior of server components is to raise errors when
# encountering unknown config options.
flags.FLAGS.disallow_missing_config_definitions = True
try:
config_lib.SetPlatformArchContext()
config_lib.ParseConfigCommandLine(rename_invalid_writeback=False)
except config_lib.Error:
syslog_logger.exception("Died during config initialization")
raise
stats_collector = prometheus_stats_collector.PrometheusStatsCollector(
registry=prometheus_client.REGISTRY
)
stats_collector_instance.Set(stats_collector)
server_logging.ServerLoggingStartupInit()
bs_registry_init.RegisterBlobStores()
ec_registry_init.RegisterExportConverters()
gui_api_registry_init.RegisterApiCallRouters()
data_store.InitializeDataStore()
if contexts.ADMIN_UI_CONTEXT in config.CONFIG.context:
api_auth_manager.InitializeApiAuthManager()
artifact.LoadArtifactsOnce() # Requires aff4.AFF4Init.
client_approval_auth.InitializeClientApprovalAuthorizationManagerOnce()
if not disabled:
cronjobs.InitializeCronWorkerOnce()
email_alerts.InitializeEmailAlerterOnce()
http_api.InitializeHttpRequestHandlerOnce()
ip_resolver.IPResolverInitOnce()
stats_server.InitializeStatsServerOnce()
webauth.InitializeWebAuthOnce()
# Exempt config updater from this check because it is the one responsible for
# setting the variable.
if not config.CONFIG.ContextApplied("ConfigUpdater Context"):
if not config.CONFIG.Get("Server.initialized"):
raise RuntimeError(
'Config not initialized, run "grr_config_updater'
' initialize". If the server is already configured,'
' add "Server.initialized: True" to your config.'
) | Run all required startup routines and initialization hooks. | Init | python | google/grr | grr/server/grr_response_server/server_startup.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_startup.py | Apache-2.0 |
def setUp(self):
"""Make sure things are initialized."""
super().setUp()
self.client_mock = action_mocks.ClientFileFinderWithVFS()
patcher = artifact_test_lib.PatchDefaultArtifactRegistry()
patcher.start()
self.addCleanup(patcher.stop) | Make sure things are initialized. | setUp | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def LoadTestArtifacts(self):
"""Add the test artifacts in on top of whatever is in the registry."""
artifact_registry.REGISTRY.AddFileSource(
os.path.join(
config.CONFIG["Test.data_dir"], "artifacts", "test_artifacts.json"
)
) | Add the test artifacts in on top of whatever is in the registry. | LoadTestArtifacts | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def RunCollectorAndGetResults(
self,
artifact_list: Iterator[str],
client_mock: Optional[MockClient] = None,
client_id: Optional[str] = None,
error_on_no_results: bool = False,
split_output_by_artifact: bool = False,
):
"""Helper to handle running the collector flow."""
if client_mock is None:
client_mock = self.MockClient(client_id=client_id)
session_id = flow_test_lib.StartAndRunFlow(
collectors.ArtifactCollectorFlow,
client_mock=client_mock,
client_id=client_id,
flow_args=rdf_artifacts.ArtifactCollectorFlowArgs(
artifact_list=artifact_list,
error_on_no_results=error_on_no_results,
split_output_by_artifact=split_output_by_artifact,
),
creator=self.test_username,
)
return flow_test_lib.GetFlowResults(client_id, session_id) | Helper to handle running the collector flow. | RunCollectorAndGetResults | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def setUp(self):
"""Make sure things are initialized."""
super().setUp()
users = [
knowledge_base_pb2.User(username="gogol"),
knowledge_base_pb2.User(username="gevulot"),
knowledge_base_pb2.User(username="exomemory"),
knowledge_base_pb2.User(username="user1"),
knowledge_base_pb2.User(username="user2"),
]
self.SetupClient(0, system="Linux", os_version="12.04", users=users)
self.LoadTestArtifacts() | Make sure things are initialized. | setUp | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def testFilesArtifact(self):
"""Check GetFiles artifacts."""
client_id = test_lib.TEST_CLIENT_ID
with vfs_test_lib.FakeTestDataVFSOverrider():
self.RunCollectorAndGetResults(
["TestFilesArtifact"],
client_mock=action_mocks.ClientFileFinderWithVFS(),
client_id=client_id,
)
cp = db.ClientPath.OS(client_id, ("var", "log", "auth.log"))
fd = file_store.OpenFile(cp)
self.assertNotEmpty(fd.read()) | Check GetFiles artifacts. | testFilesArtifact | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def testArtifactOutput(self):
"""Check we can run command based artifacts."""
client_id = test_lib.TEST_CLIENT_ID
with vfs_test_lib.FakeTestDataVFSOverrider():
# Will raise if something goes wrong.
self.RunCollectorAndGetResults(
["TestFilesArtifact"],
client_mock=self.client_mock,
client_id=client_id,
)
# Will raise if something goes wrong.
self.RunCollectorAndGetResults(
["TestFilesArtifact"],
client_mock=self.client_mock,
client_id=client_id,
split_output_by_artifact=True,
)
# Test the error_on_no_results option.
with self.assertRaises(RuntimeError) as context:
with test_lib.SuppressLogs():
self.RunCollectorAndGetResults(
["NullArtifact"],
client_mock=self.client_mock,
client_id=client_id,
split_output_by_artifact=True,
error_on_no_results=True,
)
if "collector returned 0 responses" not in str(context.exception):
raise RuntimeError("0 responses should have been returned") | Check we can run command based artifacts. | testArtifactOutput | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def testKnowledgeBaseRetrievalWindows(self):
"""Check we can retrieve a knowledge base from a client."""
kb = self._RunKBI()
self.assertEqual(kb.environ_systemroot, "C:\\Windows")
self.assertEqual(kb.time_zone, "US/Alaska")
self.assertEqual(kb.code_page, "cp_1252")
self.assertEqual(kb.environ_windir, "C:\\Windows")
self.assertEqual(kb.environ_profilesdirectory, "C:\\Users")
self.assertEqual(kb.environ_allusersprofile, "C:\\ProgramData")
self.assertEqual(kb.environ_allusersappdata, "C:\\ProgramData")
self.assertEqual(kb.environ_temp, "C:\\Windows\\TEMP")
self.assertEqual(kb.environ_systemdrive, "C:")
self.assertCountEqual([x.username for x in kb.users], ["jim", "kovacs"])
user = kb.GetUser(username="jim")
self.assertEqual(user.username, "jim")
self.assertEqual(user.sid, "S-1-5-21-702227068-2140022151-3110739409-1000") | Check we can retrieve a knowledge base from a client. | testKnowledgeBaseRetrievalWindows | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def testKnowledgeBaseRetrievalLinux(self):
"""Check we can retrieve a Linux kb."""
class KnowledgebaseInitMock(action_mocks.FileFinderClientMock):
def EnumerateUsers(
self,
args: None,
) -> Iterator[rdf_client.User]:
del args # Unused.
yield rdf_client.User(
username="user1",
homedir="/home/user1",
last_logon=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1296552099),
)
yield rdf_client.User(
username="user2",
homedir="/home/user2",
last_logon=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1234567890),
)
yield rdf_client.User(
username="user3",
homedir="/home/user3",
last_logon=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3456789012),
)
yield rdf_client.User(
username="yagharek",
homedir="/home/yagharek",
last_logon=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(7890123456),
)
session_id = flow_test_lib.StartAndRunFlow(
artifact.KnowledgeBaseInitializationFlow,
client_id=test_lib.TEST_CLIENT_ID,
client_mock=KnowledgebaseInitMock(),
)
results = flow_test_lib.GetFlowResults(test_lib.TEST_CLIENT_ID, session_id)
self.assertLen(results, 1)
self.assertIsInstance(results[0], rdf_client.KnowledgeBase)
kb = results[0]
self.assertCountEqual(
[x.username for x in kb.users], ["user1", "user2", "user3", "yagharek"]
)
user = kb.GetUser(username="user1")
self.assertEqual(user.last_logon.AsSecondsSinceEpoch(), 1296552099)
self.assertEqual(user.homedir, "/home/user1") | Check we can retrieve a Linux kb. | testKnowledgeBaseRetrievalLinux | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def testKnowledgeBaseRetrievalLinuxNoUsers(self):
"""Cause a users.username dependency failure."""
with vfs_test_lib.FakeTestDataVFSOverrider():
kb = self._RunKBI(require_complete=False)
self.assertEqual(kb.os_major_version, 14)
self.assertEqual(kb.os_minor_version, 4)
self.assertCountEqual([x.username for x in kb.users], []) | Cause a users.username dependency failure. | testKnowledgeBaseRetrievalLinuxNoUsers | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def testKnowledgeBaseRetrievalDarwin(self):
"""Check we can retrieve a Darwin kb."""
with vfs_test_lib.VFSOverrider(
rdf_paths.PathSpec.PathType.OS,
vfs_test_lib.ClientVFSHandlerFixture,
):
kb = self._RunKBI()
self.assertEqual(kb.os_major_version, 10)
self.assertEqual(kb.os_minor_version, 9)
# scalzi from /Users dir listing.
self.assertCountEqual([x.username for x in kb.users], ["scalzi"])
user = kb.GetUser(username="scalzi")
self.assertEqual(user.homedir, "/Users/scalzi") | Check we can retrieve a Darwin kb. | testKnowledgeBaseRetrievalDarwin | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def testOperatingSystemSelection(self):
"""Tests that we can distinguish based on operating system."""
self.SetupClient(1, system="Windows XP")
self.SetupClient(2, system="Linux")
self.SetupClient(3, system="Windows 7")
with mock.patch.object(
hunt, "StartHuntFlowOnClient", self.StartHuntFlowOnClient
):
# Now setup the filters
now = rdfvalue.RDFDatetime.Now()
expiration_time = now + rdfvalue.Duration.From(1, rdfvalue.HOURS)
# Make a new rule
rule = foreman_rules.ForemanCondition(
creation_time=now,
expiration_time=expiration_time,
description="Test rule",
hunt_id="11111111",
)
# Matches Windows boxes
rule.client_rule_set = foreman_rules.ForemanClientRuleSet(
rules=[
foreman_rules.ForemanClientRule(
rule_type=foreman_rules.ForemanClientRule.Type.OS,
os=foreman_rules.ForemanOsClientRule(os_windows=True),
)
]
)
proto_foreman_condition = mig_foreman_rules.ToProtoForemanCondition(rule)
data_store.REL_DB.WriteForemanRule(proto_foreman_condition)
self.clients_started = []
foreman_obj = foreman.Foreman()
foreman_obj.AssignTasksToClient("C.1000000000000001")
foreman_obj.AssignTasksToClient("C.1000000000000002")
foreman_obj.AssignTasksToClient("C.1000000000000003")
# Make sure that only the windows machines ran
self.assertLen(self.clients_started, 2)
self.assertEqual(self.clients_started[0][1], "C.1000000000000001")
self.assertEqual(self.clients_started[1][1], "C.1000000000000003")
self.clients_started = []
# Run again - This should not fire since it did already
foreman_obj.AssignTasksToClient("C.1000000000000001")
foreman_obj.AssignTasksToClient("C.1000000000000002")
foreman_obj.AssignTasksToClient("C.1000000000000003")
self.assertEmpty(self.clients_started) | Tests that we can distinguish based on operating system. | testOperatingSystemSelection | python | google/grr | grr/server/grr_response_server/foreman_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/foreman_test.py | Apache-2.0 |
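The condition assembled in this test is the general recipe for targeting clients: a ForemanCondition pointing at a hunt, carrying a ForemanClientRuleSet whose rules are evaluated per client. The same construction pulled out on its own, here matching Linux instead of Windows (a sketch; the import paths are assumed from the repository layout, the hunt id is a placeholder, and os_linux is assumed to exist alongside the os_windows flag used above):

from grr_response_core.lib import rdfvalue
from grr_response_server import foreman_rules

now = rdfvalue.RDFDatetime.Now()
condition = foreman_rules.ForemanCondition(
    creation_time=now,
    expiration_time=now + rdfvalue.Duration.From(1, rdfvalue.HOURS),
    description="Example rule targeting Linux clients",
    hunt_id="AAAAAAAA",  # placeholder hunt id
)
condition.client_rule_set = foreman_rules.ForemanClientRuleSet(
    rules=[
        foreman_rules.ForemanClientRule(
            rule_type=foreman_rules.ForemanClientRule.Type.OS,
            os=foreman_rules.ForemanOsClientRule(os_linux=True),
        )
    ]
)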
def testIntegerComparisons(self):
"""Tests that we can use integer matching rules on the foreman."""
base_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1336480583.077736)
boot_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1336300000.000000)
self.SetupClient(0x11, system="Windows XP", install_time=base_time)
self.SetupClient(0x12, system="Windows 7", install_time=base_time)
# This one was installed one week earlier.
one_week_ago = base_time - rdfvalue.Duration.From(1, rdfvalue.WEEKS)
self.SetupClient(0x13, system="Windows 7", install_time=one_week_ago)
self.SetupClient(0x14, system="Windows 7", last_boot_time=boot_time)
with mock.patch.object(
hunt, "StartHuntFlowOnClient", self.StartHuntFlowOnClient
):
now = rdfvalue.RDFDatetime.Now()
expiration_time = now + rdfvalue.Duration.From(1, rdfvalue.HOURS)
# Make a new rule
rule = foreman_rules.ForemanCondition(
creation_time=now,
expiration_time=expiration_time,
description="Test rule(old)",
hunt_id="11111111",
)
# Matches the old client
one_hour_ago = base_time - rdfvalue.Duration.From(1, rdfvalue.HOURS)
rule.client_rule_set = foreman_rules.ForemanClientRuleSet(
rules=[
foreman_rules.ForemanClientRule(
rule_type=foreman_rules.ForemanClientRule.Type.INTEGER,
integer=foreman_rules.ForemanIntegerClientRule(
field="INSTALL_TIME",
operator=foreman_rules.ForemanIntegerClientRule.Operator.LESS_THAN,
value=one_hour_ago.AsSecondsSinceEpoch(),
),
)
]
)
proto_foreman_condition = mig_foreman_rules.ToProtoForemanCondition(rule)
data_store.REL_DB.WriteForemanRule(proto_foreman_condition)
# Make a new rule
rule = foreman_rules.ForemanCondition(
creation_time=now,
expiration_time=expiration_time,
description="Test rule(new)",
hunt_id="22222222",
)
# Matches the newer clients
rule.client_rule_set = foreman_rules.ForemanClientRuleSet(
rules=[
foreman_rules.ForemanClientRule(
rule_type=foreman_rules.ForemanClientRule.Type.INTEGER,
integer=foreman_rules.ForemanIntegerClientRule(
field="INSTALL_TIME",
operator=foreman_rules.ForemanIntegerClientRule.Operator.GREATER_THAN,
value=one_hour_ago.AsSecondsSinceEpoch(),
),
)
]
)
proto_foreman_condition = mig_foreman_rules.ToProtoForemanCondition(rule)
data_store.REL_DB.WriteForemanRule(proto_foreman_condition)
# Make a new rule
rule = foreman_rules.ForemanCondition(
creation_time=now,
expiration_time=expiration_time,
description="Test rule(eq)",
hunt_id="33333333",
)
# Note that this also tests the handling of nonexistent attributes.
rule.client_rule_set = foreman_rules.ForemanClientRuleSet(
rules=[
foreman_rules.ForemanClientRule(
rule_type=foreman_rules.ForemanClientRule.Type.INTEGER,
integer=foreman_rules.ForemanIntegerClientRule(
field="LAST_BOOT_TIME",
operator="EQUAL",
value=boot_time.AsSecondsSinceEpoch(),
),
)
]
)
proto_foreman_condition = mig_foreman_rules.ToProtoForemanCondition(rule)
data_store.REL_DB.WriteForemanRule(proto_foreman_condition)
foreman_obj = foreman.Foreman()
self.clients_started = []
foreman_obj.AssignTasksToClient("C.1000000000000011")
foreman_obj.AssignTasksToClient("C.1000000000000012")
foreman_obj.AssignTasksToClient("C.1000000000000013")
foreman_obj.AssignTasksToClient("C.1000000000000014")
# Make sure that the clients ran the correct flows.
self.assertLen(self.clients_started, 4)
self.assertEqual(self.clients_started[0][1], "C.1000000000000011")
self.assertEqual("22222222", self.clients_started[0][0])
self.assertEqual(self.clients_started[1][1], "C.1000000000000012")
self.assertEqual("22222222", self.clients_started[1][0])
self.assertEqual(self.clients_started[2][1], "C.1000000000000013")
self.assertEqual("11111111", self.clients_started[2][0])
self.assertEqual(self.clients_started[3][1], "C.1000000000000014")
self.assertEqual("33333333", self.clients_started[3][0]) | Tests that we can use integer matching rules on the foreman. | testIntegerComparisons | python | google/grr | grr/server/grr_response_server/foreman_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/foreman_test.py | Apache-2.0 |
def Init(service_client=None):
"""Initializes the Fleetspeak connector."""
global CONN
global label_map
if service_client is None:
service_client_cls = fs_client.InsecureGRPCServiceClient
fleetspeak_message_listen_address = (
config.CONFIG["Server.fleetspeak_message_listen_address"] or None
)
fleetspeak_server = config.CONFIG["Server.fleetspeak_server"] or None
if fleetspeak_message_listen_address is None and fleetspeak_server is None:
logging.warning(
"Missing config options `Server.fleetspeak_message_listen_address', "
"`Server.fleetspeak_server', at least one of which is required to "
"initialize a connection to Fleetspeak; Not using Fleetspeak."
)
return
service_client = service_client_cls(
"GRR",
fleetspeak_message_listen_address=fleetspeak_message_listen_address,
fleetspeak_server=fleetspeak_server,
threadpool_size=config.CONFIG["Threadpool.size"],
)
label_map = {}
for entry in config.CONFIG["Server.fleetspeak_label_map"]:
key, value = entry.split(":")
label_map[key.strip()] = value.strip()
CONN = service_client
logging.info("Fleetspeak connector initialized.") | Initializes the Fleetspeak connector. | Init | python | google/grr | grr/server/grr_response_server/fleetspeak_connector.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_connector.py | Apache-2.0 |
def __init__(self, daily_req_limit=0, dup_interval=rdfvalue.Duration(0)):
"""Create flow throttler object.
Args:
daily_req_limit: Number of flows allowed per user per client. Integer.
dup_interval: rdfvalue.Duration time during which duplicate flows will be
blocked.
"""
self.daily_req_limit = daily_req_limit
self.dup_interval = dup_interval | Create flow throttler object.
Args:
daily_req_limit: Number of flows allowed per user per client. Integer.
dup_interval: rdfvalue.Duration time during which duplicate flows will be
blocked. | __init__ | python | google/grr | grr/server/grr_response_server/throttle.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/throttle.py | Apache-2.0 |
def _LoadFlows(self, client_id, min_create_time):
"""Yields all flows for the given client_id and time range.
Args:
client_id: Client id string.
min_create_time: minimum creation time (inclusive)
Yields: flow_objects.Flow objects
"""
flow_list = data_store.REL_DB.ReadAllFlowObjects(
client_id=client_id,
min_create_time=min_create_time,
include_child_flows=False,
)
flow_list = [mig_flow_objects.ToRDFFlow(flow) for flow in flow_list]
for flow_obj in flow_list:
yield flow_obj | Yields all flows for the given client_id and time range.
Args:
client_id: Client id string.
min_create_time: minimum creation time (inclusive)
Yields: flow_objects.Flow objects | _LoadFlows | python | google/grr | grr/server/grr_response_server/throttle.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/throttle.py | Apache-2.0 |
def EnforceLimits(self, client_id, user, flow_name, flow_args=None):
"""Enforce DailyFlowRequestLimit and FlowDuplicateInterval.
Look at the flows that have run on this client recently and check
we aren't exceeding our limits. Raises if limits will be exceeded by running
the specified flow.
Args:
client_id: client URN
user: username string
flow_name: name of the Flow. Only used for FlowDuplicateInterval.
flow_args: flow args rdfvalue for the flow being launched
Raises:
DailyFlowRequestLimitExceededError: if the user has already run
API.DailyFlowRequestLimit on this client in the previous 24h.
DuplicateFlowError: an identical flow was run on this machine by a user
within the API.FlowDuplicateInterval
"""
if not self.dup_interval and not self.daily_req_limit:
return
now = rdfvalue.RDFDatetime.Now()
yesterday = now - rdfvalue.Duration.From(1, rdfvalue.DAYS)
dup_boundary = now - self.dup_interval
min_create_time = min(yesterday, dup_boundary)
flow_count = 0
flow_objs = self._LoadFlows(client_id, min_create_time)
if flow_args is None:
flow_args = rdf_flows.EmptyFlowArgs()
for flow_obj in flow_objs:
if (
flow_obj.create_time > dup_boundary
and flow_obj.flow_class_name == flow_name
and flow_obj.args == flow_args
):
raise DuplicateFlowError(
"Identical %s already run on %s at %s"
% (flow_name, client_id, flow_obj.create_time),
flow_id=flow_obj.flow_id,
)
# Filter for flows started by user within the 1 day window.
if flow_obj.creator == user and flow_obj.create_time > yesterday:
flow_count += 1
# If limit is set, enforce it.
if self.daily_req_limit and flow_count >= self.daily_req_limit:
raise DailyFlowRequestLimitExceededError(
"%s flows run since %s, limit: %s"
% (flow_count, yesterday, self.daily_req_limit)
) | Enforce DailyFlowRequestLimit and FlowDuplicateInterval.
Look at the flows that have run on this client recently and check
we aren't exceeding our limits. Raises if limits will be exceeded by running
the specified flow.
Args:
client_id: client URN
user: username string
flow_name: name of the Flow. Only used for FlowDuplicateInterval.
flow_args: flow args rdfvalue for the flow being launched
Raises:
DailyFlowRequestLimitExceededError: if the user has already run
API.DailyFlowRequestLimit on this client in the previous 24h.
DuplicateFlowError: an identical flow was run on this machine by a user
within the API.FlowDuplicateInterval | EnforceLimits | python | google/grr | grr/server/grr_response_server/throttle.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/throttle.py | Apache-2.0 |
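EnforceLimits raises instead of returning a verdict, so the natural call site wraps it just before starting a flow. A usage sketch; the class name FlowThrottler and the module-level exception classes are assumed from the throttle module shown in the path column, and the client id, user and flow name are placeholders:

from grr_response_core.lib import rdfvalue
from grr_response_server import throttle

throttler = throttle.FlowThrottler(
    daily_req_limit=10,
    dup_interval=rdfvalue.Duration.From(20, rdfvalue.MINUTES),
)

try:
  throttler.EnforceLimits(
      client_id="C.1000000000000001",
      user="analyst",
      flow_name="ArtifactCollectorFlow",
  )
except throttle.DuplicateFlowError as error:
  print("identical flow ran recently:", error)
except throttle.DailyFlowRequestLimitExceededError as error:
  print("daily per-client limit reached:", error)
else:
  pass  # safe to start the flow here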
def AddDir(self, dirpath):
"""Adds a directory path as a source.
Args:
dirpath: a string representing a path to the directory.
Returns:
True if the directory is not an already existing source.
"""
if dirpath not in self._dirs:
self._dirs.add(dirpath)
return True
return False | Adds a directory path as a source.
Args:
dirpath: a string representing a path to the directory.
Returns:
True if the directory is not an already existing source. | AddDir | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def AddFile(self, filepath):
"""Adds a file path as a source.
Args:
filepath: a string representing a path to the file.
Returns:
True if the file is not an already existing source.
"""
if filepath not in self._files:
self._files.add(filepath)
return True
return False | Adds a file path as a source.
Args:
filepath: a string representing a path to the file.
Returns:
True if the file is not an already existing source. | AddFile | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def GetDirs(self):
"""Returns an iterator over defined source directory paths."""
return iter(self._dirs) | Returns an iterator over defined source directory paths. | GetDirs | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def GetFiles(self):
"""Returns an iterator over defined source file paths."""
return iter(self._files) | Returns an iterator over defined source file paths. | GetFiles | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def GetAllFiles(self):
"""Yields all defined source file paths.
This includes file paths defined directly and those defined implicitly by
defining a directory.
"""
for filepath in self._files:
yield filepath
for dirpath in self._dirs:
for filepath in ArtifactRegistrySources._GetDirYamlFiles(dirpath):
if filepath in self._files:
continue
yield filepath | Yields all defined source file paths.
This includes file paths defined directly and those defined implicitly by
defining a directory. | GetAllFiles | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
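AddDir, AddFile and the three getters above are the whole sources interface: explicit files are tracked directly, directories are expanded to their YAML files lazily by GetAllFiles, and duplicates between the two are skipped. A short usage sketch; the no-argument constructor is an assumption and the paths are placeholders:

from grr_response_server import artifact_registry

sources = artifact_registry.ArtifactRegistrySources()
sources.AddDir("/etc/grr/artifacts")            # expanded lazily by GetAllFiles
sources.AddFile("/etc/grr/extra/one_off.yaml")  # tracked as an explicit file

for path in sources.GetAllFiles():
  print("would load artifact definitions from", path)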
def _LoadArtifactsFromDatastore(self):
"""Load artifacts from the data store."""
loaded_artifacts = []
# TODO(hanuszczak): Why do we have to remove anything? If some artifact
# tries to shadow system artifact shouldn't we just ignore them and perhaps
# issue some warning instead? The datastore being loaded should be read-only
# during upload.
# A collection of artifacts that shadow system artifacts and need
# to be deleted from the data store.
to_delete = []
artifact_list = [
mig_artifacts.ToRDFArtifact(a)
for a in data_store.REL_DB.ReadAllArtifacts()
]
for artifact_value in artifact_list:
try:
self.RegisterArtifact(
artifact_value, source="datastore:", overwrite_if_exists=True
)
loaded_artifacts.append(artifact_value)
except rdf_artifacts.ArtifactDefinitionError as e:
# TODO(hanuszczak): String matching on exception message is rarely
# a good idea. Instead this should be refactored to some exception
# class and then handled separately.
if "system artifact" in str(e):
to_delete.append(artifact_value.name)
else:
raise
if to_delete:
DeleteArtifactsFromDatastore(to_delete, reload_artifacts=False)
self._dirty = True
# TODO(hanuszczak): This is connected to the previous TODO comment. Why
# do we throw exception at this point? Why do we delete something and then
# abort the whole upload procedure by throwing an exception?
detail = "system artifacts were shadowed and had to be deleted"
raise rdf_artifacts.ArtifactDefinitionError(to_delete, detail)
# Once all artifacts are loaded we can validate.
revalidate = True
while revalidate:
revalidate = False
for artifact_obj in loaded_artifacts[:]:
try:
Validate(artifact_obj)
except rdf_artifacts.ArtifactDefinitionError as e:
logging.exception("Artifact %s did not validate", artifact_obj.name)
artifact_obj.error_message = str(e)
loaded_artifacts.remove(artifact_obj)
revalidate = True | Load artifacts from the data store. | _LoadArtifactsFromDatastore | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def ArtifactsFromYaml(self, yaml_content):
"""Get a list of Artifacts from yaml."""
raw_list = list(yaml.safe_load_all(yaml_content))
# TODO(hanuszczak): I am very sceptical about that "doing the right thing"
# below. What are the real use cases?
# Try to do the right thing with json/yaml formatted as a list.
if (
isinstance(raw_list, list)
and len(raw_list) == 1
and isinstance(raw_list[0], list)
):
raw_list = raw_list[0]
# Convert json into artifact and validate.
valid_artifacts = []
for artifact_dict in raw_list:
# Old artifacts might still use deprecated fields, so we have to ignore
# such. Here, we simply delete keys from the dictionary as otherwise the
# RDF value constructor would raise on unknown fields.
for field in DEPRECATED_ARTIFACT_FIELDS:
artifact_dict.pop(field, None)
# Strip operating systems that are supported in ForensicArtifacts, but not
# in GRR. The Artifact will still be added to GRR's repository, but the
# unsupported OS will be removed. This can result in artifacts with 0
# supported_os entries. For end-users, there might still be value in
# seeing the artifact, even if the artifact's OS is not supported.
if "supported_os" in artifact_dict:
artifact_dict["supported_os"] = [
os
for os in artifact_dict["supported_os"]
if os not in rdf_artifacts.Artifact.IGNORE_OS_LIST
]
# In this case we are feeding parameters directly from potentially
# untrusted yaml/json to our RDFValue class. However, safe_load ensures
# these are all primitive types as long as there is no other
# deserialization involved, and we are passing these into protobuf
# primitive types.
try:
artifact_value = rdf_artifacts.Artifact(**artifact_dict)
valid_artifacts.append(artifact_value)
except (TypeError, AttributeError, type_info.TypeValueError) as e:
name = artifact_dict.get("name")
raise rdf_artifacts.ArtifactDefinitionError(
name, "invalid definition", cause=e
)
return valid_artifacts | Get a list of Artifacts from yaml. | ArtifactsFromYaml | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
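Because ArtifactsFromYaml feeds each YAML document straight into the Artifact constructor, a definition only needs fields that constructor accepts. A sketch of parsing one minimal definition through the global REGISTRY; treating name, doc and supported_os as a sufficient minimum is an assumption for illustration, not an official schema:

from grr_response_server import artifact_registry

minimal_yaml = """
name: ExampleEmptyArtifact
doc: An illustrative artifact definition with no sources.
supported_os: [Linux]
"""

for parsed in artifact_registry.REGISTRY.ArtifactsFromYaml(minimal_yaml):
  print(parsed.name, list(parsed.supported_os))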
def _LoadArtifactsFromFiles(self, file_paths, overwrite_if_exists=True):
"""Load artifacts from file paths as json or yaml."""
loaded_files = []
loaded_artifacts = []
for file_path in file_paths:
try:
with io.open(file_path, mode="r", encoding="utf-8") as fh:
logging.debug("Loading artifacts from %s", file_path)
for artifact_val in self.ArtifactsFromYaml(fh.read()):
self.RegisterArtifact(
artifact_val,
source="file:%s" % file_path,
overwrite_if_exists=overwrite_if_exists,
)
loaded_artifacts.append(artifact_val)
logging.debug(
"Loaded artifact %s from %s", artifact_val.name, file_path
)
loaded_files.append(file_path)
except (IOError, OSError):
logging.exception("Failed to open artifact file %s.", file_path)
except rdf_artifacts.ArtifactDefinitionError:
logging.exception(
"Invalid artifact found in file %s with error", file_path
)
raise
# Once all artifacts are loaded we can validate.
for artifact_value in loaded_artifacts:
Validate(artifact_value) | Load artifacts from file paths as json or yaml. | _LoadArtifactsFromFiles | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def RegisterArtifact(
self,
artifact_rdfvalue,
source="datastore",
overwrite_if_exists=False,
overwrite_system_artifacts=False,
):
"""Registers a new artifact."""
artifact_name = artifact_rdfvalue.name
if artifact_name in self._artifacts:
if not overwrite_if_exists:
details = "artifact already exists and `overwrite_if_exists` is unset"
raise rdf_artifacts.ArtifactDefinitionError(artifact_name, details)
elif not overwrite_system_artifacts:
loaded_from_datastore = self.IsLoadedFrom(artifact_name, "datastore:")
if not loaded_from_datastore:
# This artifact was not uploaded to the datastore but came from a
# file, refuse to overwrite.
details = "system artifact cannot be overwritten"
raise rdf_artifacts.ArtifactDefinitionError(artifact_name, details)
# Preserve where the artifact was loaded from to help debugging.
self._artifact_loaded_from[artifact_name] = source
# Clear any stale errors.
artifact_rdfvalue.error_message = None
self._artifacts[artifact_rdfvalue.name] = artifact_rdfvalue | Registers a new artifact. | RegisterArtifact | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def _ReloadArtifacts(self):
"""Load artifacts from all sources."""
self._artifacts = {}
self._LoadArtifactsFromFiles(self._sources.GetAllFiles())
self.ReloadDatastoreArtifacts() | Load artifacts from all sources. | _ReloadArtifacts | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def _UnregisterDatastoreArtifacts(self):
"""Remove artifacts that came from the datastore."""
to_remove = []
for name in self._artifacts:
if self.IsLoadedFrom(name, "datastore"):
to_remove.append(name)
for key in to_remove:
self._artifacts.pop(key)
self._artifact_loaded_from.pop(key) | Remove artifacts that came from the datastore. | _UnregisterDatastoreArtifacts | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def GetArtifacts(
self,
os_name=None,
name_list=None,
source_type=None,
exclude_dependents=False,
reload_datastore_artifacts=False,
):
"""Retrieve artifact classes with optional filtering.
All filters must match for the artifact to be returned.
Args:
os_name: string to match against supported_os
name_list: list of strings to match against artifact names
source_type: rdf_artifacts.ArtifactSource.SourceType to match against
source_type
exclude_dependents: if true only artifacts with no dependencies will be
returned
reload_datastore_artifacts: If true, the data store sources are queried
for new artifacts.
Returns:
list of artifacts matching filter criteria
"""
self._CheckDirty(reload_datastore_artifacts=reload_datastore_artifacts)
results = {}
for artifact in self._artifacts.values():
# artifact.supported_os = [] matches all OSes
if (
os_name
and artifact.supported_os
and (os_name not in artifact.supported_os)
):
continue
if name_list and artifact.name not in name_list:
continue
if source_type:
source_types = [c.type for c in artifact.sources]
if source_type not in source_types:
continue
if exclude_dependents and GetArtifactPathDependencies(artifact):
continue
results[artifact.name] = artifact
return list(results.values()) | Retrieve artifact classes with optional filtering.
All filters must match for the artifact to be returned.
Args:
os_name: string to match against supported_os
name_list: list of strings to match against artifact names
source_type: rdf_artifacts.ArtifactSource.SourceType to match against
source_type
exclude_dependents: if true only artifacts with no dependencies will be
returned
reload_datastore_artifacts: If true, the data store sources are queried
for new artifacts.
Returns:
list of artifacts matching filter criteria | GetArtifacts | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def GetArtifact(self, name):
"""Get artifact by name.
Args:
name: artifact name string.
Returns:
artifact object.
Raises:
ArtifactNotRegisteredError: if artifact doesn't exist in the registry.
"""
self._CheckDirty()
result = self._artifacts.get(name)
if not result:
raise rdf_artifacts.ArtifactNotRegisteredError(
"Artifact %s missing from registry. You may need to sync the "
"artifact repo by running make in the artifact directory." % name
)
return result | Get artifact by name.
Args:
name: artifact name string.
Returns:
artifact object.
Raises:
ArtifactNotRegisteredError: if artifact doesn't exist in the registry. | GetArtifact | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def Exists(self, name: str) -> bool:
"""Checks whether the artifact of the specified name exists in the registry.
Args:
name: A name of the artifact.
Returns:
`True` if the artifact exists, `False` otherwise.
"""
try:
self.GetArtifact(name)
except rdf_artifacts.ArtifactNotRegisteredError:
return False
return True | Checks whether the artifact of the specified name exists in the registry.
Args:
name: A name of the artifact.
Returns:
`True` if the artifact exists, `False` otherwise. | Exists | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
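Reads against the registry go through the accessors above: a filtered listing via GetArtifacts, and point lookups via Exists and GetArtifact. A lookup sketch; the rdf_artifacts import path is assumed and the artifact names are placeholders:

from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts
from grr_response_server import artifact_registry

registry = artifact_registry.REGISTRY

# Filtered listing: Linux artifacts with no knowledge-base path dependencies.
for artifact in registry.GetArtifacts(os_name="Linux", exclude_dependents=True):
  print(artifact.name)

# Point lookups.
if registry.Exists("ExampleEmptyArtifact"):
  print(registry.GetArtifact("ExampleEmptyArtifact").doc)

try:
  registry.GetArtifact("DefinitelyMissingArtifact")
except rdf_artifacts.ArtifactNotRegisteredError as error:
  print("not registered:", error)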
def DeleteArtifactsFromDatastore(artifact_names, reload_artifacts=True):
"""Deletes a list of artifacts from the data store."""
artifacts_list = REGISTRY.GetArtifacts(
reload_datastore_artifacts=reload_artifacts
)
to_delete = set(artifact_names)
deps = set()
for artifact_obj in artifacts_list:
if artifact_obj.name in to_delete:
continue
if GetArtifactDependencies(artifact_obj) & to_delete:
deps.add(str(artifact_obj.name))
if deps:
raise ValueError(
"Artifact(s) %s depend(s) on one of the artifacts to delete."
% ",".join(deps)
)
found_artifact_names = set()
for artifact_value in artifacts_list:
if artifact_value.name in to_delete:
found_artifact_names.add(artifact_value.name)
if len(found_artifact_names) != len(to_delete):
not_found = to_delete - found_artifact_names
raise ValueError(
"Artifact(s) to delete (%s) not found." % ",".join(not_found)
)
for artifact_name in to_delete:
data_store.REL_DB.DeleteArtifact(str(artifact_name))
REGISTRY.UnregisterArtifact(artifact_name) | Deletes a list of artifacts from the data store. | DeleteArtifactsFromDatastore | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
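Deletion refuses both to orphan dependent artifacts and to act on names it cannot find, signalling each case with ValueError, so callers typically wrap it. A sketch with a placeholder artifact name:

from grr_response_server import artifact_registry

try:
  artifact_registry.DeleteArtifactsFromDatastore(["ExampleUploadedArtifact"])
except ValueError as error:
  # Either another artifact depends on it, or it was never in the datastore.
  print("refused to delete:", error)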
def ValidateSyntax(rdf_artifact):
"""Validates artifact syntax.
This method can be used to validate individual artifacts as they are loaded,
without needing all artifacts to be loaded first, as for Validate().
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactSyntaxError: If artifact syntax is invalid.
"""
if not rdf_artifact.doc:
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, "missing doc")
for supp_os in rdf_artifact.supported_os:
valid_os = rdf_artifact.SUPPORTED_OS_LIST
if supp_os not in valid_os:
detail = "invalid `supported_os` ('%s' not in %s)" % (supp_os, valid_os)
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail)
kb_field_names = rdf_client.KnowledgeBase().GetKbFieldNames()
# Any %%blah%% path dependencies must be defined in the KnowledgeBase
for dep in GetArtifactPathDependencies(rdf_artifact):
if dep not in kb_field_names:
detail = f"broken path dependencies ({dep!r} not in {kb_field_names})"
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail)
for source in rdf_artifact.sources:
try:
source.Validate()
except rdf_artifacts.ArtifactSourceSyntaxError as e:
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, "bad source", e) | Validates artifact syntax.
This method can be used to validate individual artifacts as they are loaded,
without needing all artifacts to be loaded first, as for Validate().
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactSyntaxError: If artifact syntax is invalid. | ValidateSyntax | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
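ValidateSyntax works on a single artifact in isolation, which makes it usable as a lint step before upload. A sketch that constructs an artifact with the keyword form used by ArtifactsFromYaml above and trips the supported_os check; the bogus OS value is deliberate and the import paths are assumptions:

from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts
from grr_response_server import artifact_registry

candidate = rdf_artifacts.Artifact(
    name="ExampleBadArtifact",
    doc="Artifact with a bogus supported_os entry.",
    supported_os=["Plan9"],  # not in SUPPORTED_OS_LIST, so validation fails
)

try:
  artifact_registry.ValidateSyntax(candidate)
except rdf_artifacts.ArtifactSyntaxError as error:
  print("rejected:", error)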
def ValidateDependencies(rdf_artifact):
"""Validates artifact dependencies.
This method checks whether all dependencies of the artifact are present
and contain no errors.
This method can be called only after all other artifacts have been loaded.
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactDependencyError: If a dependency is missing or contains errors.
"""
for dependency in GetArtifactDependencies(rdf_artifact):
try:
dependency_obj = REGISTRY.GetArtifact(dependency)
except rdf_artifacts.ArtifactNotRegisteredError as e:
raise rdf_artifacts.ArtifactDependencyError(
rdf_artifact, "missing dependency", cause=e
)
message = dependency_obj.error_message
if message:
raise rdf_artifacts.ArtifactDependencyError(
rdf_artifact, "dependency error", cause=message
) | Validates artifact dependencies.
This method checks whether all dependencies of the artifact are present
and contain no errors.
This method can be called only after all other artifacts have been loaded.
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactDependencyError: If a dependency is missing or contains errors. | ValidateDependencies | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |