code (stringlengths 26-870k) | docstring (stringlengths 1-65.6k) | func_name (stringlengths 1-194) | language (stringclasses: 1 value) | repo (stringlengths 8-68) | path (stringlengths 5-194) | url (stringlengths 46-254) | license (stringclasses: 4 values)
---|---|---|---|---|---|---|---
def FromJson(
cls,
json_str: str,
) -> "VersionTuple":
"""Creates a version tuple from a JSON response.
The JSON response must be a serialized variant of the `ApiGetGrrVersionResult`
message.
Args:
json_str: A string object with version information JSON data.
Returns:
Parsed version tuple.
"""
result = metadata_pb2.ApiGetGrrVersionResult()
json_format.Parse(json_str, result, ignore_unknown_fields=True)
return cls.FromProto(result) | Creates a version tuple from a JSON response.
The JSON response must be a serialized variant of the `ApiGetGrrVersionResult`
message.
Args:
json_str: A string object with version information JSON data.
Returns:
Parsed version tuple. | FromJson | python | google/grr | api_client/python/grr_api_client/connectors/http.py | https://github.com/google/grr/blob/master/api_client/python/grr_api_client/connectors/http.py | Apache-2.0 |
def FromProto(
cls,
proto: metadata_pb2.ApiGetGrrVersionResult,
) -> "VersionTuple":
"""Creates a version tuple from a server response.
Args:
proto: A server response with version information.
Returns:
Parsed version tuple.
"""
return VersionTuple(
major=proto.major,
minor=proto.minor,
revision=proto.revision,
release=proto.release) | Creates a version tuple from a server response.
Args:
proto: A server response with version information.
Returns:
Parsed version tuple. | FromProto | python | google/grr | api_client/python/grr_api_client/connectors/http.py | https://github.com/google/grr/blob/master/api_client/python/grr_api_client/connectors/http.py | Apache-2.0 |
def FromString(cls, string: str) -> "VersionTuple":
"""Creates a version tuple from a version string (like '1.3.3.post7').
Args:
string: A version string.
Returns:
Parsed version tuple.
"""
match = _VERSION_STRING_PATTERN.match(string)
if match is None:
raise ValueError(f"Incorrect version string: {string!r}")
return VersionTuple(
major=int(match[1]),
minor=int(match[2]),
revision=int(match[3]),
# TODO(hanuszczak): Replace with `str.removeprefix` once we support only
# Python 3.9+.
release=int(match[4][len("post"):] if match[4]
.startswith("post") else match[4])) | Creates a version tuple from a version string (like '1.3.3.post7').
Args:
string: A version string.
Returns:
Parsed version tuple. | FromString | python | google/grr | api_client/python/grr_api_client/connectors/http.py | https://github.com/google/grr/blob/master/api_client/python/grr_api_client/connectors/http.py | Apache-2.0 |
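For illustration, a minimal usage sketch of `FromString`; the version string is hypothetical and the assertion assumes `VersionTuple` exposes its fields as plain named-tuple attributes:

```python
# Hypothetical example: parse a release-style version string.
version = VersionTuple.FromString("1.3.3.post7")

# The "post" prefix of the last component is stripped before conversion,
# so every field ends up as a plain integer.
assert (version.major, version.minor, version.revision, version.release) == (1, 3, 3, 7)
```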
def _FetchVersion(self) -> Optional[VersionTuple]:
"""Fetches version information about the GRR server.
Note that it might be the case that the server version is so old that it
does not have the method for retrieving the server version. In such a case, the
method will return `None`.
Returns:
A message with version descriptor (if possible).
"""
headers = {
"x-csrftoken": self.csrf_token,
"x-requested-with": "XMLHttpRequest",
}
cookies = {
"csrftoken": self.csrf_token,
}
response = self.session.get(
url=f"{self.api_endpoint}/api/v2/metadata/version",
headers=headers,
cookies=cookies,
)
try:
self._CheckResponseStatus(response)
except errors.Error:
return None
json_str = response.content.decode("utf-8").lstrip(self.JSON_PREFIX)
return VersionTuple.FromJson(json_str) | Fetches version information about the GRR server.
Note that it might be the case that the server version is so old that it
does not have the method for retrieving the server version. In such a case, the
method will return `None`.
Returns:
A message with version descriptor (if possible). | _FetchVersion | python | google/grr | api_client/python/grr_api_client/connectors/http.py | https://github.com/google/grr/blob/master/api_client/python/grr_api_client/connectors/http.py | Apache-2.0 |
def _ValidateVersion(self):
"""Validates that the API client is compatible the GRR server.
In case version is impossible to validate (e.g. we are not running from
a PIP package), this function does nothing and skips validation.
Raises:
VersionMismatchError: If the API client is incompatible with the server.
"""
api_client_version = self.api_client_version
server_version = self.server_version
if api_client_version is None or server_version is None:
# If either of the versions is unspecified, we cannot properly validate.
return
if api_client_version < server_version:
raise errors.VersionMismatchError(
server_version=server_version, api_client_version=api_client_version) | Validates that the API client is compatible with the GRR server.
In case the version is impossible to validate (e.g. we are not running from
a PIP package), this function does nothing and skips validation.
Raises:
VersionMismatchError: If the API client is incompatible with the server. | _ValidateVersion | python | google/grr | api_client/python/grr_api_client/connectors/http.py | https://github.com/google/grr/blob/master/api_client/python/grr_api_client/connectors/http.py | Apache-2.0 |
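The check above relies on ordinary tuple ordering: validation fails only when the API client is strictly older than the server. A small sketch with hypothetical version values, assuming `VersionTuple` compares field by field like a regular named tuple:

```python
# Hypothetical versions (major, minor, revision, release).
server = VersionTuple(major=3, minor=4, revision=6, release=7)
older_client = VersionTuple(major=3, minor=4, revision=5, release=0)
newer_client = VersionTuple(major=3, minor=4, revision=7, release=0)

assert older_client < server        # this pairing would raise VersionMismatchError
assert not (newer_client < server)  # this pairing passes validation silently
```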
def server_version(self) -> Optional[VersionTuple]:
"""Retrieves (lazily) the version server tuple."""
if self._server_version is None:
self._server_version = self._FetchVersion()
return self._server_version | Retrieves (lazily) the server version tuple. | server_version | python | google/grr | api_client/python/grr_api_client/connectors/http.py | https://github.com/google/grr/blob/master/api_client/python/grr_api_client/connectors/http.py | Apache-2.0
def api_client_version(self) -> Optional[VersionTuple]:
"""Retrieves (lazily) the API client version tuple (if possible)."""
if self._api_client_version is None:
try:
distribution = pkg_resources.get_distribution("grr_api_client")
except pkg_resources.DistributionNotFound:
# Distribution might not be available if we are not running from within
# a PIP package. In such case, it is not possible to retrieve version.
return None
self._api_client_version = VersionTuple.FromString(distribution.version)
return self._api_client_version | Retrieves (lazily) the API client version tuple (if possible). | api_client_version | python | google/grr | api_client/python/grr_api_client/connectors/http.py | https://github.com/google/grr/blob/master/api_client/python/grr_api_client/connectors/http.py | Apache-2.0 |
def make_ui_files():
"""Builds necessary assets from sources."""
# Install node_modules, but keep package(-lock).json frozen.
# Using shell=True, otherwise npm is not found in a nodeenv-built
# virtualenv on Windows.
subprocess.check_call(
"npm ci", shell=True, cwd="grr_response_server/gui/static")
subprocess.check_call(
"npm run gulp compile", shell=True, cwd="grr_response_server/gui/static")
# Compile UI v2.
subprocess.check_call("npm ci", shell=True, cwd="grr_response_server/gui/ui")
subprocess.check_call(
"npm run ng build --prod", shell=True, cwd="grr_response_server/gui/ui") | Builds necessary assets from sources. | make_ui_files | python | google/grr | grr/server/setup.py | https://github.com/google/grr/blob/master/grr/server/setup.py | Apache-2.0 |
def get_config():
"""Get relative path to version.ini file and the INI parser with its data."""
rel_ini_path = "version.ini"
ini_path = os.path.join(THIS_DIRECTORY, rel_ini_path)
if not os.path.exists(ini_path):
rel_ini_path = os.path.join("..", "..", "version.ini")
ini_path = os.path.join(THIS_DIRECTORY, rel_ini_path)
if not os.path.exists(ini_path):
raise RuntimeError("Couldn't find version.ini")
config = configparser.ConfigParser()
config.read(ini_path)
return rel_ini_path, config | Gets the relative path to the version.ini file and an INI parser with its data. | get_config | python | google/grr | grr/server/setup.py | https://github.com/google/grr/blob/master/grr/server/setup.py | Apache-2.0
def testEventNotification(self):
"""Test that events are sent to listeners."""
TestListener.received_events = []
event = rdf_flows.GrrMessage(
session_id=rdfvalue.SessionID(flow_name="SomeFlow"),
name="test message",
payload=rdf_paths.PathSpec(path="foobar", pathtype="TSK"),
source="aff4:/C.0000000000000001",
auth_state="AUTHENTICATED",
)
events.Events.PublishEvent("TestEvent", event, username=self.test_username)
# Make sure the source is correctly propagated.
self.assertEqual(TestListener.received_events[0], event) | Test that events are sent to listeners. | testEventNotification | python | google/grr | grr/server/grr_response_server/events_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/events_test.py | Apache-2.0 |
def Register(cls: Type[base.ExportConverter]):
"""Registers an ExportConversion class.
Args:
cls: ExportConversion class.
"""
_EXPORT_CONVERTER_REGISTRY.add(cls) | Registers an ExportConversion class.
Args:
cls: ExportConversion class. | Register | python | google/grr | grr/server/grr_response_server/export_converters_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/export_converters_registry.py | Apache-2.0 |
def Unregister(cls: Type[base.ExportConverter]):
"""Unregisters an ExportConversion class.
Args:
cls: ExportConversion class to be unregistered.
"""
_EXPORT_CONVERTER_REGISTRY.remove(cls) | Unregisters an ExportConversion class.
Args:
cls: ExportConversion class to be unregistered. | Unregister | python | google/grr | grr/server/grr_response_server/export_converters_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/export_converters_registry.py | Apache-2.0 |
def ClearExportConverters():
"""Clears converters registry and its cached values."""
_EXPORT_CONVERTER_REGISTRY.clear() | Clears converters registry and its cached values. | ClearExportConverters | python | google/grr | grr/server/grr_response_server/export_converters_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/export_converters_registry.py | Apache-2.0 |
def GetConvertersByClass(value_cls):
"""Returns all converters that take given value as an input value."""
results = [
cls
for cls in _EXPORT_CONVERTER_REGISTRY
if cls.input_rdf_type == value_cls
]
if not results:
results = [data_agnostic.DataAgnosticExportConverter]
return results | Returns all converters that take given value as an input value. | GetConvertersByClass | python | google/grr | grr/server/grr_response_server/export_converters_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/export_converters_registry.py | Apache-2.0 |
def GetConvertersByValue(value):
"""Returns all converters that take given value as an input value."""
return GetConvertersByClass(value.__class__) | Returns all converters that take given value as an input value. | GetConvertersByValue | python | google/grr | grr/server/grr_response_server/export_converters_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/export_converters_registry.py | Apache-2.0 |
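A minimal sketch of how the registry functions above compose; the converter class and the `Foo`/`Bar` RDF value classes are hypothetical stand-ins, not real GRR types:

```python
# Hypothetical converter registration and lookup.
class FooExportConverter(base.ExportConverter):
  input_rdf_type = Foo  # hypothetical RDF value class


Register(FooExportConverter)
assert GetConvertersByClass(Foo) == [FooExportConverter]

# Values without a dedicated converter fall back to the data-agnostic one.
assert GetConvertersByClass(Bar) == [data_agnostic.DataAgnosticExportConverter]

Unregister(FooExportConverter)
```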
def testReceiveMessages(self):
"""Tests receiving messages."""
client_id = "C.1234567890123456"
flow_id = "12345678"
data_store.REL_DB.WriteClientMetadata(client_id)
before_flow_create = data_store.REL_DB.Now()
_, req = self._FlowSetup(client_id, flow_id)
after_flow_create = data_store.REL_DB.Now()
session_id = "%s/%s" % (client_id, flow_id)
messages = [
rdf_flows.GrrMessage(
request_id=1,
response_id=i,
session_id=session_id,
auth_state="AUTHENTICATED",
payload=rdfvalue.RDFInteger(i),
)
for i in range(1, 10)
]
ReceiveMessages(client_id, messages)
received = data_store.REL_DB.ReadAllFlowRequestsAndResponses(
client_id, flow_id
)
self.assertLen(received, 1)
received_request = received[0][0]
self.assertEqual(received_request.client_id, req.client_id)
self.assertEqual(received_request.flow_id, req.flow_id)
self.assertEqual(received_request.request_id, req.request_id)
self.assertBetween(
received_request.timestamp, before_flow_create, after_flow_create
)
self.assertLen(received[0][1], 9) | Tests receiving messages. | testReceiveMessages | python | google/grr | grr/server/grr_response_server/frontend_lib_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/frontend_lib_test.py | Apache-2.0 |
def AddFile(self, hash_id: rdf_objects.HashID, metadata: FileMetadata):
"""Add a new file to the file store."""
raise NotImplementedError() | Add a new file to the file store. | AddFile | python | google/grr | grr/server/grr_response_server/file_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/file_store.py | Apache-2.0 |
def AddFiles(self, hash_id_metadatas: Dict[rdf_objects.HashID, FileMetadata]):
"""Adds multiple files to the file store.
Args:
hash_id_metadatas: A dictionary mapping hash ids to file metadata (a tuple
of hash client path and blob references).
"""
for hash_id, metadata in hash_id_metadatas.items():
self.AddFile(hash_id, metadata) | Adds multiple files to the file store.
Args:
hash_id_metadatas: A dictionary mapping hash ids to file metadata (a tuple
of hash client path and blob references). | AddFiles | python | google/grr | grr/server/grr_response_server/file_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/file_store.py | Apache-2.0 |
def _GetChunk(
self,
) -> tuple[Optional[bytes], Optional[rdf_objects.BlobReference]]:
"""Fetches a chunk corresponding to the current offset."""
found_ref = None
for ref in self._blob_refs:
if self._offset >= ref.offset and self._offset < (ref.offset + ref.size):
found_ref = ref
break
if not found_ref:
return None, None
# If self._current_ref == found_ref, then simply return previously found
# chunk. Otherwise, update self._current_chunk value.
if self._current_ref != found_ref:
self._current_ref = found_ref
blob_id = models_blob.BlobID(found_ref.blob_id)
data = data_store.BLOBS.ReadBlobs([blob_id])
if data[blob_id] is None:
raise BlobNotFoundError(blob_id)
self._current_chunk = data[blob_id]
return self._current_chunk, self._current_ref | Fetches a chunk corresponding to the current offset. | _GetChunk | python | google/grr | grr/server/grr_response_server/file_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/file_store.py | Apache-2.0 |
def Read(self, length: Optional[int] = None) -> bytes:
"""Reads data."""
if length is None:
length = self._length - self._offset
# Only enforce limit when length is not specified manually.
if length > self._max_unbound_read:
raise OversizedReadError(
"Attempted to read %d bytes when Server.max_unbound_read_size is %d"
% (length, self._max_unbound_read)
)
result = io.BytesIO()
while result.tell() < length:
chunk, ref = self._GetChunk()
if not chunk:
break
part = chunk[self._offset - ref.offset :]
if not part:
break
result.write(part)
self._offset += min(length, len(part))
return result.getvalue()[:length] | Reads data. | Read | python | google/grr | grr/server/grr_response_server/file_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/file_store.py | Apache-2.0 |
def Tell(self) -> int:
"""Returns current reading cursor position."""
return self._offset | Returns current reading cursor position. | Tell | python | google/grr | grr/server/grr_response_server/file_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/file_store.py | Apache-2.0 |
def Seek(self, offset: int, whence=os.SEEK_SET) -> None:
"""Moves the reading cursor."""
if whence == os.SEEK_SET:
self._offset = offset
elif whence == os.SEEK_CUR:
self._offset += offset
elif whence == os.SEEK_END:
self._offset = self._length + offset
else:
raise ValueError("Invalid whence argument: %s" % whence) | Moves the reading cursor. | Seek | python | google/grr | grr/server/grr_response_server/file_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/file_store.py | Apache-2.0 |
def size(self) -> int:
"""Size of the hashed data."""
return self._length | Size of the hashed data. | size | python | google/grr | grr/server/grr_response_server/file_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/file_store.py | Apache-2.0 |
def hash_id(self) -> rdf_objects.HashID:
"""Hash ID identifying hashed data."""
return self._hash_id | Hash ID identifying hashed data. | hash_id | python | google/grr | grr/server/grr_response_server/file_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/file_store.py | Apache-2.0 |
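A short usage sketch for the `BlobStream` reader built from the methods above, assuming `blob_stream` was obtained from `file_store.OpenFile` (shown further below):

```python
# Hypothetical: measure the stream, then read its first kilobyte.
blob_stream.Seek(0, os.SEEK_END)
total = blob_stream.Tell()       # total length of the hashed data
blob_stream.Seek(0)              # rewind to the beginning
head = blob_stream.Read(1024)    # reads at most 1024 bytes
```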
def AddFilesWithUnknownHashes(
client_path_blob_refs: Dict[
db.ClientPath, Iterable[rdf_objects.BlobReference]
],
use_external_stores: bool = True,
) -> Dict[db.ClientPath, rdf_objects.SHA256HashID]:
"""Adds new files consisting of given blob references.
Args:
client_path_blob_refs: A dictionary mapping `db.ClientPath` instances to
lists of blob references.
use_external_stores: A flag indicating if the files should also be added to
external file stores.
Returns:
A dictionary mapping `db.ClientPath` to hash ids of the file.
Raises:
BlobNotFoundError: If one of the referenced blobs cannot be found.
InvalidBlobSizeError: if reference's blob size is different from an
actual blob size.
InvalidBlobOffsetError: if reference's blob offset is different from an
actual blob offset.
"""
hash_id_blob_refs = dict()
client_path_hash_id = dict()
metadatas = dict()
all_client_path_blob_refs = list()
for client_path, blob_refs in client_path_blob_refs.items():
if blob_refs:
for blob_ref in blob_refs:
blob_ref = mig_objects.ToProtoBlobReference(blob_ref)
all_client_path_blob_refs.append((client_path, blob_ref))
else:
# Make sure empty files (without blobs) are correctly handled.
hash_id = rdf_objects.SHA256HashID.FromData(b"")
client_path_hash_id[client_path] = hash_id
hash_id_blob_refs[hash_id] = []
metadatas[hash_id] = FileMetadata(client_path=client_path, blob_refs=[])
client_path_offset = collections.defaultdict(lambda: 0)
client_path_sha256 = collections.defaultdict(hashlib.sha256)
verified_client_path_blob_refs = collections.defaultdict(list)
client_path_blob_ref_batches = collection.Batch(
items=all_client_path_blob_refs, size=_BLOBS_READ_BATCH_SIZE
)
for client_path_blob_ref_batch in client_path_blob_ref_batches:
blob_id_batch = set(
models_blob.BlobID(blob_ref.blob_id)
for _, blob_ref in client_path_blob_ref_batch
)
blobs = data_store.BLOBS.ReadAndWaitForBlobs(
blob_id_batch, timeout=BLOBS_READ_TIMEOUT
)
for client_path, blob_ref in client_path_blob_ref_batch:
blob = blobs[models_blob.BlobID(blob_ref.blob_id)]
if blob is None:
raise BlobNotFoundError(blob_ref.blob_id)
offset = client_path_offset[client_path]
if blob_ref.size != len(blob):
raise InvalidBlobSizeError(
"Got conflicting size information for blob %s: %d vs %d."
% (blob_ref.blob_id, blob_ref.size, len(blob))
)
if blob_ref.offset != offset:
raise InvalidBlobOffsetError(
"Got conflicting offset information for blob %s: %d vs %d."
% (blob_ref.blob_id, blob_ref.offset, offset)
)
verified_client_path_blob_refs[client_path].append(blob_ref)
client_path_offset[client_path] = offset + len(blob)
client_path_sha256[client_path].update(blob)
for client_path in client_path_sha256.keys():
sha256 = client_path_sha256[client_path].digest()
hash_id = rdf_objects.SHA256HashID.FromSerializedBytes(sha256)
client_path_hash_id[client_path] = hash_id
hash_id_blob_refs[hash_id] = verified_client_path_blob_refs[client_path]
data_store.REL_DB.WriteHashBlobReferences(hash_id_blob_refs)
if use_external_stores:
for client_path in verified_client_path_blob_refs.keys():
metadatas[client_path_hash_id[client_path]] = FileMetadata(
client_path=client_path,
blob_refs=list(
map(
mig_objects.ToRDFBlobReference,
verified_client_path_blob_refs[client_path],
)
),
)
EXTERNAL_FILE_STORE.AddFiles(metadatas)
return client_path_hash_id | Adds new files consisting of given blob references.
Args:
client_path_blob_refs: A dictionary mapping `db.ClientPath` instances to
lists of blob references.
use_external_stores: A flag indicating if the files should also be added to
external file stores.
Returns:
A dictionary mapping `db.ClientPath` to hash ids of the file.
Raises:
BlobNotFoundError: If one of the referenced blobs cannot be found.
InvalidBlobSizeError: if reference's blob size is different from an
actual blob size.
InvalidBlobOffsetError: if reference's blob offset is different from an
actual blob offset. | AddFilesWithUnknownHashes | python | google/grr | grr/server/grr_response_server/file_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/file_store.py | Apache-2.0 |
def AddFileWithUnknownHash(
client_path: db.ClientPath,
blob_refs: Sequence[rdf_objects.BlobReference],
use_external_stores: bool = True,
) -> Dict[db.ClientPath, rdf_objects.SHA256HashID]:
"""Add a new file consisting of given blob IDs."""
precondition.AssertType(client_path, db.ClientPath)
precondition.AssertIterableType(blob_refs, rdf_objects.BlobReference)
return AddFilesWithUnknownHashes(
{client_path: blob_refs}, use_external_stores=use_external_stores
)[client_path] | Add a new file consisting of given blob IDs. | AddFileWithUnknownHash | python | google/grr | grr/server/grr_response_server/file_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/file_store.py | Apache-2.0 |
def CheckHashes(
hash_ids: Collection[rdf_objects.SHA256HashID],
) -> Dict[rdf_objects.SHA256HashID, bool]:
"""Checks if files with given hashes are present in the file store.
Args:
hash_ids: A list of SHA256HashID objects.
Returns:
A dict where SHA256HashID objects are keys. Corresponding values
may be False (if the hash id is not present) or True (if it is present).
"""
return {
k: bool(v)
for k, v in data_store.REL_DB.ReadHashBlobReferences(hash_ids).items()
} | Checks if files with given hashes are present in the file store.
Args:
hash_ids: A list of SHA256HashID objects.
Returns:
A dict where SHA256HashID objects are keys. Corresponding values
may be False (if the hash id is not present) or True (if it is present). | CheckHashes | python | google/grr | grr/server/grr_response_server/file_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/file_store.py | Apache-2.0
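A usage sketch for `CheckHashes` with a hypothetical hash id:

```python
# Hypothetical presence check before re-collecting a file.
hash_id = rdf_objects.SHA256HashID.FromData(b"file contents")
if CheckHashes([hash_id])[hash_id]:
  print("Content already present in the file store; no need to re-collect.")
```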
def OpenFile(
client_path: db.ClientPath,
max_timestamp: Optional[rdfvalue.RDFDatetime] = None,
) -> BlobStream:
"""Opens latest content of a given file for reading.
Args:
client_path: A path to a file.
max_timestamp: If specified, will open the last collected version with a
timestamp equal or lower than max_timestamp. If not specified, will simply
open the latest version.
Returns:
A file like object with random access support.
Raises:
FileHasNoContentError: if the file was never collected.
MissingBlobReferencesError: if one of the blobs was not found.
"""
proto_path_info = data_store.REL_DB.ReadLatestPathInfosWithHashBlobReferences(
[client_path], max_timestamp=max_timestamp
)[client_path]
path_info = None
if proto_path_info:
path_info = mig_objects.ToRDFPathInfo(proto_path_info)
if path_info is None:
# If path_info returned by ReadLatestPathInfosWithHashBlobReferences
# is None, do one more ReadPathInfo call to check if this path info
# was ever present in the database.
try:
data_store.REL_DB.ReadPathInfo(
client_path.client_id, client_path.path_type, client_path.components
)
except db.UnknownPathError:
raise FileNotFoundError(client_path)
# If the given path info is present in the database, but there are
# no suitable hash blob references associated with it, raise
# FileHasNoContentError instead of FileNotFoundError.
raise FileHasNoContentError(client_path)
hash_id = rdf_objects.SHA256HashID.FromSerializedBytes(
path_info.hash_entry.sha256.AsBytes()
)
blob_references = data_store.REL_DB.ReadHashBlobReferences([hash_id])[hash_id]
if blob_references is None:
raise MissingBlobReferencesError(
"File hash was expected to have corresponding "
"blob references, but they were not found: %r" % hash_id
)
blob_references = list(map(mig_objects.ToRDFBlobReference, blob_references))
return BlobStream(client_path, blob_references, hash_id) | Opens latest content of a given file for reading.
Args:
client_path: A path to a file.
max_timestamp: If specified, will open the last collected version with a
timestamp equal or lower than max_timestamp. If not specified, will simply
open the latest version.
Returns:
A file like object with random access support.
Raises:
FileHasNoContentError: if the file was never collected.
MissingBlobReferencesError: if one of the blobs was not found. | OpenFile | python | google/grr | grr/server/grr_response_server/file_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/file_store.py | Apache-2.0 |
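A usage sketch for `OpenFile`; the client id and path components are hypothetical, and `db.ClientPath.OS` is assumed to build an OS-pathtype client path:

```python
# Hypothetical: read up to 4 KiB of the latest collected version of a file.
client_path = db.ClientPath.OS("C.1234567890123456", ("tmp", "foo.txt"))
try:
  head = OpenFile(client_path).Read(4096)
except FileHasNoContentError:
  head = None  # the path is known but its content was never collected
```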
def __init__(
self,
client_path: db.ClientPath,
data: bytes,
chunk_index: int,
total_chunks: int,
offset: int,
total_size: int,
) -> None:
"""Initializes StreamedFileChunk object.
Args:
client_path: db.ClientPath identifying the file.
data: bytes with chunk's contents.
chunk_index: Index of this chunk (relative to the sequence of chunks
corresponding to the file).
total_chunks: Total number of chunks corresponding to a given file.
offset: Offset of this chunk in bytes from the beginning of the file.
total_size: Total size of the file in bytes.
"""
self.client_path = client_path
self.data = data
self.offset = offset
self.total_size = total_size
self.chunk_index = chunk_index
self.total_chunks = total_chunks | Initializes StreamedFileChunk object.
Args:
client_path: db.ClientPath identifying the file.
data: bytes with chunk's contents.
chunk_index: Index of this chunk (relative to the sequence of chunks
corresponding to the file).
total_chunks: Total number of chunks corresponding to a given file.
offset: Offset of this chunk in bytes from the beginning of the file.
total_size: Total size of the file in bytes. | __init__ | python | google/grr | grr/server/grr_response_server/file_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/file_store.py | Apache-2.0 |
def StreamFilesChunks(
client_paths: Collection[db.ClientPath],
max_timestamp: Optional[rdfvalue.RDFDatetime] = None,
max_size: Optional[int] = None,
) -> Iterable[StreamedFileChunk]:
"""Streams contents of given files.
Args:
client_paths: db.ClientPath objects describing paths to files.
max_timestamp: If specified, then for every requested file will open the
last collected version of the file with a timestamp equal or lower than
max_timestamp. If not specified, will simply open a latest version for
each file.
max_size: If specified, only the chunks covering max_size bytes will be
returned.
Yields:
StreamedFileChunk objects for every file read. Chunks will be returned
sequentially, their order will correspond to the client_paths order.
Files having no content will simply be ignored.
Raises:
BlobNotFoundError: if one of the blobs wasn't found while streaming.
"""
proto_path_infos_by_cp = (
data_store.REL_DB.ReadLatestPathInfosWithHashBlobReferences(
client_paths, max_timestamp=max_timestamp
)
)
path_infos_by_cp = {}
for k, v in proto_path_infos_by_cp.items():
path_infos_by_cp[k] = None
if v is not None:
path_infos_by_cp[k] = v
hash_ids_by_cp = {}
for cp, pi in path_infos_by_cp.items():
if pi:
hash_ids_by_cp[cp] = rdf_objects.SHA256HashID.FromSerializedBytes(
pi.hash_entry.sha256
)
blob_refs_by_hash_id = data_store.REL_DB.ReadHashBlobReferences(
hash_ids_by_cp.values()
)
all_chunks = []
for cp in client_paths:
try:
hash_id = hash_ids_by_cp[cp]
except KeyError:
continue
try:
blob_refs = blob_refs_by_hash_id[hash_id]
except KeyError:
continue
num_blobs = len(blob_refs)
total_size = 0
for ref in blob_refs:
total_size += ref.size
cur_size = 0
for i, ref in enumerate(blob_refs):
blob_id = models_blob.BlobID(ref.blob_id)
all_chunks.append((cp, blob_id, i, num_blobs, ref.offset, total_size))
cur_size += ref.size
if max_size is not None and cur_size >= max_size:
break
for batch in collection.Batch(all_chunks, STREAM_CHUNKS_READ_AHEAD):
blobs = data_store.BLOBS.ReadBlobs(
[blob_id for _, blob_id, _, _, _, _ in batch]
)
for cp, blob_id, i, num_blobs, offset, total_size in batch:
blob_data = blobs[blob_id]
if blob_data is None:
raise BlobNotFoundError(blob_id)
yield StreamedFileChunk(cp, blob_data, i, num_blobs, offset, total_size) | Streams contents of given files.
Args:
client_paths: db.ClientPath objects describing paths to files.
max_timestamp: If specified, then for every requested file will open the
last collected version of the file with a timestamp equal or lower than
max_timestamp. If not specified, will simply open a latest version for
each file.
max_size: If specified, only the chunks covering max_size bytes will be
returned.
Yields:
StreamedFileChunk objects for every file read. Chunks will be returned
sequentially, their order will correspond to the client_paths order.
Files having no content will simply be ignored.
Raises:
BlobNotFoundError: if one of the blobs wasn't found while streaming. | StreamFilesChunks | python | google/grr | grr/server/grr_response_server/file_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/file_store.py | Apache-2.0 |
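A usage sketch for `StreamFilesChunks`, again with a hypothetical client path; chunks arrive in `client_paths` order and files with no content are silently skipped:

```python
# Hypothetical: stream at most the first 10 MiB of each requested file.
paths = [db.ClientPath.OS("C.1234567890123456", ("tmp", "foo.txt"))]
for chunk in StreamFilesChunks(paths, max_size=10 * 1024 * 1024):
  print(chunk.client_path, chunk.chunk_index, "/", chunk.total_chunks, len(chunk.data))
```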
def RetrieveIP4Info(self, ip):
"""Retrieves information for an IP4 address."""
if ip.is_private:
return (IPInfo.INTERNAL, "Internal IP address.")
try:
# It's an external IP, let's try to do a reverse lookup.
res = socket.getnameinfo((str(ip), 0), socket.NI_NAMEREQD)
return (IPInfo.EXTERNAL, res[0])
except (socket.error, socket.herror, socket.gaierror):
return (IPInfo.EXTERNAL, "Unknown IP address.") | Retrieves information for an IP4 address. | RetrieveIP4Info | python | google/grr | grr/server/grr_response_server/ip_resolver.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/ip_resolver.py | Apache-2.0 |
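A small sketch of the private/public split implemented above, assuming `resolver` is an instance of the resolver class these methods belong to:

```python
import ipaddress

# Private addresses short-circuit without a DNS query.
info, desc = resolver.RetrieveIP4Info(ipaddress.IPv4Address("10.0.0.1"))
# -> (IPInfo.INTERNAL, "Internal IP address.")

# Public addresses go through a reverse lookup, which may or may not resolve.
info, desc = resolver.RetrieveIP4Info(ipaddress.IPv4Address("8.8.8.8"))
# -> (IPInfo.EXTERNAL, "dns.google") or (IPInfo.EXTERNAL, "Unknown IP address.")
```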
def RetrieveIP6Info(self, ip):
"""Retrieves information for an IP6 address."""
_ = ip
return (IPInfo.INTERNAL, "Internal IP6 address.") | Retrieves information for an IP6 address. | RetrieveIP6Info | python | google/grr | grr/server/grr_response_server/ip_resolver.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/ip_resolver.py | Apache-2.0 |
def IPResolverInitOnce():
"""Initializes IP resolver."""
global IP_RESOLVER
ip_resolver_cls_name = config.CONFIG["Server.ip_resolver_class"]
logging.debug("Using ip resolver: %s", ip_resolver_cls_name)
cls = IPResolverBase.GetPlugin(ip_resolver_cls_name)
IP_RESOLVER = cls() | Initializes IP resolver. | IPResolverInitOnce | python | google/grr | grr/server/grr_response_server/ip_resolver.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/ip_resolver.py | Apache-2.0 |
def Sign(self, command: execute_signed_command_pb2.Command) -> bytes:
"""Signs a command and returns the signature.""" | Signs a command and returns the signature. | Sign | python | google/grr | grr/server/grr_response_server/command_signer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/command_signer.py | Apache-2.0 |
def Verify(
self,
signature: bytes,
command: execute_signed_command_pb2.Command,
) -> None:
"""Validates a signature for given data with a verification key.
Args:
signature: Signature to verify.
command: Command that was signed.
Raises:
CommandSignatureValidationError: Invalid signature
""" | Validates a signature for given data with a verification key.
Args:
signature: Signature to verify.
command: Command that was signed.
Raises:
CommandSignatureValidationError: Invalid signature | Verify | python | google/grr | grr/server/grr_response_server/command_signer.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/command_signer.py | Apache-2.0 |
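The two methods above define an abstract signer interface. Below is a minimal sketch of a concrete implementation using an Ed25519 key from the `cryptography` package; the class name and import paths are assumptions, not GRR's actual signer:

```python
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives.asymmetric import ed25519

from grr_response_proto import execute_signed_command_pb2  # assumed import path
from grr_response_server import command_signer  # assumed import path


class Ed25519CommandSigner:  # hypothetical; GRR's concrete signers live elsewhere

  def __init__(self, key: ed25519.Ed25519PrivateKey) -> None:
    self._key = key

  def Sign(self, command: execute_signed_command_pb2.Command) -> bytes:
    # Sign the serialized command proto.
    return self._key.sign(command.SerializeToString())

  def Verify(
      self,
      signature: bytes,
      command: execute_signed_command_pb2.Command,
  ) -> None:
    try:
      self._key.public_key().verify(signature, command.SerializeToString())
    except InvalidSignature as error:
      raise command_signer.CommandSignatureValidationError(
          "Invalid signature"
      ) from error
```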
def testNotMatchingArgTypeRaises(self):
"""Check that flows reject not matching args type."""
with self.assertRaises(TypeError):
flow.StartFlow(
client_id=self.client_id,
flow_cls=CallStateFlow,
flow_args=dummy.DummyArgs(),
) | Check that flows reject args of a non-matching type. | testNotMatchingArgTypeRaises | python | google/grr | grr/server/grr_response_server/flow_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_test.py | Apache-2.0
def testCallState(self):
"""Test the ability to chain flows."""
CallStateFlow.success = False
# Run the flow in the simulated way
flow_test_lib.StartAndRunFlow(CallStateFlow, client_id=self.client_id)
self.assertEqual(CallStateFlow.success, True) | Test the ability to chain flows. | testCallState | python | google/grr | grr/server/grr_response_server/flow_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_test.py | Apache-2.0 |
def testCallStateProto(self):
"""Test the ability to chain states."""
CallStateProtoFlow.success = False
# Run the flow in the simulated way
flow_test_lib.StartAndRunFlow(CallStateProtoFlow, client_id=self.client_id)
self.assertTrue(CallStateProtoFlow.success) | Test the ability to chain states. | testCallStateProto | python | google/grr | grr/server/grr_response_server/flow_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_test.py | Apache-2.0 |
def testCallStateWithResponses(self):
"""Test the ability to chain flows."""
CallStateFlowWithResponses.success = False
# Run the flow in the simulated way
flow_test_lib.StartAndRunFlow(
CallStateFlowWithResponses, client_id=self.client_id
)
self.assertTrue(CallStateFlowWithResponses.success) | Test the ability to chain flows. | testCallStateWithResponses | python | google/grr | grr/server/grr_response_server/flow_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_test.py | Apache-2.0 |
def testCallStateProtoWithResponsesMixed(self):
"""Test the ability to chain states."""
CallStateProtoFlowWithResponsesMixed.success = False
# Run the flow in the simulated way
flow_test_lib.StartAndRunFlow(
CallStateProtoFlowWithResponsesMixed, client_id=self.client_id
)
self.assertTrue(CallStateProtoFlowWithResponsesMixed.success) | Test the ability to chain states. | testCallStateProtoWithResponsesMixed | python | google/grr | grr/server/grr_response_server/flow_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_test.py | Apache-2.0 |
def testCallStateProtoWithResponsesOnlyProtos(self):
"""Test the ability to chain states."""
CallStateProtoFlowWithResponsesOnlyProtos.success = False
# Run the flow in the simulated way
flow_test_lib.StartAndRunFlow(
CallStateProtoFlowWithResponsesOnlyProtos, client_id=self.client_id
)
self.assertTrue(CallStateProtoFlowWithResponsesOnlyProtos.success) | Test the ability to chain states. | testCallStateProtoWithResponsesOnlyProtos | python | google/grr | grr/server/grr_response_server/flow_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_test.py | Apache-2.0 |
def testChainedFlow(self):
"""Test the ability to chain flows."""
ParentFlow.success = False
# Run the flow in the simulated way
flow_test_lib.StartAndRunFlow(
ParentFlow, client_mock=ClientMock(), client_id=self.client_id
)
self.assertEqual(ParentFlow.success, True) | Test the ability to chain flows. | testChainedFlow | python | google/grr | grr/server/grr_response_server/flow_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_test.py | Apache-2.0 |
def testBrokenChainedFlowProto(self):
# TODO: Add child flow with arguments and check that they are
# passed correctly from parent to child.
class BrokenParentCallFlowProto(flow_base.FlowBase):
"""This flow will launch a broken child flow."""
# This is a global flag which will be set when the flow runs.
success = False
def Start(self):
# Call the child flow.
self.CallFlowProto("BrokenChildFlow", next_state="ReceiveHello")
def ReceiveHello(self, responses):
if responses or responses.status.status == "OK":
raise RuntimeError("Error not propagated to parent")
BrokenParentCallFlowProto.success = True
# The parent flow does not fail, just assert the child does.
flow_test_lib.StartAndRunFlow(
BrokenParentCallFlowProto,
client_mock=ClientMock(),
client_id=self.client_id,
check_flow_errors=False,
)
self.assertEqual(BrokenParentCallFlowProto.success, True) | This flow will launch a broken child flow. | testBrokenChainedFlowProto | python | google/grr | grr/server/grr_response_server/flow_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_test.py | Apache-2.0 |
def testLimitPropagation(self):
"""This tests that client actions are limited properly."""
client_mock = action_mocks.CPULimitClientMock(
user_cpu_usage=[10],
system_cpu_usage=[10],
network_usage=[1000],
runtime_us=[rdfvalue.Duration.From(1, rdfvalue.SECONDS)],
)
flow_test_lib.StartAndRunFlow(
flow_test_lib.CPULimitFlow,
client_mock=client_mock,
client_id=self.client_id,
cpu_limit=1000,
network_bytes_limit=10000,
runtime_limit=rdfvalue.Duration.From(5, rdfvalue.SECONDS),
)
self.assertEqual(client_mock.storage["cpulimit"], [1000, 980, 960])
self.assertEqual(client_mock.storage["networklimit"], [10000, 9000, 8000])
self.assertEqual(client_mock.storage["networklimit"], [10000, 9000, 8000])
self.assertEqual(
client_mock.storage["runtimelimit"],
[
rdfvalue.Duration.From(5, rdfvalue.SECONDS),
rdfvalue.Duration.From(4, rdfvalue.SECONDS),
rdfvalue.Duration.From(3, rdfvalue.SECONDS),
],
) | This tests that client actions are limited properly. | testLimitPropagation | python | google/grr | grr/server/grr_response_server/flow_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_test.py | Apache-2.0 |
def testCPULimitExceeded(self):
"""This tests that the cpu limit for flows is working."""
client_mock = action_mocks.CPULimitClientMock(
user_cpu_usage=[10], system_cpu_usage=[10], network_usage=[1000]
)
with test_lib.SuppressLogs():
flow_id = flow_test_lib.StartAndRunFlow(
flow_test_lib.CPULimitFlow,
client_mock=client_mock,
client_id=self.client_id,
cpu_limit=30,
network_bytes_limit=10000,
check_flow_errors=False,
)
rdf_flow = data_store.REL_DB.ReadFlowObject(self.client_id, flow_id)
self.assertEqual(rdf_flow.flow_state, flows_pb2.Flow.FlowState.ERROR)
self.assertIn("CPU limit exceeded", rdf_flow.error_message) | This tests that the cpu limit for flows is working. | testCPULimitExceeded | python | google/grr | grr/server/grr_response_server/flow_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_test.py | Apache-2.0 |
def testNetworkLimitExceeded(self):
"""This tests that the network limit for flows is working."""
client_mock = action_mocks.CPULimitClientMock(
user_cpu_usage=[10], system_cpu_usage=[10], network_usage=[1000]
)
with test_lib.SuppressLogs():
flow_id = flow_test_lib.StartAndRunFlow(
flow_test_lib.CPULimitFlow,
client_mock=client_mock,
client_id=self.client_id,
cpu_limit=1000,
network_bytes_limit=1500,
check_flow_errors=False,
)
rdf_flow = data_store.REL_DB.ReadFlowObject(self.client_id, flow_id)
self.assertEqual(rdf_flow.flow_state, flows_pb2.Flow.FlowState.ERROR)
self.assertIn("bytes limit exceeded", rdf_flow.error_message) | This tests that the network limit for flows is working. | testNetworkLimitExceeded | python | google/grr | grr/server/grr_response_server/flow_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_test.py | Apache-2.0 |
def testUserGetsNotificationWithNumberOfResultsProto(self):
username = "notification_test_user"
self.CreateUser(username)
class FlowWithMultipleResultTypesProto(flow_base.FlowBase):
"""Flow with multiple result types."""
proto_result_types = (
jobs_pb2.LogMessage,
jobs_pb2.PathSpec,
jobs_pb2.ClientInformation,
)
def Start(self):
self.SendReplyProto(jobs_pb2.LogMessage(data="foo"))
self.SendReplyProto(jobs_pb2.PathSpec(path="bar.txt"))
self.SendReplyProto(jobs_pb2.PathSpec(path="baz.txt"))
self.SendReplyProto(jobs_pb2.ClientInformation(client_name="foo"))
self.SendReplyProto(jobs_pb2.ClientInformation(client_name="bar"))
self.SendReplyProto(jobs_pb2.ClientInformation(client_name="baz"))
flow_test_lib.StartAndRunFlow(
FlowWithMultipleResultTypesProto,
client_id=self.client_id,
creator=username,
)
notifications = self.GetUserNotifications(username)
self.assertIn(
"FlowWithMultipleResultTypesProto completed with 6 results",
notifications[0].message,
) | Flow with multiple result types. | testUserGetsNotificationWithNumberOfResultsProto | python | google/grr | grr/server/grr_response_server/flow_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_test.py | Apache-2.0 |
def testOutputPluginsOnlyRunInParentFlow_DoesNotForwardProto(self):
class ChildFlowProtoIgnored(flow_base.FlowBase):
proto_result_types = (jobs_pb2.LogMessage,)
def Start(self):
self.SendReplyProto(jobs_pb2.LogMessage(data="IgnoredInParent"))
class ParentFlowWithoutForwardingOutputPluginsProto(flow_base.FlowBase):
"""This flow creates a Child without forwarding OutputPlugins."""
proto_result_types = (jobs_pb2.LogMessage,)
def Start(self):
# Call the child flow WITHOUT output plugins.
self.CallFlowProto(
ChildFlowProtoIgnored.__name__, next_state="IgnoreChildReplies"
)
def IgnoreChildReplies(self, responses):
del responses # Unused
self.SendReplyProto(jobs_pb2.LogMessage(data="Parent received"))
self.RunFlow(
flow_cls=ParentFlowWithoutForwardingOutputPluginsProto,
client_mock=ClientMock(),
output_plugins=[
rdf_output_plugin.OutputPluginDescriptor(
plugin_name="DummyFlowOutputPlugin"
)
],
)
# Parent calls once, and child doesn't call.
self.assertEqual(test_output_plugins.DummyFlowOutputPlugin.num_calls, 1)
# Parent has one response, child has two.
self.assertEqual(test_output_plugins.DummyFlowOutputPlugin.num_responses, 1) | This flow creates a Child without forwarding OutputPlugins. | testOutputPluginsOnlyRunInParentFlow_DoesNotForwardProto | python | google/grr | grr/server/grr_response_server/flow_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_test.py | Apache-2.0 |
def testAllConfigs(self):
"""Go through all our config files looking for errors."""
# Test the current loaded configuration.
configs = [config.CONFIG]
# Test all the other configs in the server config dir (/etc/grr by default)
glob_path = os.path.join(config.CONFIG["Config.directory"], "*.yaml")
for cfg_file in glob.glob(glob_path):
if os.access(cfg_file, os.R_OK):
configs.append(cfg_file)
else:
logging.info(
"Skipping checking %s, you probably need to be root", cfg_file
)
self.ValidateConfigs(configs) | Go through all our config files looking for errors. | testAllConfigs | python | google/grr | grr/server/grr_response_server/config_validation_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/config_validation_test.py | Apache-2.0 |
def setupAndRun(self, cls: type[flow_base.FlowBase]) -> flows_pb2.Flow:
"""Sets up and runs a flow of the given type."""
assert data_store.REL_DB is not None
db = data_store.REL_DB
client_id = db_test_utils.InitializeClient(db)
test_username = db_test_utils.InitializeUser(db)
flow_id = flow_test_lib.StartAndRunFlow(
cls,
action_mocks.ActionMock(action_mocks.Store),
client_id=client_id,
creator=test_username,
)
return db.ReadFlowObject(client_id, flow_id) | Sets up and runs a flow of the given type. | setupAndRun | python | google/grr | grr/server/grr_response_server/flow_base_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base_test.py | Apache-2.0 |
def testStorePersists_CallState(self):
class StoreCallStateFlow(
flow_base.FlowBase[flows_pb2.EmptyFlowArgs, tests_pb2.DummyFlowStore]
):
"""Dummy flow that uses store."""
proto_store_type = tests_pb2.DummyFlowStore
def Start(self) -> None:
self.store.msg = "Hello from Start!"
self.CallState(next_state="AfterCallState")
def AfterCallState(self, responses=None):
del responses
assert self.store.msg == "Hello from Start!"
self.store.msg = "Hello from AfterCallState!"
flow = self.setupAndRun(StoreCallStateFlow)
self.assertTrue(flow.HasField("store"))
store = tests_pb2.DummyFlowStore()
flow.store.Unpack(store)
self.assertEqual(store.msg, "Hello from AfterCallState!") | Dummy flow that uses store. | testStorePersists_CallState | python | google/grr | grr/server/grr_response_server/flow_base_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base_test.py | Apache-2.0 |
def testStorePersists_CallStateProto(self):
class StoreCallStateProtoFlow(
flow_base.FlowBase[flows_pb2.EmptyFlowArgs, tests_pb2.DummyFlowStore]
):
"""Dummy flow that uses store."""
proto_store_type = tests_pb2.DummyFlowStore
def Start(self) -> None:
self.store.msg = "Hello from Start!"
self.CallStateProto(next_state="AfterCallStateProto")
def AfterCallStateProto(self, responses=None):
del responses
assert self.store.msg == "Hello from Start!"
self.store.msg = "Hello from AfterCallStateProto!"
flow = self.setupAndRun(StoreCallStateProtoFlow)
self.assertTrue(flow.HasField("store"))
store = tests_pb2.DummyFlowStore()
flow.store.Unpack(store)
self.assertEqual(store.msg, "Hello from AfterCallStateProto!") | Dummy flow that uses store. | testStorePersists_CallStateProto | python | google/grr | grr/server/grr_response_server/flow_base_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base_test.py | Apache-2.0 |
def testStorePersists_CallStateInline(self):
class StoreCallStateInlineFlow(
flow_base.FlowBase[flows_pb2.EmptyFlowArgs, tests_pb2.DummyFlowStore]
):
"""Dummy flow that uses store."""
proto_store_type = tests_pb2.DummyFlowStore
def Start(self) -> None:
self.store.msg = "Hello from Start!"
self.CallStateInline(next_state="AfterCallStateInline")
def AfterCallStateInline(self, responses=None):
del responses
assert self.store.msg == "Hello from Start!"
self.store.msg = "Hello from AfterCallStateInline!"
flow = self.setupAndRun(StoreCallStateInlineFlow)
self.assertTrue(flow.HasField("store"))
store = tests_pb2.DummyFlowStore()
flow.store.Unpack(store)
self.assertEqual(store.msg, "Hello from AfterCallStateInline!") | Dummy flow that uses store. | testStorePersists_CallStateInline | python | google/grr | grr/server/grr_response_server/flow_base_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base_test.py | Apache-2.0 |
def testStorePersists_CallStateInlineProto(self):
class StoreCallStateInlineProtoFlow(
flow_base.FlowBase[flows_pb2.EmptyFlowArgs, tests_pb2.DummyFlowStore]
):
"""Dummy flow that uses store."""
proto_store_type = tests_pb2.DummyFlowStore
def Start(self) -> None:
self.store.msg = "Hello from Start!"
self.CallStateInlineProto(next_state="AfterCallStateInlineProto")
@flow_base.UseProto2AnyResponses
def AfterCallStateInlineProto(self, responses=None):
del responses
assert self.store.msg == "Hello from Start!"
self.store.msg = "Hello from AfterCallStateInlineProto!"
flow = self.setupAndRun(StoreCallStateInlineProtoFlow)
self.assertTrue(flow.HasField("store"))
store = tests_pb2.DummyFlowStore()
flow.store.Unpack(store)
self.assertEqual(store.msg, "Hello from AfterCallStateInlineProto!") | Dummy flow that uses store. | testStorePersists_CallStateInlineProto | python | google/grr | grr/server/grr_response_server/flow_base_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base_test.py | Apache-2.0 |
def testStorePersists_CallStateInlineProtoWithResponses(self):
class StoreCallStateInlineProtoWithResponsesFlow(
flow_base.FlowBase[flows_pb2.EmptyFlowArgs, tests_pb2.DummyFlowStore]
):
"""Dummy flow that uses store."""
proto_store_type = tests_pb2.DummyFlowStore
def Start(self) -> None:
self.store.msg = "Hello from Start!"
self.CallStateInlineProtoWithResponses(
next_state="AfterCallStateInlineProtoWithResponses"
)
@flow_base.UseProto2AnyResponses
def AfterCallStateInlineProtoWithResponses(self, responses=None):
del responses
assert self.store.msg == "Hello from Start!"
self.store.msg = "Hello from AfterCallStateInlineProtoWithResponses!"
flow = self.setupAndRun(StoreCallStateInlineProtoWithResponsesFlow)
self.assertTrue(flow.HasField("store"))
store = tests_pb2.DummyFlowStore()
flow.store.Unpack(store)
self.assertEqual(
store.msg, "Hello from AfterCallStateInlineProtoWithResponses!"
) | Dummy flow that uses store. | testStorePersists_CallStateInlineProtoWithResponses | python | google/grr | grr/server/grr_response_server/flow_base_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base_test.py | Apache-2.0 |
def testStorePersists_CallFlow(self):
class StoreCallFlowFlow(
flow_base.FlowBase[flows_pb2.EmptyFlowArgs, tests_pb2.DummyFlowStore]
):
"""Dummy flow that uses store."""
proto_store_type = tests_pb2.DummyFlowStore
def Start(self) -> None:
self.store.msg = "Hello from Start!"
self.CallFlow(
flow_test_lib.DummyFlowWithSingleReply.__name__,
next_state="AfterCallFlow",
)
def AfterCallFlow(self, responses=None):
del responses
assert self.store.msg == "Hello from Start!"
self.store.msg = "Hello from AfterCallFlow!"
flow = self.setupAndRun(StoreCallFlowFlow)
self.assertTrue(flow.HasField("store"))
store = tests_pb2.DummyFlowStore()
flow.store.Unpack(store)
self.assertEqual(store.msg, "Hello from AfterCallFlow!") | Dummy flow that uses store. | testStorePersists_CallFlow | python | google/grr | grr/server/grr_response_server/flow_base_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base_test.py | Apache-2.0 |
def testStorePersists_CallFlowProto(self):
class StoreCallFlowProtoFlow(
flow_base.FlowBase[flows_pb2.EmptyFlowArgs, tests_pb2.DummyFlowStore]
):
"""Dummy flow that uses store."""
proto_store_type = tests_pb2.DummyFlowStore
def Start(self) -> None:
self.store.msg = "Hello from Start!"
self.CallFlowProto(
flow_test_lib.DummyFlowWithSingleReply.__name__,
next_state="AfterCallFlowProto",
)
def AfterCallFlowProto(self, responses=None):
del responses
assert self.store.msg == "Hello from Start!"
self.store.msg = "Hello from AfterCallFlowProto!"
flow = self.setupAndRun(StoreCallFlowProtoFlow)
self.assertTrue(flow.HasField("store"))
store = tests_pb2.DummyFlowStore()
flow.store.Unpack(store)
self.assertEqual(store.msg, "Hello from AfterCallFlowProto!") | Dummy flow that uses store. | testStorePersists_CallFlowProto | python | google/grr | grr/server/grr_response_server/flow_base_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base_test.py | Apache-2.0 |
def testStorePersists_CallClient(self):
class StoreCallClientFlow(
flow_base.FlowBase[flows_pb2.EmptyFlowArgs, tests_pb2.DummyFlowStore]
):
"""Dummy flow that uses store."""
proto_store_type = tests_pb2.DummyFlowStore
def Start(self) -> None:
self.store.msg = "Hello from Start!"
self.CallClient(
action_registry.ACTION_STUB_BY_ID["Store"],
request=rdf_protodict.DataBlob(string="Hey!"),
next_state="AfterCallClient",
)
def AfterCallClient(self, responses=None):
del responses
assert self.store.msg == "Hello from Start!"
self.store.msg = "Hello from AfterCallClient!"
flow = self.setupAndRun(StoreCallClientFlow)
self.assertTrue(flow.HasField("store"))
store = tests_pb2.DummyFlowStore()
flow.store.Unpack(store)
self.assertEqual(store.msg, "Hello from AfterCallClient!") | Dummy flow that uses store. | testStorePersists_CallClient | python | google/grr | grr/server/grr_response_server/flow_base_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base_test.py | Apache-2.0 |
def testStorePersists_CallClientProto(self):
class StoreCallClientProtoFlow(
flow_base.FlowBase[flows_pb2.EmptyFlowArgs, tests_pb2.DummyFlowStore]
):
"""Dummy flow that uses store."""
proto_store_type = tests_pb2.DummyFlowStore
def Start(self) -> None:
self.store.msg = "Hello from Start!"
self.CallClientProto(
action_registry.ACTION_STUB_BY_ID["Store"],
action_args=jobs_pb2.DataBlob(string="Hey!"),
next_state="AfterCallClientProto",
)
def AfterCallClientProto(self, responses=None):
del responses
assert self.store.msg == "Hello from Start!"
self.store.msg = "Hello from AfterCallClientProto!"
flow = self.setupAndRun(StoreCallClientProtoFlow)
self.assertTrue(flow.HasField("store"))
store = tests_pb2.DummyFlowStore()
flow.store.Unpack(store)
self.assertEqual(store.msg, "Hello from AfterCallClientProto!") | Dummy flow that uses store. | testStorePersists_CallClientProto | python | google/grr | grr/server/grr_response_server/flow_base_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base_test.py | Apache-2.0 |
def ProcessMessageHandlerRequests(
requests: Sequence[objects_pb2.MessageHandlerRequest],
) -> None:
"""Processes message handler requests."""
logging.info(
"Leased message handler request ids: %s",
",".join(str(r.request_id) for r in requests),
)
grouped_requests = collection.Group(requests, lambda r: r.handler_name)
for handler_name, requests_for_handler in grouped_requests.items():
requests_for_handler = [
mig_objects.ToRDFMessageHandlerRequest(r) for r in requests_for_handler
]
handler_cls = handler_registry.handler_name_map.get(handler_name)
if not handler_cls:
logging.error("Unknown message handler: %s", handler_name)
continue
num_requests = len(requests_for_handler)
WELL_KNOWN_FLOW_REQUESTS.Increment(
fields=[handler_name], delta=num_requests
)
try:
logging.debug(
"Running %d messages for handler %s", num_requests, handler_name
)
handler_cls().ProcessMessages(requests_for_handler)
except Exception as e: # pylint: disable=broad-except
logging.exception(
"Exception while processing message handler %s: %s", handler_name, e
)
logging.info(
"Deleting message handler request ids: %s",
",".join(str(r.request_id) for r in requests),
)
data_store.REL_DB.DeleteMessageHandlerRequests(requests) | Processes message handler requests. | ProcessMessageHandlerRequests | python | google/grr | grr/server/grr_response_server/worker_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/worker_lib.py | Apache-2.0 |
def __init__(self, disabled: bool = False):
"""Constructor."""
self.disabled = disabled
logging.info("Started GRR worker.") | Constructor. | __init__ | python | google/grr | grr/server/grr_response_server/worker_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/worker_lib.py | Apache-2.0 |
def Run(self) -> None:
"""Event loop."""
if not self.disabled:
data_store.REL_DB.RegisterMessageHandler(
ProcessMessageHandlerRequests,
self.message_handler_lease_time,
limit=100,
)
data_store.REL_DB.RegisterFlowProcessingHandler(self.ProcessFlow)
try:
# The main thread just keeps sleeping and listens to keyboard interrupt
# events in case the server is running from a console.
while True:
time.sleep(3600)
except KeyboardInterrupt:
logging.info("Caught interrupt, exiting.")
self.Shutdown() | Event loop. | Run | python | google/grr | grr/server/grr_response_server/worker_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/worker_lib.py | Apache-2.0 |
def _ReleaseProcessedFlow(self, flow_obj: rdf_flow_objects.Flow) -> bool:
"""Release a processed flow if the processing deadline is not exceeded."""
rdf_flow = flow_obj.rdf_flow
if rdf_flow.processing_deadline < rdfvalue.RDFDatetime.Now():
raise flow_base.FlowError(
"Lease expired for flow %s on %s (%s)."
% (
rdf_flow.flow_id,
rdf_flow.client_id,
rdf_flow.processing_deadline,
),
)
flow_obj.FlushQueuedMessages()
proto_flow = mig_flow_objects.ToProtoFlow(rdf_flow)
return data_store.REL_DB.ReleaseProcessedFlow(proto_flow) | Release a processed flow if the processing deadline is not exceeded. | _ReleaseProcessedFlow | python | google/grr | grr/server/grr_response_server/worker_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/worker_lib.py | Apache-2.0 |
def ProcessFlow(
self, flow_processing_request: flows_pb2.FlowProcessingRequest
) -> None:
"""The callback for the flow processing queue."""
client_id = flow_processing_request.client_id
flow_id = flow_processing_request.flow_id
data_store.REL_DB.AckFlowProcessingRequests([flow_processing_request])
try:
flow = data_store.REL_DB.LeaseFlowForProcessing(
client_id,
flow_id,
processing_time=rdfvalue.Duration.From(6, rdfvalue.HOURS),
)
except db.ParentHuntIsNotRunningError:
flow_base.TerminateFlow(client_id, flow_id, "Parent hunt stopped.")
return
rdf_flow = mig_flow_objects.ToRDFFlow(flow)
first_request_to_process = rdf_flow.next_request_to_process
logging.info(
"Processing Flow %s/%s/%d (%s).",
client_id,
flow_id,
first_request_to_process,
rdf_flow.flow_class_name,
)
flow_cls = registry.FlowRegistry.FlowClassByName(rdf_flow.flow_class_name)
flow_obj = flow_cls(rdf_flow)
if not flow_obj.IsRunning():
logging.info(
"Received a request to process flow %s on client %s that is not "
"running.",
flow_id,
client_id,
)
return
processed, incrementally_processed = flow_obj.ProcessAllReadyRequests()
if processed == 0 and incrementally_processed == 0:
raise FlowHasNothingToProcessError(
"Unable to process any requests for flow %s on client %s."
% (flow_id, client_id)
)
while not self._ReleaseProcessedFlow(flow_obj):
processed, incrementally_processed = flow_obj.ProcessAllReadyRequests()
if processed == 0 and incrementally_processed == 0:
raise FlowHasNothingToProcessError(
"%s/%s: ReleaseProcessedFlow returned false but no "
"request could be processed (next req: %d)."
% (client_id, flow_id, flow_obj.rdf_flow.next_request_to_process)
)
if flow_obj.IsRunning():
logging.info(
"Processing Flow %s/%s/%d (%s) done, next request to process: %d.",
client_id,
flow_id,
first_request_to_process,
rdf_flow.flow_class_name,
rdf_flow.next_request_to_process,
)
else:
logging.info(
"Processing Flow %s/%s/%d (%s) done, flow is done.",
client_id,
flow_id,
first_request_to_process,
rdf_flow.flow_class_name,
) | The callback for the flow processing queue. | ProcessFlow | python | google/grr | grr/server/grr_response_server/worker_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/worker_lib.py | Apache-2.0 |
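ProcessFlow leases the flow, drains all ready requests, and keeps processing until the release call succeeds, raising if a pass makes no progress. The following is a minimal standalone sketch of that control flow; process_ready() and release() are hypothetical callables standing in for the flow object and database calls, not GRR APIs.

# Minimal control-flow sketch of the process-until-released loop in
# ProcessFlow; process_ready() and release() are hypothetical stand-ins.
def drain_flow(process_ready, release):
  processed = process_ready()
  if processed == 0:
    raise RuntimeError("Unable to process any requests.")
  while not release():
    processed = process_ready()
    if processed == 0:
      raise RuntimeError(
          "ReleaseProcessedFlow returned false but no request could be "
          "processed.")

# Example: two processing passes are needed before the release succeeds.
batches = iter([2, 1])
releases = iter([False, True])
drain_flow(lambda: next(batches), lambda: next(releases))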
def CreateBlobStore(self):
"""Create a test blob store.
Returns:
A tuple (blob_store, cleanup), where blob_store is an instance of
blob_store.BlobStore to be tested and cleanup is a function which
destroys blob_store, releasing any resources held by it.
""" | Create a test blob store.
Returns:
A tuple (blob_store, cleanup), where blob_store is an instance of
blob_store.BlobStore to be tested and cleanup is a function which
destroys blob_store, releasing any resources held by it. | CreateBlobStore | python | google/grr | grr/server/grr_response_server/blob_store_test_mixin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/blob_store_test_mixin.py | Apache-2.0 |
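Concrete test mixins are expected to return the store together with a cleanup callable. Below is a generic standalone sketch of that (store, cleanup) contract; it uses a temporary directory rather than any GRR blob store class.

# Generic sketch of the (store, cleanup) contract; no GRR classes involved.
import shutil
import tempfile

def CreateTempStore():
  """Returns (store, cleanup), where cleanup releases the store's resources."""
  path = tempfile.mkdtemp()
  store = {"path": path}
  def Cleanup():
    shutil.rmtree(path, ignore_errors=True)
  return store, Cleanup

store, cleanup = CreateTempStore()
try:
  pass  # Exercise the store here.
finally:
  cleanup()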
def FromResponses(cls, request=None, responses=None) -> "Responses":
"""Creates a Responses object from new style flow request and responses."""
res = cls()
res.request = request
if request:
res.request_data = request.request_data
for r in responses or []:
if isinstance(r, rdf_flow_objects.FlowResponse):
res.responses.append(r.payload)
elif isinstance(r, rdf_flow_objects.FlowStatus):
res.status = r
res.success = r.status == "OK"
elif isinstance(r, rdf_flow_objects.FlowIterator):
pass
else:
raise TypeError("Got unexpected response type: %s" % type(r))
return res | Creates a Responses object from new style flow request and responses. | FromResponses | python | google/grr | grr/server/grr_response_server/flow_responses.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_responses.py | Apache-2.0 |
def FromResponsesProto2Any(
cls,
responses: Sequence[
Union[
rdf_flow_objects.FlowResponse,
rdf_flow_objects.FlowStatus,
rdf_flow_objects.FlowIterator,
],
],
request: Optional[rdf_flow_objects.FlowRequest] = None,
) -> "Responses[any_pb2.Any]":
# pytype: enable=name-error
"""Creates a `Response` object from raw flow responses.
Unlike the `Responses.FromResponses` method, this method does not use any
RDF-value magic to deserialize `Any` messages on the fly. Instead, it just
passes raw `Any` message as it is stored in the `any_payload` field of the
`FlowResponse` message.
Args:
responses: Flow responses from which to construct this object.
request: Flow request to which these responses belong.
Returns:
Wrapped flow responses.
"""
result = Responses()
if request is not None:
result.request = request
result.request_data = request.request_data
for response in responses:
if isinstance(response, rdf_flow_objects.FlowStatus):
if result.status is not None:
raise ValueError(f"Duplicated status response: {response}")
result.success = (
response.status == rdf_flow_objects.FlowStatus.Status.OK
)
result.status = response
elif isinstance(response, rdf_flow_objects.FlowResponse):
result.responses.append(response.any_payload.AsPrimitiveProto())
else:
# Note that this also covers `FlowIterator`—it is a legacy class that
# should no longer be used and new state methods (that are expected to
# trigger this code path) should not rely on it.
raise TypeError(f"Unexpected response: {response}")
if result.status is None:
raise ValueError("Missing status response")
return result | Creates a `Response` object from raw flow responses.
Unlike the `Responses.FromResponses` method, this method does not use any
RDF-value magic to deserialize `Any` messages on the fly. Instead, it just
passes raw `Any` message as it is stored in the `any_payload` field of the
`FlowResponse` message.
Args:
responses: Flow responses from which to construct this object.
request: Flow request to which these responses belong.
Returns:
Wrapped flow responses. | FromResponsesProto2Any | python | google/grr | grr/server/grr_response_server/flow_responses.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_responses.py | Apache-2.0 |
def First(self) -> Optional[T]:
"""A convenience method to return the first response."""
for x in self:
return x | A convenience method to return the first response. | First | python | google/grr | grr/server/grr_response_server/flow_responses.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_responses.py | Apache-2.0 |
def Last(self) -> Optional[T]:
"""A convenience method to return the last response."""
*_, last = self
return last | A convenience method to return the last response. | Last | python | google/grr | grr/server/grr_response_server/flow_responses.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_responses.py | Apache-2.0 |
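Last relies on starred unpacking, which raises ValueError on an empty responses object rather than returning None. The standalone sketch below (a hypothetical _Last helper, not part of GRR) shows that behavior.

# Illustrative sketch only; _Last is a hypothetical stand-in for Responses.Last.
from typing import Iterable, Optional, TypeVar

T = TypeVar("T")

def _Last(items: Iterable[T]) -> Optional[T]:
  # Starred unpacking keeps only the final element.
  *_, last = items
  return last

assert _Last([1, 2, 3]) == 3
# Unlike First(), an empty iterable raises:
# _Last([]) -> ValueError: not enough values to unpack (expected at least 1, got 0)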
def GetBigQueryClient(
service_account_json=None, project_id=None, dataset_id=None
):
"""Create a BigQueryClient."""
service_account_data = (
service_account_json or config.CONFIG["BigQuery.service_acct_json"]
)
project_id = project_id or config.CONFIG["BigQuery.project_id"]
dataset_id = dataset_id or config.CONFIG["BigQuery.dataset_id"]
if not (service_account_data and project_id and dataset_id):
raise RuntimeError(
"BigQuery.service_account_json, "
"BigQuery.project_id and BigQuery.dataset_id "
"must be defined."
)
creds = ServiceAccountCredentials.from_json_keyfile_dict(
json.loads(service_account_data), scopes=BIGQUERY_SCOPE
)
http_obj = httplib2.Http()
http_obj = creds.authorize(http_obj)
service = discovery.build("bigquery", "v2", http=http_obj)
return BigQueryClient(
project_id=project_id, bq_service=service, dataset_id=dataset_id
) | Create a BigQueryClient. | GetBigQueryClient | python | google/grr | grr/server/grr_response_server/bigquery.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/bigquery.py | Apache-2.0 |
def CreateDataset(self):
"""Create a dataset."""
body = {
"datasetReference": {
"datasetId": self.dataset_id,
"description": "Data exported from GRR",
"friendlyName": "GRRExportData",
"projectId": self.project_id,
}
}
result = (
self.service.datasets()
.insert(projectId=self.project_id, body=body)
.execute()
)
self.datasets[self.dataset_id] = result
return result | Create a dataset. | CreateDataset | python | google/grr | grr/server/grr_response_server/bigquery.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/bigquery.py | Apache-2.0 |
def GetDataset(self, dataset_id):
"""Get a dataset."""
if dataset_id not in self.datasets:
try:
result = (
self.service.datasets()
.get(projectId=self.project_id, datasetId=dataset_id)
.execute()
)
self.datasets[dataset_id] = result
except errors.HttpError:
return None
return self.datasets[dataset_id] | Get a dataset. | GetDataset | python | google/grr | grr/server/grr_response_server/bigquery.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/bigquery.py | Apache-2.0 |
def IsErrorRetryable(self, e):
"""Return true if we should retry on this error.
Default status codes come from this advice:
https://developers.google.com/api-client-library/python/guide/media_upload
Args:
e: errors.HttpError object.
Returns:
boolean
"""
return e.resp.status in config.CONFIG["BigQuery.retry_status_codes"] | Return true if we should retry on this error.
Default status codes come from this advice:
https://developers.google.com/api-client-library/python/guide/media_upload
Args:
e: errors.HttpError object.
Returns:
boolean | IsErrorRetryable | python | google/grr | grr/server/grr_response_server/bigquery.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/bigquery.py | Apache-2.0 |
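The retry decision is a simple membership test against the configured status codes. A standalone sketch follows; the code set below is an assumed example, not the actual BigQuery.retry_status_codes default.

# Standalone sketch of the retryability check; the status codes are assumed.
RETRY_STATUS_CODES = frozenset({500, 502, 503, 504})

def IsRetryable(status_code: int) -> bool:
  # Retry only transient, server-side failures.
  return status_code in RETRY_STATUS_CODES

assert IsRetryable(503)
assert not IsRetryable(404)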
def InsertData(self, table_id, fd, schema, job_id):
"""Insert data into a bigquery table.
If the table specified doesn't exist, it will be created with the specified
schema.
Args:
table_id: string table id
fd: open file descriptor containing the newline separated JSON
schema: BigQuery schema dict
job_id: string job id
Raises:
BigQueryJobUploadError: If the job still fails after the configured retries.
"""
configuration = {
"schema": {
"fields": schema,
},
"destinationTable": {
"projectId": self.project_id,
"tableId": table_id,
"datasetId": self.dataset_id,
},
"sourceFormat": "NEWLINE_DELIMITED_JSON",
}
body = {
"configuration": {
"load": configuration,
},
"jobReference": {
"projectId": self.project_id,
"jobId": job_id,
},
}
# File content can be gzipped for bandwidth efficiency. The server handles
# it correctly without any changes to the request.
mediafile = http.MediaFileUpload(
fd.name, mimetype="application/octet-stream"
)
job = self.service.jobs().insert(
projectId=self.project_id, body=body, media_body=mediafile
)
first_try = True
@retry.When(
errors.HttpError,
self.IsErrorRetryable,
opts=retry.Opts(
attempts=config.CONFIG["BigQuery.retry_max_attempts"],
init_delay=config.CONFIG["BigQuery.retry_interval"].AsTimedelta(),
backoff=config.CONFIG["BigQuery.retry_multiplier"],
),
)
def Execute() -> None:
nonlocal first_try
try:
job.execute()
except errors.HttpError:
if first_try:
first_try = False
if self.GetDataset(self.dataset_id):
logging.exception("Error with job: %s", job_id)
else:
# If this is our first export ever, we need to create the dataset.
logging.info("Attempting to create dataset: %s", self.dataset_id)
self.CreateDataset()
raise
try:
Execute()
except errors.HttpError as error:
raise BigQueryJobUploadError(f"Failed job '{job_id}'") from error | Insert data into a bigquery table.
If the table specified doesn't exist, it will be created with the specified
schema.
Args:
table_id: string table id
fd: open file descriptor containing the newline separated JSON
schema: BigQuery schema dict
job_id: string job id
Raises:
BigQueryJobUploadError: If the job still fails after the configured retries. | InsertData | python | google/grr | grr/server/grr_response_server/bigquery.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/bigquery.py | Apache-2.0 |
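The upload is retried with exponential backoff, and the dataset is created lazily if the first attempt fails because it does not exist yet. Below is a standalone sketch of the same retry shape without the GRR retry helper or a BigQuery client; job, is_retryable, and the retry parameters are illustrative assumptions.

# Standalone retry-with-backoff sketch; not the GRR retry.When decorator.
import time

def RunWithRetries(job, is_retryable, attempts=3, init_delay=1.0, backoff=2.0):
  """Calls job() until it succeeds, retrying retryable errors with backoff."""
  delay = init_delay
  for attempt in range(1, attempts + 1):
    try:
      return job()
    except Exception as e:  # The real code only catches errors.HttpError.
      if attempt == attempts or not is_retryable(e):
        raise
      time.sleep(delay)
      delay *= backoff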
def ProcessEvents(self, msgs=None, publisher_username=None):
"""Processes a message for the event.""" | Processes a message for the event. | ProcessEvents | python | google/grr | grr/server/grr_response_server/events.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/events.py | Apache-2.0 |
def PublishEvent(cls, event_name, event, username=None):
"""Publish the message into all listeners of the event.
We send the message to all event handlers which contain this
string in their EVENT static member. This allows the event to be
sent to multiple interested listeners.
Args:
event_name: An event name.
event: The message to send to the event handler.
username: Username of the publisher of the message.
Raises:
ValueError: If the message is invalid. The message must be a Semantic
Value (instance of RDFValue) or a full GrrMessage.
"""
cls.PublishMultipleEvents({event_name: [event]}, username=username) | Publish the message into all listeners of the event.
We send the message to all event handlers which contain this
string in their EVENT static member. This allows the event to be
sent to multiple interested listeners.
Args:
event_name: An event name.
event: The message to send to the event handler.
username: Username of the publisher of the message.
Raises:
ValueError: If the message is invalid. The message must be a Semantic
Value (instance of RDFValue) or a full GrrMessage. | PublishEvent | python | google/grr | grr/server/grr_response_server/events.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/events.py | Apache-2.0 |
def PublishMultipleEvents(cls, events, username=None):
"""Publishes multiple messages at once.
Args:
events: A dict with keys being event names and values being lists of
messages.
username: Username of the publisher of the messages.
Raises:
ValueError: If the message is invalid. The message must be a Semantic
Value (instance of RDFValue) or a full GrrMessage.
"""
event_name_map = EventRegistry.EVENT_NAME_MAP
for event_name, messages in events.items():
if not isinstance(event_name, str):
raise ValueError(
"Event names should be string, got: %s" % type(event_name)
)
for msg in messages:
if not isinstance(msg, rdfvalue.RDFValue):
raise ValueError("Can only publish RDFValue instances.")
for event_cls in event_name_map.get(event_name, []):
event_cls().ProcessEvents(messages, publisher_username=username) | Publishes multiple messages at once.
Args:
events: A dict with keys being event names and values being lists of
messages.
username: Username of the publisher of the messages.
Raises:
ValueError: If the message is invalid. The message must be a Semantic
Value (instance of RDFValue) or a full GrrMessage. | PublishMultipleEvents | python | google/grr | grr/server/grr_response_server/events.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/events.py | Apache-2.0 |
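Publishing fans each message out to every handler class registered for its event name. The sketch below models that with a plain dictionary; EVENT_NAME_MAP and AuditListener are hypothetical stand-ins for the GRR event registry.

# Hypothetical fan-out sketch of the event registry; not GRR classes.
from collections import defaultdict

EVENT_NAME_MAP = defaultdict(list)

class AuditListener:
  def ProcessEvents(self, msgs, publisher_username=None):
    print("%s published %d message(s)" % (publisher_username, len(msgs)))

EVENT_NAME_MAP["Audit"].append(AuditListener)

def PublishMultiple(events, username=None):
  for event_name, messages in events.items():
    for handler_cls in EVENT_NAME_MAP.get(event_name, []):
      handler_cls().ProcessEvents(messages, publisher_username=username)

PublishMultiple({"Audit": ["login", "logout"]}, username="admin")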
def GetClientIDsForHostnames(
hostnames: Iterable[str],
) -> Mapping[str, Sequence[str]]:
"""Gets all client_ids for a given list of hostnames or FQDNS.
Args:
hostnames: A list of hostnames / FQDNs.
Returns:
A dict with a list of all known GRR client_ids for each hostname.
"""
index = ClientIndex()
keywords = set()
for hostname in hostnames:
if hostname.startswith("host:"):
keywords.add(hostname)
else:
keywords.add("host:%s" % hostname)
results = index.ReadClientPostingLists(keywords)
result = {}
for keyword, hits in results.items():
result[keyword[len("host:") :]] = hits
return result | Gets all client_ids for a given list of hostnames or FQDNs.
Args:
hostnames: A list of hostnames / FQDNs.
Returns:
A dict with a list of all known GRR client_ids for each hostname. | GetClientIDsForHostnames | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
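The lookup turns every hostname into a host: keyword and strips the prefix again when keying the result. Below is a standalone sketch of those two steps; the posting-lists dict stands in for the index lookup.

# Standalone sketch of the keyword normalization around the index lookup.
def HostnamesToKeywords(hostnames):
  keywords = set()
  for hostname in hostnames:
    keywords.add(hostname if hostname.startswith("host:") else "host:" + hostname)
  return keywords

def StripHostPrefix(posting_lists):
  return {keyword[len("host:"):]: hits for keyword, hits in posting_lists.items()}

assert HostnamesToKeywords(["web-01", "host:db-02"]) == {"host:web-01", "host:db-02"}
assert StripHostPrefix({"host:web-01": ["C.1111"]}) == {"web-01": ["C.1111"]}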
def _AnalyzeKeywords(self, keywords):
"""Extracts a start time from a list of keywords if present."""
start_time = max(
rdfvalue.RDFDatetime.Now() - rdfvalue.Duration.From(180, rdfvalue.DAYS),
data_store.REL_DB.MinTimestamp(),
)
filtered_keywords = []
for k in keywords:
if k.startswith(self.START_TIME_PREFIX):
try:
start_time = rdfvalue.RDFDatetime.FromHumanReadable(
k[self.START_TIME_PREFIX_LEN :]
)
except ValueError:
pass
else:
filtered_keywords.append(k)
if not filtered_keywords:
filtered_keywords.append(".")
return start_time, filtered_keywords | Extracts a start time from a list of keywords if present. | _AnalyzeKeywords | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
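A start-time keyword overrides the default 180-day lookback and is removed from the search terms. The sketch below reproduces that filtering with datetime in place of rdfvalue.RDFDatetime; the exact prefix string used here is an assumption.

# Standalone sketch of start-time extraction; the prefix value is assumed.
from datetime import datetime, timedelta

START_TIME_PREFIX = "start_date:"

def AnalyzeKeywords(keywords):
  start_time = datetime.now() - timedelta(days=180)
  filtered = []
  for k in keywords:
    if k.startswith(START_TIME_PREFIX):
      try:
        start_time = datetime.fromisoformat(k[len(START_TIME_PREFIX):])
      except ValueError:
        pass  # Ignore unparsable dates, as the original does.
    else:
      filtered.append(k)
  return start_time, filtered or ["."]

start, terms = AnalyzeKeywords(["start_date:2024-01-01", "host:web-01"])
assert terms == ["host:web-01"]
assert start == datetime(2024, 1, 1)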
def LookupClients(self, keywords: Iterable[str]) -> Sequence[str]:
"""Returns a list of client URNs associated with keywords.
Args:
keywords: The list of keywords to search by.
Returns:
A list of client URNs.
Raises:
ValueError: A string (single keyword) was passed instead of an iterable.
"""
if isinstance(keywords, str):
raise ValueError(
"Keywords should be an iterable, not a string (got %s)." % keywords
)
start_time, filtered_keywords = self._AnalyzeKeywords(keywords)
keyword_map = data_store.REL_DB.ListClientsForKeywords(
list(map(self._NormalizeKeyword, filtered_keywords)),
start_time=start_time,
)
relevant_set = functools.reduce(
operator.and_, map(set, keyword_map.values())
)
return sorted(relevant_set) | Returns a list of client URNs associated with keywords.
Args:
keywords: The list of keywords to search by.
Returns:
A list of client URNs.
Raises:
ValueError: A string (single keyword) was passed instead of an iterable. | LookupClients | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
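The final result is the intersection of the posting lists for all keywords, computed with functools.reduce and operator.and_ over sets. A standalone sketch with invented sample data:

# Standalone sketch of the intersection step; the keyword_map data is invented.
import functools
import operator

keyword_map = {
    "host:web-01": ["C.1111", "C.2222"],
    "label:prod": ["C.2222", "C.3333"],
}

relevant = functools.reduce(operator.and_, map(set, keyword_map.values()))
assert sorted(relevant) == ["C.2222"]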
def ReadClientPostingLists(
self, keywords: Iterable[str]
) -> Mapping[str, Sequence[str]]:
"""Looks up all clients associated with any of the given keywords.
Args:
keywords: A list of keywords we are interested in.
Returns:
A dict mapping each keyword to a list of matching clients.
"""
start_time, filtered_keywords = self._AnalyzeKeywords(keywords)
return data_store.REL_DB.ListClientsForKeywords(
filtered_keywords, start_time=start_time
) | Looks up all clients associated with any of the given keywords.
Args:
keywords: A list of keywords we are interested in.
Returns:
A dict mapping each keyword to a list of matching clients. | ReadClientPostingLists | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
def AnalyzeClient(self, client: rdf_objects.ClientSnapshot) -> Sequence[str]:
"""Finds the client_id and keywords for a client.
Args:
client: A ClientSnapshot object record to find keywords for.
Returns:
A Sequence of keywords related to client.
"""
# Start with a universal keyword, used to find all clients.
#
# TODO(user): Remove the universal keyword once we have a better way
# to do this, i.e., once we have a storage library which can list all
# clients directly.
keywords = set(["."])
def TryAppend(prefix, keyword):
precondition.AssertType(prefix, str)
precondition.AssertType(keyword, str)
if keyword:
keyword_string = self._NormalizeKeyword(keyword)
keywords.add(keyword_string)
if prefix:
keywords.add(prefix + ":" + keyword_string)
def TryAppendPrefixes(prefix, keyword, delimiter):
TryAppend(prefix, keyword)
segments = keyword.split(delimiter)
for i in range(1, len(segments)):
TryAppend(prefix, delimiter.join(segments[0:i]))
return len(segments)
def TryAppendIP(ip):
TryAppend("ip", ip)
# IPv4?
if TryAppendPrefixes("ip", str(ip), ".") == 4:
return
# IPv6?
TryAppendPrefixes("ip", str(ip), ":")
def TryAppendMac(mac):
TryAppend("mac", mac)
if len(mac) == 12:
# If looks like a mac address without ":" symbols, also add the keyword
# with them.
TryAppend("mac", ":".join([mac[i : i + 2] for i in range(0, 12, 2)]))
TryAppend("host", client.knowledge_base.fqdn)
host = client.knowledge_base.fqdn.split(".", 1)[0]
TryAppendPrefixes("host", host, "-")
TryAppendPrefixes("host", client.knowledge_base.fqdn, ".")
TryAppend("", client.knowledge_base.os)
TryAppend("", client.os_release)
TryAppend("", client.os_version)
TryAppend("", client.kernel)
TryAppend("", client.arch)
TryAppend("serial_number", client.hardware_info.serial_number)
TryAppend("system_uuid", client.hardware_info.system_uuid)
kb = client.knowledge_base
if kb:
for user in kb.users:
TryAppend("user", user.username)
TryAppend("", user.full_name)
if user.full_name:
for name in user.full_name.split():
# full_name often includes nicknames and similar, wrapped in
# punctuation, e.g. "Thomas 'TJ' Jones". We remove the most common
# wrapping characters.
TryAppend("", name.strip("\"'()"))
for ip in client.GetIPAddresses():
TryAppendIP(ip)
for mac in client.GetMacAddresses():
TryAppendMac(mac)
client_info = client.startup_info.client_info
if client_info:
TryAppend("client", client_info.client_name)
TryAppend("client", str(client_info.client_version))
if client_info.labels:
for label in client_info.labels:
TryAppend("label", label)
return keywords | Finds the client_id and keywords for a client.
Args:
client: A ClientSnapshot object record to find keywords for.
Returns:
A Sequence of keywords related to client. | AnalyzeClient | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
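Indexing expands delimited values into every leading prefix, so a partial hostname or IP still matches. The sketch below isolates that expansion; it skips the lower-casing and the unprefixed copies that the real method also adds.

# Standalone sketch of the prefix expansion used when indexing clients.
def KeywordPrefixes(prefix, keyword, delimiter):
  keywords = {prefix + ":" + keyword}
  segments = keyword.split(delimiter)
  for i in range(1, len(segments)):
    keywords.add(prefix + ":" + delimiter.join(segments[:i]))
  return keywords

assert KeywordPrefixes("host", "web-01.corp.example.com", ".") == {
    "host:web-01.corp.example.com",
    "host:web-01",
    "host:web-01.corp",
    "host:web-01.corp.example",
}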
def AddClient(self, client: rdf_objects.ClientSnapshot):
"""Adds a client to the index.
Args:
client: A ClientSnapshot object record.
"""
keywords = self.AnalyzeClient(client)
keywords.add(self._NormalizeKeyword(client.client_id))
data_store.REL_DB.AddClientKeywords(client.client_id, keywords) | Adds a client to the index.
Args:
client: A ClientSnapshot object record. | AddClient | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
def MultiAddClientLabels(
self,
client_ids: Collection[str],
labels: Collection[str],
) -> None:
"""Associates given labels with the specified clients.
Args:
client_ids: Client identifiers of clients to annotate with the labels.
labels: Labels to use for annotating the clients.
"""
precondition.AssertIterableType(labels, str)
keywords = set()
for label in labels:
keyword_string = self._NormalizeKeyword(label)
keywords.add(keyword_string)
keywords.add("label:" + keyword_string)
data_store.REL_DB.MultiAddClientKeywords(client_ids, keywords) | Associates given labels with the specified clients.
Args:
client_ids: Client identifiers of clients to annotate with the labels.
labels: Labels to use for annotating the clients. | MultiAddClientLabels | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
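Each label is indexed both as a bare keyword and with a label: prefix. A standalone sketch of that expansion follows; plain lower-casing stands in for the index's _NormalizeKeyword helper.

# Standalone sketch of the label-to-keyword expansion.
def LabelKeywords(labels):
  keywords = set()
  for label in labels:
    normalized = label.lower()  # Stand-in for _NormalizeKeyword.
    keywords.add(normalized)
    keywords.add("label:" + normalized)
  return keywords

assert LabelKeywords(["Prod", "EMEA"]) == {"prod", "label:prod", "emea", "label:emea"}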
def RemoveAllClientLabels(self, client_id: str):
"""Removes all labels for a given client.
Args:
client_id: The client_id.
"""
labels_to_remove = set(
[l.name for l in data_store.REL_DB.ReadClientLabels(client_id)]
)
self.RemoveClientLabels(client_id, labels_to_remove) | Removes all labels for a given client.
Args:
client_id: The client_id. | RemoveAllClientLabels | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
def RemoveClientLabels(self, client_id: str, labels: Iterable[str]):
"""Removes all labels for a given client.
Args:
client_id: The client_id.
labels: A list of labels to remove.
"""
for label in labels:
keyword = self._NormalizeKeyword(label)
# This might actually delete a keyword with the same name as the label (if
# there is one).
data_store.REL_DB.RemoveClientKeyword(client_id, keyword)
data_store.REL_DB.RemoveClientKeyword(client_id, "label:%s" % keyword) | Removes all labels for a given client.
Args:
client_id: The client_id.
labels: A list of labels to remove. | RemoveClientLabels | python | google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/client_index.py | Apache-2.0 |
def Evaluate(self, client_info):
"""Evaluates the rule represented by this object.
Args:
client_info: A `db.ClientFullInfo` instance.
Returns:
A bool value of the evaluation.
"""
raise NotImplementedError | Evaluates the rule represented by this object.
Args:
client_info: A `db.ClientFullInfo` instance.
Returns:
A bool value of the evaluation. | Evaluate | python | google/grr | grr/server/grr_response_server/foreman_rules.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/foreman_rules.py | Apache-2.0 |
def Evaluate(self, client_info):
"""Evaluates rules held in the rule set.
Args:
client_info: A client_info dict as returned by ReadFullInfoClient.
Returns:
A bool value of the evaluation.
Raises:
ValueError: The match mode is of unknown value.
"""
if self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ALL:
quantifier = all
elif self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ANY:
quantifier = any
else:
raise ValueError("Unexpected match mode value: %s" % self.match_mode)
return quantifier(rule.Evaluate(client_info) for rule in self.rules) | Evaluates rules held in the rule set.
Args:
client_info: A client_info dict as returned by ReadFullInfoClient.
Returns:
A bool value of the evaluation.
Raises:
ValueError: The match mode is of unknown value. | Evaluate | python | google/grr | grr/server/grr_response_server/foreman_rules.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/foreman_rules.py | Apache-2.0 |
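The match mode only selects between Python's all and any quantifiers over the per-rule results. A standalone sketch follows; the match modes are modelled as plain strings rather than the protobuf enum.

# Standalone sketch of the match-mode dispatch; enum values are strings here.
def EvaluateRules(rules, client_info, match_mode="MATCH_ALL"):
  quantifier = {"MATCH_ALL": all, "MATCH_ANY": any}.get(match_mode)
  if quantifier is None:
    raise ValueError("Unexpected match mode value: %s" % match_mode)
  return quantifier(rule(client_info) for rule in rules)

is_linux = lambda info: info.get("os") == "Linux"
has_canary = lambda info: "canary" in info.get("labels", ())
assert EvaluateRules([is_linux, has_canary], {"os": "Linux", "labels": ["canary"]})
assert not EvaluateRules([is_linux, has_canary], {"os": "Windows"})
assert EvaluateRules([is_linux, has_canary], {"os": "Linux"}, "MATCH_ANY")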
def hunt_id(self):
"""Returns hunt id of this rule's actions or None if there's none."""
for action in self.actions or []:
if action.hunt_id is not None:
return action.hunt_id | Returns hunt id of this rule's actions or None if there's none. | hunt_id | python | google/grr | grr/server/grr_response_server/foreman_rules.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/foreman_rules.py | Apache-2.0 |
def __init__(self, address, port):
"""Instantiates a new StatsServer.
Args:
address: The IP address of the server to bind.
port: The TCP port that the server should listen to.
"""
super().__init__(address, port)
self._http_server = None
self._server_thread = None | Instantiates a new StatsServer.
Args:
address: The IP address of the server to bind.
port: The TCP port that the server should listen to. | __init__ | python | google/grr | grr/server/grr_response_server/stats_server.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/stats_server.py | Apache-2.0 |
def Start(self):
"""Start HTTPServer."""
ip = ipaddress.ip_address(self.address)
if ip.version == 4:
server_cls = http_server.HTTPServer
else:
server_cls = IPv6HTTPServer
try:
self._http_server = server_cls(
(self.address, self.port),
StatsServerHandler,
)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
raise base_stats_server.PortInUseError(self.port)
else:
raise
self._server_thread = threading.Thread(
target=self._http_server.serve_forever
)
self._server_thread.daemon = True
self._server_thread.start() | Start HTTPServer. | Start | python | google/grr | grr/server/grr_response_server/stats_server.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/stats_server.py | Apache-2.0 |
def Stop(self):
"""Stops serving statistics."""
self._http_server.shutdown()
self._server_thread.join() | Stops serving statistics. | Stop | python | google/grr | grr/server/grr_response_server/stats_server.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/stats_server.py | Apache-2.0 |
def InitializeStatsServerOnce():
"""Starts up a varz server after everything is registered.
StatsServer implementation may be overridden. If there's a "stats_server"
module present in grr/local directory then
grr.local.stats_server.StatsServer implementation will be used instead of
a default one.
"""
address = config.CONFIG["Monitoring.http_address"]
# Figure out which port to use.
port = config.CONFIG["Monitoring.http_port"]
if not port:
logging.info("Monitoring server disabled.")
return
max_port = config.CONFIG.Get("Monitoring.http_port_max", None)
if max_port is None:
# Use the same number of available ports as the adminui is using. If we
# have 10 available for adminui we will need 10 for the stats server.
adminui_max_port = config.CONFIG.Get(
"AdminUI.port_max", config.CONFIG["AdminUI.port"]
)
max_port = port + adminui_max_port - config.CONFIG["AdminUI.port"]
try:
# pylint: disable=g-import-not-at-top
from grr_response_server.local import stats_server
# pylint: enable=g-import-not-at-top
server_cls = stats_server.StatsServer
logging.debug("Using local StatsServer")
except ImportError:
logging.debug("Using default StatsServer")
server_cls = StatsServer
for port in range(port, max_port + 1):
try:
logging.info(
"Starting monitoring server on address %s and port %d.", address, port
)
server_obj = server_cls(address, port)
server_obj.Start()
return
except base_stats_server.PortInUseError as e:
if e.port < max_port:
logging.info(e)
continue
raise | Starts up a varz server after everything is registered.
StatsServer implementation may be overridden. If there's a "stats_server"
module present in grr/local directory then
grr.local.stats_server.StatsServer implementation will be used instead of
a default one. | InitializeStatsServerOnce | python | google/grr | grr/server/grr_response_server/stats_server.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/stats_server.py | Apache-2.0 |
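Startup walks a port range and keeps probing while PortInUseError reports a port below the maximum. Below is a standalone sketch of that loop; StartServer and the busy-port set are local stand-ins, not GRR code.

# Standalone sketch of the port-probing loop; local stand-ins only.
class PortInUseError(Exception):
  def __init__(self, port):
    super().__init__("Port %d is already in use." % port)
    self.port = port

def StartOnFirstFreePort(start_server, port, max_port):
  for candidate in range(port, max_port + 1):
    try:
      return start_server(candidate)
    except PortInUseError as e:
      if e.port < max_port:
        continue
      raise

busy_ports = {4441, 4442}
def StartServer(port):
  if port in busy_ports:
    raise PortInUseError(port)
  return port

assert StartOnFirstFreePort(StartServer, 4441, 4445) == 4443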
def __init__(self, port):
"""Instantiates a new PortInUseError.
Args:
port: The port being used.
"""
super().__init__("Port {} is already in use.".format(port))
self.port = port | Instantiates a new PortInUseError.
Args:
port: The port being used. | __init__ | python | google/grr | grr/server/grr_response_server/base_stats_server.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/base_stats_server.py | Apache-2.0 |
def __init__(self, address, port):
"""Instantiates a new BaseStatsServer.
Args:
address: The IP address of the server to bind.
port: The TCP port that the server should listen to.
"""
self.address = address
self.port = port | Instantiates a new BaseStatsServer.
Args:
address: The IP address of the server to bind.
port: The TCP port that the server should listen to. | __init__ | python | google/grr | grr/server/grr_response_server/base_stats_server.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/base_stats_server.py | Apache-2.0 |
def Start(self):
"""Starts serving statistics.
Raises:
PortInUseError: The given port is already used.
"""
raise NotImplementedError() | Starts serving statistics.
Raises:
PortInUseError: The given port is already used. | Start | python | google/grr | grr/server/grr_response_server/base_stats_server.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/base_stats_server.py | Apache-2.0 |
def Stop(self):
"""Stops serving statistics."""
raise NotImplementedError() | Stops serving statistics. | Stop | python | google/grr | grr/server/grr_response_server/base_stats_server.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/base_stats_server.py | Apache-2.0 |
def __init__(self, message_queue, pool, name):
# fmt: off
"""Initializer.
This creates a new worker object for the ThreadPool class.
Args:
message_queue: A queue.Queue object used by the ThreadPool class to
communicate with the workers. When a new task arrives, the ThreadPool
notifies the workers by putting a message into this queue that has the
format (target, args, name, queueing_time).
target - A callable, the function to call.
args - A tuple of positional arguments to target. Keyword arguments
are not supported.
name - A name for this task. If None, it will be uniquely generated by
the threading library.
queueing_time - The timestamp when this task was queued as returned by
time.time().
Or, alternatively, the message in the queue can be STOP_MESSAGE
which indicates that the worker should terminate.
pool: The thread pool this worker belongs to.
name: A name for this worker thread.
"""
# fmt: on
super().__init__(name=name)
self.pool = pool
self._queue = message_queue
self.daemon = True
self.idle = True
self.started = time.time() | Initializer.
This creates a new worker object for the ThreadPool class.
Args:
message_queue: A queue.Queue object used by the ThreadPool class to
communicate with the workers. When a new task arrives, the ThreadPool
notifies the workers by putting a message into this queue that has the
format (target, args, name, queueing_time).
target - A callable, the function to call.
args - A tuple of positional arguments to target. Keyword arguments
are not supported.
name - A name for this task. If None, it will be uniquely generated by
the threading library.
queueing_time - The timestamp when this task was queued as returned by
time.time().
Or, alternatively, the message in the queue can be STOP_MESSAGE
which indicates that the worker should terminate.
pool: The thread pool this worker belongs to.
name: A name for this worker thread. | __init__ | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
def ProcessTask(self, target, args, name, queueing_time):
"""Processes the tasks."""
if self.pool.name:
time_in_queue = time.time() - queueing_time
THREADPOOL_QUEUEING_TIME.RecordEvent(
time_in_queue, fields=[self.pool.name]
)
start_time = time.time()
try:
target(*args)
# We can't let a worker die because one of the tasks it has to process
# throws an exception. Therefore, we catch every error that is
# raised in the call to target().
except Exception: # pylint: disable=broad-except
if self.pool.name:
THREADPOOL_TASK_EXCEPTIONS.Increment(fields=[self.pool.name])
logging.exception("Caught exception in worker thread (%s)", name)
if self.pool.name:
total_time = time.time() - start_time
THREADPOOL_WORKING_TIME.RecordEvent(total_time, fields=[self.pool.name]) | Processes the tasks. | ProcessTask | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |
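Each task records how long it waited in the queue and how long it ran, and worker threads must survive exceptions raised by the task itself. A standalone sketch of that wrapper; record_event is a hypothetical callback in place of the threadpool metrics.

# Standalone sketch of task timing and exception isolation in a worker.
import logging
import time

def RunTask(target, args, queueing_time, record_event):
  # Time spent waiting in the queue before a worker picked the task up.
  record_event("queueing_time", time.time() - queueing_time)
  start_time = time.time()
  try:
    target(*args)
  except Exception:  # pylint: disable=broad-except
    record_event("task_exceptions", 1)
    logging.exception("Caught exception in worker thread")
  record_event("working_time", time.time() - start_time)

RunTask(print, ("hello",), time.time(), lambda name, value: None)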
def _RemoveFromPool(self):
"""Remove ourselves from the pool.
Returns:
True if removal was possible, and False if it was not possible.
"""
with self.pool.lock:
# Pool is shutting down, we can't interfere.
if not self.pool.started:
return False
# Keep a minimum number of threads in the pool.
if len(self.pool) <= self.pool.min_threads:
return False
# Remove us from our pool.
self.pool._RemoveWorker(self.name) # pylint: disable=protected-access
return True | Remove ourselves from the pool.
Returns:
True if removal was possible, and False if it was not possible. | _RemoveFromPool | python | google/grr | grr/server/grr_response_server/threadpool.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool.py | Apache-2.0 |