Dataset columns: code (string, 26-870k chars), docstring (string, 1-65.6k chars), func_name (string, 1-194 chars), language (1 class), repo (string, 8-68 chars), path (string, 5-182 chars), url (string, 46-251 chars), license (4 classes).

| code | docstring | func_name | language | repo | path | url | license |
|---|---|---|---|---|---|---|---|
def testFailToCreateThread(self):
"""Test that we handle thread creation problems ok."""
# The pool starts off with the minimum number of threads.
self.assertLen(self.test_pool, self.NUMBER_OF_THREADS)
done_event = threading.Event()
def Block(done):
done.wait()
def RaisingStart(_):
raise threading.ThreadError()
# Now simulate failure of creating threads.
with mock.patch.object(threadpool._WorkerThread, "start", RaisingStart):
# Fill all the existing threads and wait for them to become busy.
self.test_pool.AddTask(Block, (done_event,))
self.WaitUntil(
lambda: self.test_pool.busy_threads == self.NUMBER_OF_THREADS
)
# Now fill the queue completely.
for _ in range(self.MAXIMUM_THREADS):
self.test_pool.AddTask(Block, (done_event,))
# Trying to push this task will overflow the queue, and would normally
# cause a new thread to start. We use non-blocking mode to receive the
# exception.
self.assertRaises(
threadpool.Full,
self.test_pool.AddTask,
Block,
(done_event,),
blocking=False,
inline=False,
)
# Release the blocking tasks.
done_event.set()
self.test_pool.Join() | Test that we handle thread creation problems ok. | testFailToCreateThread | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testThreadsReaped(self):
"""Check that threads are reaped when too old."""
self.now = 0
with utils.MultiStubber(
(time, "time", lambda: self.now),
(threading, "_time", lambda: self.now),
(queue, "_time", lambda: self.now),
(self.test_pool, "CPUUsage", lambda: 0),
):
done_event = threading.Event()
res = []
def Block(done, count):
done.wait()
res.append(count)
for i in range(2 * self.MAXIMUM_THREADS):
self.test_pool.AddTask(Block, (done_event, i), "Blocking", inline=False)
self.assertLen(self.test_pool, self.MAXIMUM_THREADS)
# Release the threads. All threads are now idle.
done_event.set()
# Fast forward the time
self.now = 1000
# Threads will now kill themselves off and the threadpool will be reduced
# to the minimum number of threads.
self.WaitUntil(lambda: len(self.test_pool) == self.NUMBER_OF_THREADS)
# Ensure we have the minimum number of threads left now.
self.assertLen(self.test_pool, self.NUMBER_OF_THREADS) | Check that threads are reaped when too old. | testThreadsReaped | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testExportedFunctions(self):
"""Tests if the outstanding tasks variable is exported correctly."""
signal_event, wait_event = threading.Event(), threading.Event()
def RunFn():
signal_event.set()
wait_event.wait()
pool_name = "test_pool3"
pool = threadpool.ThreadPool.Factory(pool_name, 10)
pool.Start()
try:
# First 10 tasks should be scheduled immediately, as we have max_threads
# set to 10.
for _ in range(10):
signal_event.clear()
pool.AddTask(RunFn, ())
signal_event.wait(10)
# Next 5 tasks should sit in the queue.
for _ in range(5):
with self.assertStatsCounterDelta(
1, threadpool.THREADPOOL_OUTSTANDING_TASKS, fields=[pool_name]
):
pool.AddTask(RunFn, ())
finally:
wait_event.set()
pool.Stop() | Tests if the outstanding tasks variable is exported correctly. | testExportedFunctions | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testDuplicateNameError(self):
"""Tests that creating two pools with the same name fails."""
prefix = self.test_pool.name
self.assertRaises(
threadpool.DuplicateThreadpoolError, threadpool.ThreadPool, prefix, 10
) | Tests that creating two pools with the same name fails. | testDuplicateNameError | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testDuplicateName(self):
"""Tests that we can get the same pool again through the factory."""
prefix = "duplicate_name"
pool = threadpool.ThreadPool.Factory(prefix, 10)
try:
self.assertEqual(pool.started, False)
pool.Start()
self.assertEqual(pool.started, True)
# This should return the same pool as before.
pool2 = threadpool.ThreadPool.Factory(prefix, 10)
self.assertEqual(pool2.started, True)
finally:
pool.Stop() | Tests that we can get the same pool again through the factory. | testDuplicateName | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
def testAnonymousThreadpool(self):
"""Tests that we can't starts anonymous threadpools."""
prefix = None
with self.assertRaises(ValueError):
threadpool.ThreadPool.Factory(prefix, 10) | Tests that we can't start anonymous threadpools. | testAnonymousThreadpool | python | google/grr | grr/server/grr_response_server/threadpool_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/threadpool_test.py | Apache-2.0 |
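The tests above exercise the thread pool's factory, duplicate-name, and queueing behaviour. Below is a minimal usage sketch assuming the `threadpool` module from `grr_response_server` as exercised in those tests; the pool name, size, and task function are illustrative only.

```python
from grr_response_server import threadpool


def PrintTask(msg):
  # Illustrative task; the tests above pass blocking callables the same way.
  print(msg)


# Factory() returns an existing pool if one with this name was already
# created (see testDuplicateName above); name and size are arbitrary here.
pool = threadpool.ThreadPool.Factory("example_pool", 10)
pool.Start()
try:
  # AddTask takes a callable and a tuple of arguments, mirroring the
  # AddTask(Block, (done_event,)) calls in the tests above.
  pool.AddTask(PrintTask, ("hello from the pool",))
finally:
  pool.Stop()
```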
def CreatePluginAndDefaultState(cls, source_urn=None, args=None):
"""Creates a plugin and returns its initial state."""
state = rdf_protodict.AttributedDict()
if args is not None:
args.Validate()
state["args"] = args
plugin = cls(source_urn=source_urn, args=args)
plugin.InitializeState(state)
return plugin, state | Creates a plugin and returns its initial state. | CreatePluginAndDefaultState | python | google/grr | grr/server/grr_response_server/output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/output_plugin.py | Apache-2.0 |
def __init__(self, source_urn=None, args=None):
"""OutputPlugin constructor.
Constructor should be overridden to maintain instance-local state - i.e.
state that gets accumulated during the single output plugin run and that
should be used to update the global state via UpdateState method.
Args:
source_urn: URN of the data source to process the results from.
args: This plugin's arguments.
"""
self.source_urn = source_urn
self.args = args
self.lock = threading.RLock() | OutputPlugin constructor.
Constructor should be overridden to maintain instance-local state - i.e.
state that gets accumulated during the single output plugin run and that
should be used to update the global state via UpdateState method.
Args:
source_urn: URN of the data source to process the results from.
args: This plugin's arguments. | __init__ | python | google/grr | grr/server/grr_response_server/output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/output_plugin.py | Apache-2.0 |
def InitializeState(self, state):
"""Initializes the state the output plugin can use later.
InitializeState() is called only once per plugin's lifetime. It
will be called when hunt or flow is created. It should be used to
register state variables. It's called on the worker, so no
security checks apply.
Args:
state: rdf_protodict.AttributedDict to be filled with default values.
""" | Initializes the state the output plugin can use later.
InitializeState() is called only once per plugin's lifetime. It
will be called when hunt or flow is created. It should be used to
register state variables. It's called on the worker, so no
security checks apply.
Args:
state: rdf_protodict.AttributedDict to be filled with default values. | InitializeState | python | google/grr | grr/server/grr_response_server/output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/output_plugin.py | Apache-2.0 |
def ProcessResponses(self, state, responses):
"""Processes bunch of responses.
When responses are processed, multiple ProcessResponses() calls can
be done in a row. ProcessResponse() calls may be parallelized within the
same worker to improve output performance, therefore ProcessResponses()
implementation should be thread-safe. ProcessResponse() calls are
*always* followed by a single Flush() call on the same worker.
ProcessResponses() is called on the worker, so no security checks apply.
Args:
state: rdf_protodict.AttributedDict with plugin's state. NOTE:
ProcessResponses should not change state object. All such changes should
take place in the UpdateState method (see below).
responses: GrrMessages from the hunt results collection.
""" | Processes bunch of responses.
When responses are processed, multiple ProcessResponses() calls can
be done in a row. ProcessResponse() calls may be parallelized within the
same worker to improve output performance, therefore ProcessResponses()
implementation should be thread-safe. ProcessResponse() calls are
*always* followed by a single Flush() call on the same worker.
ProcessResponses() is called on the worker, so no security checks apply.
Args:
state: rdf_protodict.AttributedDict with plugin's state. NOTE:
ProcessResponses should not change state object. All such changes should
take place in the UpdateState method (see below).
responses: GrrMessages from the hunt results collection. | ProcessResponses | python | google/grr | grr/server/grr_response_server/output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/output_plugin.py | Apache-2.0 |
def Flush(self, state):
"""Flushes the output plugin's state.
Flush is *always* called after a series of ProcessResponses() calls.
Flush() is called on the worker, so no security checks apply.
NOTE: This method doesn't have to be thread-safe as it's called once
after a series of ProcessResponses() calls is complete.
Args:
state: rdf_protodict.AttributedDict with plugin's state. NOTE:
ProcessResponses should not change state object. All such changes should
take place in the UpdateState method (see below).
""" | Flushes the output plugin's state.
Flush is *always* called after a series of ProcessResponses() calls.
Flush() is called on the worker, so no security checks apply.
NOTE: This method doesn't have to be thread-safe as it's called once
after a series of ProcessResponses() calls is complete.
Args:
state: rdf_protodict.AttributedDict with plugin's state. NOTE:
ProcessResponses should not change state object. All such changes should
take place in the UpdateState method (see below). | Flush | python | google/grr | grr/server/grr_response_server/output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/output_plugin.py | Apache-2.0 |
def UpdateState(self, state):
"""Updates state of the output plugin.
UpdateState is called after a series of ProcessResponses() calls and
after a Flush() call. The implementation of this method should be
lightweight, since its will be guaranteed to be called atomically
in a middle of database transaction.
Args:
state: rdf_protodict.AttributedDict with plugin's state to be updated.
""" | Updates state of the output plugin.
UpdateState is called after a series of ProcessResponses() calls and
after a Flush() call. The implementation of this method should be
lightweight, since it is guaranteed to be called atomically
in the middle of a database transaction.
Args:
state: rdf_protodict.AttributedDict with plugin's state to be updated. | UpdateState | python | google/grr | grr/server/grr_response_server/output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/output_plugin.py | Apache-2.0 |
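The docstrings above describe the output plugin lifecycle: InitializeState() once at creation, then batches of ProcessResponses() followed by a single Flush() and an UpdateState() call where the persisted state may be mutated. The following is a minimal sketch of a hypothetical plugin that counts responses; the class name and the "count" field are illustrative, and real plugins typically declare additional class attributes not shown in this excerpt.

```python
from grr_response_server import output_plugin


class CountingOutputPlugin(output_plugin.OutputPlugin):
  """Hypothetical plugin that counts processed responses (sketch only)."""

  def InitializeState(self, state):
    # Called once, when the hunt or flow is created.
    state["count"] = 0

  def ProcessResponses(self, state, responses):
    # May be called concurrently; accumulate only instance-local state here
    # and leave `state` untouched, per the docstring above.
    with self.lock:
      self._pending = getattr(self, "_pending", 0) + len(responses)

  def Flush(self, state):
    # Always called once after a series of ProcessResponses() calls.
    pass

  def UpdateState(self, state):
    # The only place where the persisted state may be mutated.
    state["count"] += getattr(self, "_pending", 0)
    self._pending = 0
```

An instance plus its initial state would typically come from CountingOutputPlugin.CreatePluginAndDefaultState(source_urn=..., args=None), as defined earlier in this file.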
def DropPrivileges():
"""Attempt to drop privileges if required."""
if config.CONFIG["Server.username"]:
try:
os.setuid(pwd.getpwnam(config.CONFIG["Server.username"]).pw_uid)
except (KeyError, OSError):
logging.exception(
"Unable to switch to user %s", config.CONFIG["Server.username"]
)
raise | Attempt to drop privileges if required. | DropPrivileges | python | google/grr | grr/server/grr_response_server/server_startup.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_startup.py | Apache-2.0 |
def Init(disabled: bool = False):
"""Run all required startup routines and initialization hooks."""
# Set up a temporary syslog handler so we have somewhere to log problems
# with ConfigInit(), which needs to happen before we can create our proper
# logging setup.
syslog_logger = logging.getLogger("TempLogger")
if os.path.exists("/dev/log"):
handler = logging.handlers.SysLogHandler(address="/dev/log")
else:
handler = logging.handlers.SysLogHandler()
syslog_logger.addHandler(handler)
# The default behavior of server components is to raise errors when
# encountering unknown config options.
flags.FLAGS.disallow_missing_config_definitions = True
try:
config_lib.SetPlatformArchContext()
config_lib.ParseConfigCommandLine(rename_invalid_writeback=False)
except config_lib.Error:
syslog_logger.exception("Died during config initialization")
raise
stats_collector = prometheus_stats_collector.PrometheusStatsCollector(
registry=prometheus_client.REGISTRY
)
stats_collector_instance.Set(stats_collector)
server_logging.ServerLoggingStartupInit()
bs_registry_init.RegisterBlobStores()
ec_registry_init.RegisterExportConverters()
gui_api_registry_init.RegisterApiCallRouters()
data_store.InitializeDataStore()
if contexts.ADMIN_UI_CONTEXT in config.CONFIG.context:
api_auth_manager.InitializeApiAuthManager()
artifact.LoadArtifactsOnce() # Requires aff4.AFF4Init.
client_approval_auth.InitializeClientApprovalAuthorizationManagerOnce()
if not disabled:
cronjobs.InitializeCronWorkerOnce()
email_alerts.InitializeEmailAlerterOnce()
http_api.InitializeHttpRequestHandlerOnce()
ip_resolver.IPResolverInitOnce()
stats_server.InitializeStatsServerOnce()
webauth.InitializeWebAuthOnce()
# Exempt config updater from this check because it is the one responsible for
# setting the variable.
if not config.CONFIG.ContextApplied("ConfigUpdater Context"):
if not config.CONFIG.Get("Server.initialized"):
raise RuntimeError(
'Config not initialized, run "grr_config_updater'
' initialize". If the server is already configured,'
' add "Server.initialized: True" to your config.'
) | Run all required startup routines and initialization hooks. | Init | python | google/grr | grr/server/grr_response_server/server_startup.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_startup.py | Apache-2.0 |
def setUp(self):
"""Make sure things are initialized."""
super().setUp()
self.client_mock = action_mocks.ClientFileFinderWithVFS()
patcher = artifact_test_lib.PatchDefaultArtifactRegistry()
patcher.start()
self.addCleanup(patcher.stop) | Make sure things are initialized. | setUp | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def LoadTestArtifacts(self):
"""Add the test artifacts in on top of whatever is in the registry."""
artifact_registry.REGISTRY.AddFileSource(
os.path.join(
config.CONFIG["Test.data_dir"], "artifacts", "test_artifacts.json"
)
) | Add the test artifacts in on top of whatever is in the registry. | LoadTestArtifacts | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def RunCollectorAndGetResults(
self,
artifact_list: Iterator[str],
client_mock: Optional[MockClient] = None,
client_id: Optional[str] = None,
error_on_no_results: bool = False,
split_output_by_artifact: bool = False,
):
"""Helper to handle running the collector flow."""
if client_mock is None:
client_mock = self.MockClient(client_id=client_id)
session_id = flow_test_lib.StartAndRunFlow(
collectors.ArtifactCollectorFlow,
client_mock=client_mock,
client_id=client_id,
flow_args=rdf_artifacts.ArtifactCollectorFlowArgs(
artifact_list=artifact_list,
error_on_no_results=error_on_no_results,
split_output_by_artifact=split_output_by_artifact,
),
creator=self.test_username,
)
return flow_test_lib.GetFlowResults(client_id, session_id) | Helper to handle running the collector flow. | RunCollectorAndGetResults | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def testFilesArtifact(self):
"""Check GetFiles artifacts."""
client_id = test_lib.TEST_CLIENT_ID
with vfs_test_lib.FakeTestDataVFSOverrider():
self.RunCollectorAndGetResults(
["TestFilesArtifact"],
client_mock=action_mocks.ClientFileFinderWithVFS(),
client_id=client_id,
)
cp = db.ClientPath.OS(client_id, ("var", "log", "auth.log"))
fd = file_store.OpenFile(cp)
self.assertNotEmpty(fd.read()) | Check GetFiles artifacts. | testFilesArtifact | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def testArtifactOutput(self):
"""Check we can run command based artifacts."""
client_id = test_lib.TEST_CLIENT_ID
with vfs_test_lib.FakeTestDataVFSOverrider():
# Will raise if something goes wrong.
self.RunCollectorAndGetResults(
["TestFilesArtifact"],
client_mock=self.client_mock,
client_id=client_id,
)
# Will raise if something goes wrong.
self.RunCollectorAndGetResults(
["TestFilesArtifact"],
client_mock=self.client_mock,
client_id=client_id,
split_output_by_artifact=True,
)
# Test the error_on_no_results option.
with self.assertRaises(RuntimeError) as context:
with test_lib.SuppressLogs():
self.RunCollectorAndGetResults(
["NullArtifact"],
client_mock=self.client_mock,
client_id=client_id,
split_output_by_artifact=True,
error_on_no_results=True,
)
if "collector returned 0 responses" not in str(context.exception):
raise RuntimeError("0 responses should have been returned") | Check we can run command based artifacts. | testArtifactOutput | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def testKnowledgeBaseRetrievalWindows(self):
"""Check we can retrieve a knowledge base from a client."""
kb = self._RunKBI()
self.assertEqual(kb.environ_systemroot, "C:\\Windows")
self.assertEqual(kb.time_zone, "US/Alaska")
self.assertEqual(kb.code_page, "cp_1252")
self.assertEqual(kb.environ_windir, "C:\\Windows")
self.assertEqual(kb.environ_profilesdirectory, "C:\\Users")
self.assertEqual(kb.environ_allusersprofile, "C:\\ProgramData")
self.assertEqual(kb.environ_allusersappdata, "C:\\ProgramData")
self.assertEqual(kb.environ_temp, "C:\\Windows\\TEMP")
self.assertEqual(kb.environ_systemdrive, "C:")
self.assertCountEqual([x.username for x in kb.users], ["jim", "kovacs"])
user = kb.GetUser(username="jim")
self.assertEqual(user.username, "jim")
self.assertEqual(user.sid, "S-1-5-21-702227068-2140022151-3110739409-1000") | Check we can retrieve a knowledge base from a client. | testKnowledgeBaseRetrievalWindows | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def testKnowledgeBaseRetrievalLinux(self):
"""Check we can retrieve a Linux kb."""
class KnowledgebaseInitMock(action_mocks.FileFinderClientMock):
def EnumerateUsers(
self,
args: None,
) -> Iterator[rdf_client.User]:
del args # Unused.
yield rdf_client.User(
username="user1",
homedir="/home/user1",
last_logon=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1296552099),
)
yield rdf_client.User(
username="user2",
homedir="/home/user2",
last_logon=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1234567890),
)
yield rdf_client.User(
username="user3",
homedir="/home/user3",
last_logon=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3456789012),
)
yield rdf_client.User(
username="yagharek",
homedir="/home/yagharek",
last_logon=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(7890123456),
)
session_id = flow_test_lib.StartAndRunFlow(
artifact.KnowledgeBaseInitializationFlow,
client_id=test_lib.TEST_CLIENT_ID,
client_mock=KnowledgebaseInitMock(),
)
results = flow_test_lib.GetFlowResults(test_lib.TEST_CLIENT_ID, session_id)
self.assertLen(results, 1)
self.assertIsInstance(results[0], rdf_client.KnowledgeBase)
kb = results[0]
self.assertCountEqual(
[x.username for x in kb.users], ["user1", "user2", "user3", "yagharek"]
)
user = kb.GetUser(username="user1")
self.assertEqual(user.last_logon.AsSecondsSinceEpoch(), 1296552099)
self.assertEqual(user.homedir, "/home/user1") | Check we can retrieve a Linux kb. | testKnowledgeBaseRetrievalLinux | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def testKnowledgeBaseRetrievalLinuxNoUsers(self):
"""Cause a users.username dependency failure."""
with vfs_test_lib.FakeTestDataVFSOverrider():
kb = self._RunKBI(require_complete=False)
self.assertEqual(kb.os_major_version, 14)
self.assertEqual(kb.os_minor_version, 4)
self.assertCountEqual([x.username for x in kb.users], []) | Cause a users.username dependency failure. | testKnowledgeBaseRetrievalLinuxNoUsers | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def testKnowledgeBaseRetrievalDarwin(self):
"""Check we can retrieve a Darwin kb."""
with vfs_test_lib.VFSOverrider(
rdf_paths.PathSpec.PathType.OS,
vfs_test_lib.ClientVFSHandlerFixture,
):
kb = self._RunKBI()
self.assertEqual(kb.os_major_version, 10)
self.assertEqual(kb.os_minor_version, 9)
# scalzi from /Users dir listing.
self.assertCountEqual([x.username for x in kb.users], ["scalzi"])
user = kb.GetUser(username="scalzi")
self.assertEqual(user.homedir, "/Users/scalzi") | Check we can retrieve a Darwin kb. | testKnowledgeBaseRetrievalDarwin | python | google/grr | grr/server/grr_response_server/artifact_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_test.py | Apache-2.0 |
def testOperatingSystemSelection(self):
"""Tests that we can distinguish based on operating system."""
self.SetupClient(1, system="Windows XP")
self.SetupClient(2, system="Linux")
self.SetupClient(3, system="Windows 7")
with mock.patch.object(
hunt, "StartHuntFlowOnClient", self.StartHuntFlowOnClient
):
# Now setup the filters
now = rdfvalue.RDFDatetime.Now()
expiration_time = now + rdfvalue.Duration.From(1, rdfvalue.HOURS)
# Make a new rule
rule = foreman_rules.ForemanCondition(
creation_time=now,
expiration_time=expiration_time,
description="Test rule",
hunt_id="11111111",
)
# Matches Windows boxes
rule.client_rule_set = foreman_rules.ForemanClientRuleSet(
rules=[
foreman_rules.ForemanClientRule(
rule_type=foreman_rules.ForemanClientRule.Type.OS,
os=foreman_rules.ForemanOsClientRule(os_windows=True),
)
]
)
proto_foreman_condition = mig_foreman_rules.ToProtoForemanCondition(rule)
data_store.REL_DB.WriteForemanRule(proto_foreman_condition)
self.clients_started = []
foreman_obj = foreman.Foreman()
foreman_obj.AssignTasksToClient("C.1000000000000001")
foreman_obj.AssignTasksToClient("C.1000000000000002")
foreman_obj.AssignTasksToClient("C.1000000000000003")
# Make sure that only the windows machines ran
self.assertLen(self.clients_started, 2)
self.assertEqual(self.clients_started[0][1], "C.1000000000000001")
self.assertEqual(self.clients_started[1][1], "C.1000000000000003")
self.clients_started = []
# Run again - This should not fire since it did already
foreman_obj.AssignTasksToClient("C.1000000000000001")
foreman_obj.AssignTasksToClient("C.1000000000000002")
foreman_obj.AssignTasksToClient("C.1000000000000003")
self.assertEmpty(self.clients_started) | Tests that we can distinguish based on operating system. | testOperatingSystemSelection | python | google/grr | grr/server/grr_response_server/foreman_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/foreman_test.py | Apache-2.0 |
def testIntegerComparisons(self):
"""Tests that we can use integer matching rules on the foreman."""
base_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1336480583.077736)
boot_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1336300000.000000)
self.SetupClient(0x11, system="Windows XP", install_time=base_time)
self.SetupClient(0x12, system="Windows 7", install_time=base_time)
# This one was installed one week earlier.
one_week_ago = base_time - rdfvalue.Duration.From(1, rdfvalue.WEEKS)
self.SetupClient(0x13, system="Windows 7", install_time=one_week_ago)
self.SetupClient(0x14, system="Windows 7", last_boot_time=boot_time)
with mock.patch.object(
hunt, "StartHuntFlowOnClient", self.StartHuntFlowOnClient
):
now = rdfvalue.RDFDatetime.Now()
expiration_time = now + rdfvalue.Duration.From(1, rdfvalue.HOURS)
# Make a new rule
rule = foreman_rules.ForemanCondition(
creation_time=now,
expiration_time=expiration_time,
description="Test rule(old)",
hunt_id="11111111",
)
# Matches the old client
one_hour_ago = base_time - rdfvalue.Duration.From(1, rdfvalue.HOURS)
rule.client_rule_set = foreman_rules.ForemanClientRuleSet(
rules=[
foreman_rules.ForemanClientRule(
rule_type=foreman_rules.ForemanClientRule.Type.INTEGER,
integer=foreman_rules.ForemanIntegerClientRule(
field="INSTALL_TIME",
operator=foreman_rules.ForemanIntegerClientRule.Operator.LESS_THAN,
value=one_hour_ago.AsSecondsSinceEpoch(),
),
)
]
)
proto_foreman_condition = mig_foreman_rules.ToProtoForemanCondition(rule)
data_store.REL_DB.WriteForemanRule(proto_foreman_condition)
# Make a new rule
rule = foreman_rules.ForemanCondition(
creation_time=now,
expiration_time=expiration_time,
description="Test rule(new)",
hunt_id="22222222",
)
# Matches the newer clients
rule.client_rule_set = foreman_rules.ForemanClientRuleSet(
rules=[
foreman_rules.ForemanClientRule(
rule_type=foreman_rules.ForemanClientRule.Type.INTEGER,
integer=foreman_rules.ForemanIntegerClientRule(
field="INSTALL_TIME",
operator=foreman_rules.ForemanIntegerClientRule.Operator.GREATER_THAN,
value=one_hour_ago.AsSecondsSinceEpoch(),
),
)
]
)
proto_foreman_condition = mig_foreman_rules.ToProtoForemanCondition(rule)
data_store.REL_DB.WriteForemanRule(proto_foreman_condition)
# Make a new rule
rule = foreman_rules.ForemanCondition(
creation_time=now,
expiration_time=expiration_time,
description="Test rule(eq)",
hunt_id="33333333",
)
# Note that this also tests the handling of nonexistent attributes.
rule.client_rule_set = foreman_rules.ForemanClientRuleSet(
rules=[
foreman_rules.ForemanClientRule(
rule_type=foreman_rules.ForemanClientRule.Type.INTEGER,
integer=foreman_rules.ForemanIntegerClientRule(
field="LAST_BOOT_TIME",
operator="EQUAL",
value=boot_time.AsSecondsSinceEpoch(),
),
)
]
)
proto_foreman_condition = mig_foreman_rules.ToProtoForemanCondition(rule)
data_store.REL_DB.WriteForemanRule(proto_foreman_condition)
foreman_obj = foreman.Foreman()
self.clients_started = []
foreman_obj.AssignTasksToClient("C.1000000000000011")
foreman_obj.AssignTasksToClient("C.1000000000000012")
foreman_obj.AssignTasksToClient("C.1000000000000013")
foreman_obj.AssignTasksToClient("C.1000000000000014")
# Make sure that the clients ran the correct flows.
self.assertLen(self.clients_started, 4)
self.assertEqual(self.clients_started[0][1], "C.1000000000000011")
self.assertEqual("22222222", self.clients_started[0][0])
self.assertEqual(self.clients_started[1][1], "C.1000000000000012")
self.assertEqual("22222222", self.clients_started[1][0])
self.assertEqual(self.clients_started[2][1], "C.1000000000000013")
self.assertEqual("11111111", self.clients_started[2][0])
self.assertEqual(self.clients_started[3][1], "C.1000000000000014")
self.assertEqual("33333333", self.clients_started[3][0]) | Tests that we can use integer matching rules on the foreman. | testIntegerComparisons | python | google/grr | grr/server/grr_response_server/foreman_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/foreman_test.py | Apache-2.0 |
def Init(service_client=None):
"""Initializes the Fleetspeak connector."""
global CONN
global label_map
if service_client is None:
service_client_cls = fs_client.InsecureGRPCServiceClient
fleetspeak_message_listen_address = (
config.CONFIG["Server.fleetspeak_message_listen_address"] or None
)
fleetspeak_server = config.CONFIG["Server.fleetspeak_server"] or None
if fleetspeak_message_listen_address is None and fleetspeak_server is None:
logging.warning(
"Missing config options `Server.fleetspeak_message_listen_address', "
"`Server.fleetspeak_server', at least one of which is required to "
"initialize a connection to Fleetspeak; Not using Fleetspeak."
)
return
service_client = service_client_cls(
"GRR",
fleetspeak_message_listen_address=fleetspeak_message_listen_address,
fleetspeak_server=fleetspeak_server,
threadpool_size=config.CONFIG["Threadpool.size"],
)
label_map = {}
for entry in config.CONFIG["Server.fleetspeak_label_map"]:
key, value = entry.split(":")
label_map[key.strip()] = value.strip()
CONN = service_client
logging.info("Fleetspeak connector initialized.") | Initializes the Fleetspeak connector. | Init | python | google/grr | grr/server/grr_response_server/fleetspeak_connector.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_connector.py | Apache-2.0 |
def __init__(self, daily_req_limit=0, dup_interval=rdfvalue.Duration(0)):
"""Create flow throttler object.
Args:
daily_req_limit: Number of flows allowed per user per client. Integer.
dup_interval: rdfvalue.Duration time during which duplicate flows will be
blocked.
"""
self.daily_req_limit = daily_req_limit
self.dup_interval = dup_interval | Create flow throttler object.
Args:
daily_req_limit: Number of flows allowed per user per client. Integer.
dup_interval: rdfvalue.Duration time during which duplicate flows will be
blocked. | __init__ | python | google/grr | grr/server/grr_response_server/throttle.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/throttle.py | Apache-2.0 |
def _LoadFlows(self, client_id, min_create_time):
"""Yields all flows for the given client_id and time range.
Args:
client_id: Client id string.
min_create_time: minimum creation time (inclusive)
Yields: flow_objects.Flow objects
"""
flow_list = data_store.REL_DB.ReadAllFlowObjects(
client_id=client_id,
min_create_time=min_create_time,
include_child_flows=False,
)
flow_list = [mig_flow_objects.ToRDFFlow(flow) for flow in flow_list]
for flow_obj in flow_list:
yield flow_obj | Yields all flows for the given client_id and time range.
Args:
client_id: Client id string.
min_create_time: minimum creation time (inclusive)
Yields: flow_objects.Flow objects | _LoadFlows | python | google/grr | grr/server/grr_response_server/throttle.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/throttle.py | Apache-2.0 |
def EnforceLimits(self, client_id, user, flow_name, flow_args=None):
"""Enforce DailyFlowRequestLimit and FlowDuplicateInterval.
Look at the flows that have run on this client recently and check
we aren't exceeding our limits. Raises if limits will be exceeded by running
the specified flow.
Args:
client_id: client URN
user: username string
flow_name: name of the Flow. Only used for FlowDuplicateInterval.
flow_args: flow args rdfvalue for the flow being launched
Raises:
DailyFlowRequestLimitExceededError: if the user has already run
API.DailyFlowRequestLimit on this client in the previous 24h.
DuplicateFlowError: an identical flow was run on this machine by a user
within the API.FlowDuplicateInterval
"""
if not self.dup_interval and not self.daily_req_limit:
return
now = rdfvalue.RDFDatetime.Now()
yesterday = now - rdfvalue.Duration.From(1, rdfvalue.DAYS)
dup_boundary = now - self.dup_interval
min_create_time = min(yesterday, dup_boundary)
flow_count = 0
flow_objs = self._LoadFlows(client_id, min_create_time)
if flow_args is None:
flow_args = rdf_flows.EmptyFlowArgs()
for flow_obj in flow_objs:
if (
flow_obj.create_time > dup_boundary
and flow_obj.flow_class_name == flow_name
and flow_obj.args == flow_args
):
raise DuplicateFlowError(
"Identical %s already run on %s at %s"
% (flow_name, client_id, flow_obj.create_time),
flow_id=flow_obj.flow_id,
)
# Filter for flows started by user within the 1 day window.
if flow_obj.creator == user and flow_obj.create_time > yesterday:
flow_count += 1
# If limit is set, enforce it.
if self.daily_req_limit and flow_count >= self.daily_req_limit:
raise DailyFlowRequestLimitExceededError(
"%s flows run since %s, limit: %s"
% (flow_count, yesterday, self.daily_req_limit)
) | Enforce DailyFlowRequestLimit and FlowDuplicateInterval.
Look at the flows that have run on this client recently and check
we aren't exceeding our limits. Raises if limits will be exceeded by running
the specified flow.
Args:
client_id: client URN
user: username string
flow_name: name of the Flow. Only used for FlowDuplicateInterval.
flow_args: flow args rdfvalue for the flow being launched
Raises:
DailyFlowRequestLimitExceededError: if the user has already run
API.DailyFlowRequestLimit on this client in the previous 24h.
DuplicateFlowError: an identical flow was run on this machine by a user
within the API.FlowDuplicateInterval | EnforceLimits | python | google/grr | grr/server/grr_response_server/throttle.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/throttle.py | Apache-2.0 |
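EnforceLimits() above raises rather than returns when a limit would be hit. A minimal usage sketch follows; it assumes the throttler class in grr_response_server.throttle is named FlowThrottler (the class name is not visible in this excerpt) and uses an illustrative client id, username, and flow name.

```python
from grr_response_core.lib import rdfvalue
from grr_response_server import throttle

# Hypothetical limits: 10 flows per user per client per day, and no
# identical flow within one hour.
throttler = throttle.FlowThrottler(
    daily_req_limit=10,
    dup_interval=rdfvalue.Duration.From(1, rdfvalue.HOURS),
)

try:
  throttler.EnforceLimits(
      "C.1000000000000001", "alice", "SomeFlowName", flow_args=None)
except throttle.DuplicateFlowError:
  # An identical flow already ran on this client within dup_interval.
  raise
except throttle.DailyFlowRequestLimitExceededError:
  # The user already started daily_req_limit flows on this client today.
  raise
```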
def AddDir(self, dirpath):
"""Adds a directory path as a source.
Args:
dirpath: a string representing a path to the directory.
Returns:
True if the directory is not an already existing source.
"""
if dirpath not in self._dirs:
self._dirs.add(dirpath)
return True
return False | Adds a directory path as a source.
Args:
dirpath: a string representing a path to the directory.
Returns:
True if the directory is not an already existing source. | AddDir | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def AddFile(self, filepath):
"""Adds a file path as a source.
Args:
filepath: a string representing a path to the file.
Returns:
True if the file is not an already existing source.
"""
if filepath not in self._files:
self._files.add(filepath)
return True
return False | Adds a file path as a source.
Args:
filepath: a string representing a path to the file.
Returns:
True if the file is not an already existing source. | AddFile | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def GetDirs(self):
"""Returns an iterator over defined source directory paths."""
return iter(self._dirs) | Returns an iterator over defined source directory paths. | GetDirs | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def GetFiles(self):
"""Returns an iterator over defined source file paths."""
return iter(self._files) | Returns an iterator over defined source file paths. | GetFiles | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def GetAllFiles(self):
"""Yields all defined source file paths.
This includes file paths defined directly and those defined implicitly by
defining a directory.
"""
for filepath in self._files:
yield filepath
for dirpath in self._dirs:
for filepath in ArtifactRegistrySources._GetDirYamlFiles(dirpath):
if filepath in self._files:
continue
yield filepath | Yields all defined source file paths.
This includes file paths defined directly and those defined implicitly by
defining a directory. | GetAllFiles | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
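The source-tracking methods above (AddDir, AddFile, GetDirs, GetFiles, GetAllFiles) belong to ArtifactRegistrySources, referenced at the _GetDirYamlFiles call. A small usage sketch follows, assuming the class can be constructed without arguments (not shown in this excerpt) and using illustrative paths.

```python
from grr_response_server import artifact_registry

sources = artifact_registry.ArtifactRegistrySources()
sources.AddDir("/etc/grr/artifacts")              # directory scanned for YAML files
sources.AddFile("/etc/grr/artifacts/extra.json")  # single file added explicitly

# GetAllFiles() yields explicitly added files plus the YAML files discovered
# in added directories (per _GetDirYamlFiles), skipping duplicates.
for path in sources.GetAllFiles():
  print(path)
```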
def _LoadArtifactsFromDatastore(self):
"""Load artifacts from the data store."""
loaded_artifacts = []
# TODO(hanuszczak): Why do we have to remove anything? If some artifact
# tries to shadow system artifact shouldn't we just ignore them and perhaps
# issue some warning instead? The datastore being loaded should be read-only
# during upload.
# A collection of artifacts that shadow system artifacts and need
# to be deleted from the data store.
to_delete = []
artifact_list = [
mig_artifacts.ToRDFArtifact(a)
for a in data_store.REL_DB.ReadAllArtifacts()
]
for artifact_value in artifact_list:
try:
self.RegisterArtifact(
artifact_value, source="datastore:", overwrite_if_exists=True
)
loaded_artifacts.append(artifact_value)
except rdf_artifacts.ArtifactDefinitionError as e:
# TODO(hanuszczak): String matching on exception message is rarely
# a good idea. Instead this should be refactored to some exception
# class and then handled separately.
if "system artifact" in str(e):
to_delete.append(artifact_value.name)
else:
raise
if to_delete:
DeleteArtifactsFromDatastore(to_delete, reload_artifacts=False)
self._dirty = True
# TODO(hanuszczak): This is connected to the previous TODO comment. Why
# do we throw exception at this point? Why do we delete something and then
# abort the whole upload procedure by throwing an exception?
detail = "system artifacts were shadowed and had to be deleted"
raise rdf_artifacts.ArtifactDefinitionError(to_delete, detail)
# Once all artifacts are loaded we can validate.
revalidate = True
while revalidate:
revalidate = False
for artifact_obj in loaded_artifacts[:]:
try:
Validate(artifact_obj)
except rdf_artifacts.ArtifactDefinitionError as e:
logging.exception("Artifact %s did not validate", artifact_obj.name)
artifact_obj.error_message = str(e)
loaded_artifacts.remove(artifact_obj)
revalidate = True | Load artifacts from the data store. | _LoadArtifactsFromDatastore | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def ArtifactsFromYaml(self, yaml_content):
"""Get a list of Artifacts from yaml."""
raw_list = list(yaml.safe_load_all(yaml_content))
# TODO(hanuszczak): I am very sceptical about that "doing the right thing"
# below. What are the real use cases?
# Try to do the right thing with json/yaml formatted as a list.
if (
isinstance(raw_list, list)
and len(raw_list) == 1
and isinstance(raw_list[0], list)
):
raw_list = raw_list[0]
# Convert json into artifact and validate.
valid_artifacts = []
for artifact_dict in raw_list:
# Old artifacts might still use deprecated fields, so we have to ignore
# such. Here, we simply delete keys from the dictionary as otherwise the
# RDF value constructor would raise on unknown fields.
for field in DEPRECATED_ARTIFACT_FIELDS:
artifact_dict.pop(field, None)
# Strip operating systems that are supported in ForensicArtifacts, but not
# in GRR. The Artifact will still be added to GRR's repository, but the
# unsupported OS will be removed. This can result in artifacts with 0
# supported_os entries. For end-users, there might still be value in
# seeing the artifact, even if the artifact's OS is not supported.
if "supported_os" in artifact_dict:
artifact_dict["supported_os"] = [
os
for os in artifact_dict["supported_os"]
if os not in rdf_artifacts.Artifact.IGNORE_OS_LIST
]
# In this case we are feeding parameters directly from potentially
# untrusted yaml/json to our RDFValue class. However, safe_load ensures
# these are all primitive types as long as there is no other
# deserialization involved, and we are passing these into protobuf
# primitive types.
try:
artifact_value = rdf_artifacts.Artifact(**artifact_dict)
valid_artifacts.append(artifact_value)
except (TypeError, AttributeError, type_info.TypeValueError) as e:
name = artifact_dict.get("name")
raise rdf_artifacts.ArtifactDefinitionError(
name, "invalid definition", cause=e
)
return valid_artifacts | Get a list of Artifacts from yaml. | ArtifactsFromYaml | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
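ArtifactsFromYaml() accepts one or more YAML documents whose keys map onto the Artifact RDF value. Below is a hedged sketch: the artifact name, path, and source attributes are purely illustrative, and the exact set of accepted keys is defined by rdf_artifacts.Artifact rather than by this excerpt.

```python
from grr_response_server import artifact_registry

EXAMPLE_YAML = """
name: HypotheticalConfigFile
doc: Example artifact definition used only for illustration.
supported_os: [Linux]
sources:
  - type: FILE
    attributes:
      paths: ['/etc/hypothetical.conf']
"""

# REGISTRY is the module-level registry used throughout artifact_registry.py.
artifacts = artifact_registry.REGISTRY.ArtifactsFromYaml(EXAMPLE_YAML)
for artifact_obj in artifacts:
  print(artifact_obj.name)
```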
def _LoadArtifactsFromFiles(self, file_paths, overwrite_if_exists=True):
"""Load artifacts from file paths as json or yaml."""
loaded_files = []
loaded_artifacts = []
for file_path in file_paths:
try:
with io.open(file_path, mode="r", encoding="utf-8") as fh:
logging.debug("Loading artifacts from %s", file_path)
for artifact_val in self.ArtifactsFromYaml(fh.read()):
self.RegisterArtifact(
artifact_val,
source="file:%s" % file_path,
overwrite_if_exists=overwrite_if_exists,
)
loaded_artifacts.append(artifact_val)
logging.debug(
"Loaded artifact %s from %s", artifact_val.name, file_path
)
loaded_files.append(file_path)
except (IOError, OSError):
logging.exception("Failed to open artifact file %s.", file_path)
except rdf_artifacts.ArtifactDefinitionError:
logging.exception(
"Invalid artifact found in file %s with error", file_path
)
raise
# Once all artifacts are loaded we can validate.
for artifact_value in loaded_artifacts:
Validate(artifact_value) | Load artifacts from file paths as json or yaml. | _LoadArtifactsFromFiles | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def RegisterArtifact(
self,
artifact_rdfvalue,
source="datastore",
overwrite_if_exists=False,
overwrite_system_artifacts=False,
):
"""Registers a new artifact."""
artifact_name = artifact_rdfvalue.name
if artifact_name in self._artifacts:
if not overwrite_if_exists:
details = "artifact already exists and `overwrite_if_exists` is unset"
raise rdf_artifacts.ArtifactDefinitionError(artifact_name, details)
elif not overwrite_system_artifacts:
loaded_from_datastore = self.IsLoadedFrom(artifact_name, "datastore:")
if not loaded_from_datastore:
# This artifact was not uploaded to the datastore but came from a
# file, refuse to overwrite.
details = "system artifact cannot be overwritten"
raise rdf_artifacts.ArtifactDefinitionError(artifact_name, details)
# Preserve where the artifact was loaded from to help debugging.
self._artifact_loaded_from[artifact_name] = source
# Clear any stale errors.
artifact_rdfvalue.error_message = None
self._artifacts[artifact_rdfvalue.name] = artifact_rdfvalue | Registers a new artifact. | RegisterArtifact | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def _ReloadArtifacts(self):
"""Load artifacts from all sources."""
self._artifacts = {}
self._LoadArtifactsFromFiles(self._sources.GetAllFiles())
self.ReloadDatastoreArtifacts() | Load artifacts from all sources. | _ReloadArtifacts | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def _UnregisterDatastoreArtifacts(self):
"""Remove artifacts that came from the datastore."""
to_remove = []
for name in self._artifacts:
if self.IsLoadedFrom(name, "datastore"):
to_remove.append(name)
for key in to_remove:
self._artifacts.pop(key)
self._artifact_loaded_from.pop(key) | Remove artifacts that came from the datastore. | _UnregisterDatastoreArtifacts | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def GetArtifacts(
self,
os_name=None,
name_list=None,
source_type=None,
exclude_dependents=False,
reload_datastore_artifacts=False,
):
"""Retrieve artifact classes with optional filtering.
All filters must match for the artifact to be returned.
Args:
os_name: string to match against supported_os
name_list: list of strings to match against artifact names
source_type: rdf_artifacts.ArtifactSource.SourceType to match against
source_type
exclude_dependents: if true only artifacts with no dependencies will be
returned
reload_datastore_artifacts: If true, the data store sources are queried
for new artifacts.
Returns:
list of artifacts matching filter criteria
"""
self._CheckDirty(reload_datastore_artifacts=reload_datastore_artifacts)
results = {}
for artifact in self._artifacts.values():
# artifact.supported_os = [] matches all OSes
if (
os_name
and artifact.supported_os
and (os_name not in artifact.supported_os)
):
continue
if name_list and artifact.name not in name_list:
continue
if source_type:
source_types = [c.type for c in artifact.sources]
if source_type not in source_types:
continue
if exclude_dependents and GetArtifactPathDependencies(artifact):
continue
results[artifact.name] = artifact
return list(results.values()) | Retrieve artifact classes with optional filtering.
All filters must match for the artifact to be returned.
Args:
os_name: string to match against supported_os
name_list: list of strings to match against artifact names
source_type: rdf_artifacts.ArtifactSource.SourceType to match against
source_type
exclude_dependents: if true only artifacts with no dependencies will be
returned
reload_datastore_artifacts: If true, the data store sources are queried
for new artifacts.
Returns:
list of artifacts matching filter criteria | GetArtifacts | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
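GetArtifacts() applies all supplied filters conjunctively. A short sketch of a filtered query follows; the FILE source type constant and the rdf_artifacts import path are assumed from the usual GRR layout (this excerpt only shows the ARTIFACT_GROUP constant explicitly).

```python
from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts
from grr_response_server import artifact_registry

# Hypothetical query: Linux artifacts with a FILE source and no
# knowledgebase dependencies.
linux_file_artifacts = artifact_registry.REGISTRY.GetArtifacts(
    os_name="Linux",
    source_type=rdf_artifacts.ArtifactSource.SourceType.FILE,
    exclude_dependents=True,
)
```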
def GetArtifact(self, name):
"""Get artifact by name.
Args:
name: artifact name string.
Returns:
artifact object.
Raises:
ArtifactNotRegisteredError: if artifact doesn't exist in the registry.
"""
self._CheckDirty()
result = self._artifacts.get(name)
if not result:
raise rdf_artifacts.ArtifactNotRegisteredError(
"Artifact %s missing from registry. You may need to sync the "
"artifact repo by running make in the artifact directory." % name
)
return result | Get artifact by name.
Args:
name: artifact name string.
Returns:
artifact object.
Raises:
ArtifactNotRegisteredError: if artifact doesn't exist in the registry. | GetArtifact | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def Exists(self, name: str) -> bool:
"""Checks whether the artifact of the specified name exists in the registry.
Args:
name: A name of the artifact.
Returns:
`True` if the artifact exists, `False` otherwise.
"""
try:
self.GetArtifact(name)
except rdf_artifacts.ArtifactNotRegisteredError:
return False
return True | Checks whether the artifact of the specified name exists in the registry.
Args:
name: A name of the artifact.
Returns:
`True` if the artifact exists, `False` otherwise. | Exists | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def DeleteArtifactsFromDatastore(artifact_names, reload_artifacts=True):
"""Deletes a list of artifacts from the data store."""
artifacts_list = REGISTRY.GetArtifacts(
reload_datastore_artifacts=reload_artifacts
)
to_delete = set(artifact_names)
deps = set()
for artifact_obj in artifacts_list:
if artifact_obj.name in to_delete:
continue
if GetArtifactDependencies(artifact_obj) & to_delete:
deps.add(str(artifact_obj.name))
if deps:
raise ValueError(
"Artifact(s) %s depend(s) on one of the artifacts to delete."
% ",".join(deps)
)
found_artifact_names = set()
for artifact_value in artifacts_list:
if artifact_value.name in to_delete:
found_artifact_names.add(artifact_value.name)
if len(found_artifact_names) != len(to_delete):
not_found = to_delete - found_artifact_names
raise ValueError(
"Artifact(s) to delete (%s) not found." % ",".join(not_found)
)
for artifact_name in to_delete:
data_store.REL_DB.DeleteArtifact(str(artifact_name))
REGISTRY.UnregisterArtifact(artifact_name) | Deletes a list of artifacts from the data store. | DeleteArtifactsFromDatastore | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def ValidateSyntax(rdf_artifact):
"""Validates artifact syntax.
This method can be used to validate individual artifacts as they are loaded,
without needing all artifacts to be loaded first, as for Validate().
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactSyntaxError: If artifact syntax is invalid.
"""
if not rdf_artifact.doc:
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, "missing doc")
for supp_os in rdf_artifact.supported_os:
valid_os = rdf_artifact.SUPPORTED_OS_LIST
if supp_os not in valid_os:
detail = "invalid `supported_os` ('%s' not in %s)" % (supp_os, valid_os)
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail)
kb_field_names = rdf_client.KnowledgeBase().GetKbFieldNames()
# Any %%blah%% path dependencies must be defined in the KnowledgeBase
for dep in GetArtifactPathDependencies(rdf_artifact):
if dep not in kb_field_names:
detail = f"broken path dependencies ({dep!r} not in {kb_field_names})"
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail)
for source in rdf_artifact.sources:
try:
source.Validate()
except rdf_artifacts.ArtifactSourceSyntaxError as e:
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, "bad source", e) | Validates artifact syntax.
This method can be used to validate individual artifacts as they are loaded,
without needing all artifacts to be loaded first, as for Validate().
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactSyntaxError: If artifact syntax is invalid. | ValidateSyntax | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def ValidateDependencies(rdf_artifact):
"""Validates artifact dependencies.
This method checks whether all dependencies of the artifact are present
and contain no errors.
This method can be called only after all other artifacts have been loaded.
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactDependencyError: If a dependency is missing or contains errors.
"""
for dependency in GetArtifactDependencies(rdf_artifact):
try:
dependency_obj = REGISTRY.GetArtifact(dependency)
except rdf_artifacts.ArtifactNotRegisteredError as e:
raise rdf_artifacts.ArtifactDependencyError(
rdf_artifact, "missing dependency", cause=e
)
message = dependency_obj.error_message
if message:
raise rdf_artifacts.ArtifactDependencyError(
rdf_artifact, "dependency error", cause=message
) | Validates artifact dependencies.
This method checks whether all dependencies of the artifact are present
and contain no errors.
This method can be called only after all other artifacts have been loaded.
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactDependencyError: If a dependency is missing or contains errors. | ValidateDependencies | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def Validate(rdf_artifact):
"""Attempts to validate the artifact has been well defined.
This checks both syntax and dependencies of the artifact. Because of that,
this method can be called only after all other artifacts have been loaded.
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactDefinitionError: If artifact is invalid.
"""
ValidateSyntax(rdf_artifact)
ValidateDependencies(rdf_artifact) | Attempts to validate the artifact has been well defined.
This checks both syntax and dependencies of the artifact. Because of that,
this method can be called only after all other artifacts have been loaded.
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactDefinitionError: If artifact is invalid. | Validate | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def GetArtifactDependencies(rdf_artifact, recursive=False, depth=1):
"""Return a set of artifact dependencies.
Args:
rdf_artifact: RDF object artifact.
recursive: If True recurse into dependencies to find their dependencies.
depth: Used for limiting recursion depth.
Returns:
A set of strings containing the dependent artifact names.
Raises:
RuntimeError: If maximum recursion depth reached.
"""
deps = set()
for source in rdf_artifact.sources:
if source.type == rdf_artifacts.ArtifactSource.SourceType.ARTIFACT_GROUP:
if source.attributes.GetItem("names"):
deps.update(source.attributes.GetItem("names"))
if depth > 10:
raise RuntimeError("Max artifact recursion depth reached.")
deps_set = set(deps)
if recursive:
for dep in deps:
artifact_obj = REGISTRY.GetArtifact(dep)
new_dep = GetArtifactDependencies(artifact_obj, True, depth=depth + 1)
if new_dep:
deps_set.update(new_dep)
return deps_set | Return a set of artifact dependencies.
Args:
rdf_artifact: RDF object artifact.
recursive: If True recurse into dependencies to find their dependencies.
depth: Used for limiting recursion depth.
Returns:
A set of strings containing the dependent artifact names.
Raises:
RuntimeError: If maximum recursion depth reached. | GetArtifactDependencies | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def GetArtifactsDependenciesClosure(name_list, os_name=None):
"""For all the artifacts in the list returns them and their dependencies."""
artifacts = {
a.name: a
for a in REGISTRY.GetArtifacts(os_name=os_name, name_list=name_list)
}
dep_names = set()
for art in artifacts.values():
dep_names.update(GetArtifactDependencies(art, recursive=True))
if dep_names:
for dep in REGISTRY.GetArtifacts(os_name=os_name, name_list=dep_names):
artifacts[dep.name] = dep
return list(artifacts.values()) | For all the artifacts in the list returns them and their dependencies. | GetArtifactsDependenciesClosure | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def GetArtifactPathDependencies(rdf_artifact):
"""Return a set of knowledgebase path dependencies.
Args:
rdf_artifact: RDF artifact object.
Returns:
A set of strings for the required kb objects e.g.
["users.appdata", "systemroot"]
"""
deps = set()
for source in rdf_artifact.sources:
for arg, value in source.attributes.items():
paths = []
if arg in ["path", "query"]:
paths.append(value)
if arg == "key_value_pairs":
# This is a REGISTRY_VALUE {key:blah, value:blah} dict.
paths.extend([x["key"] for x in value])
if arg in ["keys", "paths", "path_list", "content_regex_list"]:
paths.extend(value)
for path in paths:
for match in artifact_utils.INTERPOLATED_REGEX.finditer(path):
deps.add(match.group()[2:-2]) # Strip off %%.
return deps | Return a set of knowledgebase path dependencies.
Args:
rdf_artifact: RDF artifact object.
Returns:
A set of strings for the required kb objects e.g.
["users.appdata", "systemroot"] | GetArtifactPathDependencies | python | google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact_registry.py | Apache-2.0 |
def InitializeDataStore():
"""Initialize the data store.
Depends on the stats module being initialized.
"""
global REL_DB # pylint: disable=global-statement
global BLOBS # pylint: disable=global-statement
if _LIST_STORAGE.value:
_ListStorageOptions()
sys.exit(0)
# Initialize the relational DB.
rel_db_name = config.CONFIG["Database.implementation"]
if not rel_db_name:
# TODO(hanuszczak): I think we should raise here instead of silently doing
# nothing.
return
try:
cls = registry_init.REGISTRY[rel_db_name]
except KeyError:
raise ValueError("Database %s not found." % rel_db_name)
logging.info("Using database implementation %s", rel_db_name)
REL_DB = db.DatabaseValidationWrapper(cls())
# Initialize the blobstore. This has to be done after the database has
# already been initialized, as users might want to use the database-backed
# blobstore implementation.
blobstore_name = config.CONFIG.Get("Blobstore.implementation")
try:
cls = blob_store.REGISTRY[blobstore_name]
except KeyError:
raise ValueError("No blob store %s found." % blobstore_name)
BLOBS = blob_store.BlobStoreValidationWrapper(cls()) | Initialize the data store.
Depends on the stats module being initialized. | InitializeDataStore | python | google/grr | grr/server/grr_response_server/data_store.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/data_store.py | Apache-2.0 |
def _GenHttpRequestProto(self):
"""Create a valid request object."""
request = jobs_pb2.HttpRequest()
request.source_ip = "127.0.0.1"
request.user_agent = "Firefox or something"
request.url = "http://test.com/test?omg=11%45x%20%20"
request.user = "anonymous"
request.timestamp = int(time.time() * 1e6)
request.size = 1000
return request | Create a valid request object. | _GenHttpRequestProto | python | google/grr | grr/server/grr_response_server/server_logging_test.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging_test.py | Apache-2.0 |
def StopHuntIfCrashLimitExceeded(hunt_id):
"""Stops the hunt if number of crashes exceeds the limit."""
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
# Do nothing if the hunt is already stopped.
if hunt_obj.hunt_state == rdf_hunt_objects.Hunt.HuntState.STOPPED:
return hunt_obj
if hunt_obj.crash_limit:
hunt_counters = data_store.REL_DB.ReadHuntCounters(hunt_id)
if hunt_counters.num_crashed_clients >= hunt_obj.crash_limit:
# Remove our rules from the foreman and cancel all the started flows.
# Hunt will be hard-stopped and it will be impossible to restart it.
reason = (
f"Hunt {hunt_obj.hunt_id} reached the crashes limit of"
f" {hunt_obj.crash_limit} and was stopped."
)
hunt_state_reason = hunts_pb2.Hunt.HuntStateReason.TOTAL_CRASHES_EXCEEDED
StopHunt(
hunt_obj.hunt_id,
hunt_state_reason=hunt_state_reason,
reason_comment=reason,
)
return hunt_obj | Stops the hunt if number of crashes exceeds the limit. | StopHuntIfCrashLimitExceeded | python | google/grr | grr/server/grr_response_server/hunt.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/hunt.py | Apache-2.0 |
def StopHuntIfCPUOrNetworkLimitsExceeded(hunt_id):
"""Stops the hunt if average limites are exceeded."""
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
# Do nothing if the hunt is already stopped.
if hunt_obj.hunt_state == rdf_hunt_objects.Hunt.HuntState.STOPPED:
return hunt_obj
hunt_counters = data_store.REL_DB.ReadHuntCounters(hunt_id)
# Check global hunt network bytes limit first.
if (
hunt_obj.total_network_bytes_limit
and hunt_counters.total_network_bytes_sent
> hunt_obj.total_network_bytes_limit
):
reason = (
f"Hunt {hunt_obj.hunt_id} reached the total network bytes sent limit of"
f" {hunt_obj.total_network_bytes_limit} and was stopped."
)
hunt_state_reason = hunts_pb2.Hunt.HuntStateReason.TOTAL_NETWORK_EXCEEDED
StopHunt(
hunt_obj.hunt_id,
hunt_state_reason=hunt_state_reason,
reason_comment=reason,
)
# Check that we have enough clients to apply average limits.
if hunt_counters.num_clients < MIN_CLIENTS_FOR_AVERAGE_THRESHOLDS:
return hunt_obj
# Check average per-client results count limit.
if hunt_obj.avg_results_per_client_limit:
avg_results_per_client = (
hunt_counters.num_results / hunt_counters.num_clients
)
if avg_results_per_client > hunt_obj.avg_results_per_client_limit:
# Stop the hunt since we get too many results per client.
reason = (
f"Hunt {hunt_obj.hunt_id} reached the average results per client "
f"limit of {hunt_obj.avg_results_per_client_limit} and was stopped."
)
hunt_state_reason = hunts_pb2.Hunt.HuntStateReason.AVG_RESULTS_EXCEEDED
StopHunt(
hunt_obj.hunt_id,
hunt_state_reason=hunt_state_reason,
reason_comment=reason,
)
# Check average per-client CPU seconds limit.
if hunt_obj.avg_cpu_seconds_per_client_limit:
avg_cpu_seconds_per_client = (
hunt_counters.total_cpu_seconds / hunt_counters.num_clients
)
if avg_cpu_seconds_per_client > hunt_obj.avg_cpu_seconds_per_client_limit:
# Stop the hunt since we use too many CPUs per client.
reason = (
f"Hunt {hunt_obj.hunt_id} reached the average CPU seconds per client"
f" limit of {hunt_obj.avg_cpu_seconds_per_client_limit} and was"
" stopped."
)
hunt_state_reason = hunts_pb2.Hunt.HuntStateReason.AVG_CPU_EXCEEDED
StopHunt(
hunt_obj.hunt_id,
hunt_state_reason=hunt_state_reason,
reason_comment=reason,
)
# Check average per-client network bytes limit.
if hunt_obj.avg_network_bytes_per_client_limit:
avg_network_bytes_per_client = (
hunt_counters.total_network_bytes_sent / hunt_counters.num_clients
)
if (
avg_network_bytes_per_client
> hunt_obj.avg_network_bytes_per_client_limit
):
# Stop the hunt since we use too many network bytes sent
# per client.
reason = (
f"Hunt {hunt_obj.hunt_id} reached the average network bytes per"
f" client limit of {hunt_obj.avg_network_bytes_per_client_limit} and"
" was stopped."
)
hunt_state_reason = hunts_pb2.Hunt.HuntStateReason.AVG_NETWORK_EXCEEDED
StopHunt(
hunt_obj.hunt_id,
hunt_state_reason=hunt_state_reason,
reason_comment=reason,
)
return hunt_obj | Stops the hunt if average limits are exceeded. | StopHuntIfCPUOrNetworkLimitsExceeded | python | google/grr | grr/server/grr_response_server/hunt.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/hunt.py | Apache-2.0
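As a worked illustration of the average-threshold gating above: per-client averages are only checked once the hunt has at least MIN_CLIENTS_FOR_AVERAGE_THRESHOLDS clients. The sketch below is a self-contained approximation; the constant value and the counter numbers are assumptions, not real GRR configuration.
# Illustrative only: the threshold constant here is an assumed value.
MIN_CLIENTS_FOR_AVERAGE_THRESHOLDS = 1000


def ShouldStopForAvgResults(num_clients, num_results, avg_results_limit):
  """True if the per-client result average exceeds the configured limit."""
  if num_clients < MIN_CLIENTS_FOR_AVERAGE_THRESHOLDS:
    # Too few clients for the average to be meaningful yet.
    return False
  return (num_results / num_clients) > avg_results_limit


# 250000 results over 2000 clients is 125 results per client on average,
# so a limit of 100 would stop the hunt.
print(ShouldStopForAvgResults(2000, 250000, 100))  # True
print(ShouldStopForAvgResults(500, 250000, 100))   # False: too few clients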
def CompleteHuntIfExpirationTimeReached(hunt_id: str) -> rdf_hunt_objects.Hunt:
"""Marks the hunt as complete if it's past its expiry time."""
# TODO(hanuszczak): This should not set the hunt state to `COMPLETED` but we
# should have a separate `EXPIRED` state instead and set that.
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
if (
hunt_obj.hunt_state
not in [
rdf_hunt_objects.Hunt.HuntState.STOPPED,
rdf_hunt_objects.Hunt.HuntState.COMPLETED,
]
and hunt_obj.expired
):
StopHunt(
hunt_obj.hunt_id,
hunts_pb2.Hunt.HuntStateReason.DEADLINE_REACHED,
reason_comment="Hunt completed.",
)
data_store.REL_DB.UpdateHuntObject(
hunt_obj.hunt_id, hunt_state=hunts_pb2.Hunt.HuntState.COMPLETED
)
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_obj.hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
return hunt_obj | Marks the hunt as complete if it's past its expiry time. | CompleteHuntIfExpirationTimeReached | python | google/grr | grr/server/grr_response_server/hunt.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/hunt.py | Apache-2.0 |
def CreateHunt(hunt_obj: hunts_pb2.Hunt):
"""Creates a hunt using a given hunt object."""
data_store.REL_DB.WriteHuntObject(hunt_obj)
if hunt_obj.output_plugins:
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
output_plugins_states = flow.GetOutputPluginStates(
hunt_obj.output_plugins, source=f"hunts/{hunt_obj.hunt_id}"
)
output_plugins_states = [
mig_flow_runner.ToProtoOutputPluginState(state)
for state in output_plugins_states
]
data_store.REL_DB.WriteHuntOutputPluginsStates(
hunt_obj.hunt_id, output_plugins_states
) | Creates a hunt using a given hunt object. | CreateHunt | python | google/grr | grr/server/grr_response_server/hunt.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/hunt.py | Apache-2.0 |
def CreateAndStartHunt(flow_name, flow_args, creator, **kwargs):
"""Creates and starts a new hunt."""
# This interface takes a time when the hunt expires. However, the legacy hunt
# starting interface took an rdfvalue.DurationSeconds object which was then
# added to the current time to get the expiry. This check exists to make sure
# we don't confuse the two.
if "duration" in kwargs:
precondition.AssertType(kwargs["duration"], rdfvalue.Duration)
hunt_args = rdf_hunt_objects.HuntArguments.Standard(
flow_name=flow_name, flow_args=rdf_structs.AnyValue.Pack(flow_args)
)
hunt_obj = rdf_hunt_objects.Hunt(
creator=creator,
args=hunt_args,
create_time=rdfvalue.RDFDatetime.Now(),
**kwargs,
)
hunt_obj = mig_hunt_objects.ToProtoHunt(hunt_obj)
CreateHunt(hunt_obj)
StartHunt(hunt_obj.hunt_id)
return hunt_obj.hunt_id | Creates and starts a new hunt. | CreateAndStartHunt | python | google/grr | grr/server/grr_response_server/hunt.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/hunt.py | Apache-2.0 |
def _ScheduleGenericHunt(hunt_obj: rdf_hunt_objects.Hunt):
"""Adds foreman rules for a generic hunt."""
# TODO: Migrate foreman conditions to use relative expiration
# durations instead of absolute timestamps.
foreman_condition = foreman_rules.ForemanCondition(
creation_time=rdfvalue.RDFDatetime.Now(),
expiration_time=hunt_obj.init_start_time + hunt_obj.duration,
description=f"Hunt {hunt_obj.hunt_id} {hunt_obj.args.hunt_type}",
client_rule_set=hunt_obj.client_rule_set,
hunt_id=hunt_obj.hunt_id,
)
# Make sure the rule makes sense.
foreman_condition.Validate()
proto_foreman_condition = mig_foreman_rules.ToProtoForemanCondition(
foreman_condition
)
data_store.REL_DB.WriteForemanRule(proto_foreman_condition) | Adds foreman rules for a generic hunt. | _ScheduleGenericHunt | python | google/grr | grr/server/grr_response_server/hunt.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/hunt.py | Apache-2.0 |
def _ScheduleVariableHunt(hunt_obj: rdf_hunt_objects.Hunt):
"""Schedules flows for a variable hunt."""
if hunt_obj.client_rate != 0:
raise VariableHuntCanNotHaveClientRateError(
hunt_obj.hunt_id, hunt_obj.client_rate
)
seen_clients = set()
for flow_group in hunt_obj.args.variable.flow_groups:
for client_id in flow_group.client_ids:
if client_id in seen_clients:
raise CanStartAtMostOneFlowPerClientError(hunt_obj.hunt_id, client_id)
seen_clients.add(client_id)
now = rdfvalue.RDFDatetime.Now()
for flow_group in hunt_obj.args.variable.flow_groups:
flow_cls = registry.FlowRegistry.FlowClassByName(flow_group.flow_name)
if flow_group.HasField("flow_args"):
flow_args = flow_group.flow_args.Unpack(flow_cls.args_type)
else:
flow_args = None
for client_id in flow_group.client_ids:
flow.StartFlow(
client_id=client_id,
creator=hunt_obj.creator,
cpu_limit=hunt_obj.per_client_cpu_limit,
network_bytes_limit=hunt_obj.per_client_network_bytes_limit,
flow_cls=flow_cls,
flow_args=flow_args,
# Setting start_at explicitly ensures that flow.StartFlow won't
# process flow's Start state right away. Only the flow request
# will be scheduled.
start_at=now,
parent=flow.FlowParent.FromHuntID(hunt_obj.hunt_id),
) | Schedules flows for a variable hunt. | _ScheduleVariableHunt | python | google/grr | grr/server/grr_response_server/hunt.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/hunt.py | Apache-2.0 |
def StartHunt(hunt_id) -> rdf_hunt_objects.Hunt:
"""Starts a hunt with a given id."""
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
num_hunt_clients = data_store.REL_DB.CountHuntFlows(hunt_id)
if hunt_obj.hunt_state != hunt_obj.HuntState.PAUSED:
raise OnlyPausedHuntCanBeStartedError(hunt_obj)
data_store.REL_DB.UpdateHuntObject(
hunt_id,
hunt_state=hunts_pb2.Hunt.HuntState.STARTED,
start_time=rdfvalue.RDFDatetime.Now(),
num_clients_at_start_time=num_hunt_clients,
)
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
if hunt_obj.args.hunt_type == hunt_obj.args.HuntType.STANDARD:
_ScheduleGenericHunt(hunt_obj)
elif hunt_obj.args.hunt_type == hunt_obj.args.HuntType.VARIABLE:
_ScheduleVariableHunt(hunt_obj)
else:
raise UnknownHuntTypeError(
f"Invalid hunt type for hunt {hunt_id}: {hunt_obj.args.hunt_type}"
)
return hunt_obj | Starts a hunt with a given id. | StartHunt | python | google/grr | grr/server/grr_response_server/hunt.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/hunt.py | Apache-2.0 |
def PauseHunt(
hunt_id,
hunt_state_reason=None,
reason=None,
) -> rdf_hunt_objects.Hunt:
"""Pauses a hunt with a given id."""
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
if hunt_obj.hunt_state != hunt_obj.HuntState.STARTED:
raise OnlyStartedHuntCanBePausedError(hunt_obj)
data_store.REL_DB.UpdateHuntObject(
hunt_id,
hunt_state=hunts_pb2.Hunt.HuntState.PAUSED,
hunt_state_reason=hunt_state_reason,
hunt_state_comment=reason,
)
data_store.REL_DB.RemoveForemanRule(hunt_id=hunt_obj.hunt_id)
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
return hunt_obj | Pauses a hunt with a given id. | PauseHunt | python | google/grr | grr/server/grr_response_server/hunt.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/hunt.py | Apache-2.0 |
def StopHunt(
hunt_id: str,
hunt_state_reason: Optional[
hunts_pb2.Hunt.HuntStateReason.ValueType
] = None,
reason_comment: Optional[str] = None,
) -> rdf_hunt_objects.Hunt:
"""Stops a hunt with a given id."""
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
if hunt_obj.hunt_state not in [
hunt_obj.HuntState.STARTED,
hunt_obj.HuntState.PAUSED,
]:
raise OnlyStartedOrPausedHuntCanBeStoppedError(hunt_obj)
data_store.REL_DB.UpdateHuntObject(
hunt_id,
hunt_state=hunts_pb2.Hunt.HuntState.STOPPED,
hunt_state_reason=hunt_state_reason,
hunt_state_comment=reason_comment,
)
data_store.REL_DB.RemoveForemanRule(hunt_id=hunt_obj.hunt_id)
# TODO: Stop matching on string (comment).
if (
hunt_state_reason != hunts_pb2.Hunt.HuntStateReason.TRIGGERED_BY_USER
and reason_comment is not None
and reason_comment != CANCELLED_BY_USER
and hunt_obj.creator not in access_control.SYSTEM_USERS
):
notification.Notify(
hunt_obj.creator,
objects_pb2.UserNotification.Type.TYPE_HUNT_STOPPED,
reason_comment,
objects_pb2.ObjectReference(
reference_type=objects_pb2.ObjectReference.Type.HUNT,
hunt=objects_pb2.HuntReference(hunt_id=hunt_obj.hunt_id),
),
)
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
return hunt_obj | Stops a hunt with a given id. | StopHunt | python | google/grr | grr/server/grr_response_server/hunt.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/hunt.py | Apache-2.0 |
def UpdateHunt(
hunt_id,
client_limit=None,
client_rate=None,
duration=None,
) -> rdf_hunt_objects.Hunt:
"""Updates a hunt (it must be paused to be updated)."""
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
if hunt_obj.hunt_state != hunt_obj.HuntState.PAUSED:
raise OnlyPausedHuntCanBeModifiedError(hunt_obj)
data_store.REL_DB.UpdateHuntObject(
hunt_id,
client_limit=client_limit,
client_rate=client_rate,
duration=duration,
)
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
return hunt_obj | Updates a hunt (it must be paused to be updated). | UpdateHunt | python | google/grr | grr/server/grr_response_server/hunt.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/hunt.py | Apache-2.0 |
def StartHuntFlowOnClient(client_id, hunt_id):
"""Starts a flow corresponding to a given hunt on a given client."""
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
# There may be a little race between foreman rules being removed and
# foreman scheduling a client on an (already) paused hunt. Making sure
# we don't lose clients in such a race by accepting clients for paused
# hunts.
if not models_hunts.IsHuntSuitableForFlowProcessing(hunt_obj.hunt_state):
return
hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
if hunt_obj.args.hunt_type == hunt_obj.args.HuntType.STANDARD:
hunt_args = hunt_obj.args.standard
if hunt_obj.client_rate > 0:
# Given that we use caching in _GetNumClients and hunt_obj may be updated
# in another process, we have to account for cases where num_clients_diff
# may go below 0.
num_clients_diff = max(
0,
_GetNumClients(hunt_obj.hunt_id) - hunt_obj.num_clients_at_start_time,
)
next_client_due_msecs = int(
num_clients_diff / hunt_obj.client_rate * 60e6
)
start_at = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(
hunt_obj.last_start_time.AsMicrosecondsSinceEpoch()
+ next_client_due_msecs
)
else:
start_at = None
# TODO(user): remove client_rate support when AFF4 is gone.
# In REL_DB always work as if client rate is 0.
flow_cls = registry.FlowRegistry.FlowClassByName(hunt_args.flow_name)
if hunt_args.HasField("flow_args"):
flow_args = hunt_args.flow_args.Unpack(flow_cls.args_type)
else:
flow_args = None
flow.StartFlow(
client_id=client_id,
creator=hunt_obj.creator,
cpu_limit=hunt_obj.per_client_cpu_limit,
network_bytes_limit=hunt_obj.per_client_network_bytes_limit,
flow_cls=flow_cls,
flow_args=flow_args,
start_at=start_at,
parent=flow.FlowParent.FromHuntID(hunt_id),
)
if hunt_obj.client_limit:
if _GetNumClients(hunt_obj.hunt_id) >= hunt_obj.client_limit:
try:
PauseHunt(
hunt_id,
hunt_state_reason=rdf_hunt_objects.Hunt.HuntStateReason.TOTAL_CLIENTS_EXCEEDED,
)
except OnlyStartedHuntCanBePausedError:
pass
elif hunt_obj.args.hunt_type == hunt_obj.args.HuntType.VARIABLE:
raise NotImplementedError()
else:
raise UnknownHuntTypeError(
f"Can't determine hunt type when starting hunt {client_id} on client"
f" {hunt_id}."
) | Starts a flow corresponding to a given hunt on a given client. | StartHuntFlowOnClient | python | google/grr | grr/server/grr_response_server/hunt.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/hunt.py | Apache-2.0 |
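The client_rate pacing in StartHuntFlowOnClient boils down to a small piece of arithmetic: the N-th newly seen client is delayed by N / client_rate minutes after the hunt's last start time. A minimal sketch with made-up numbers:
def NextClientDelayMicros(num_clients_diff, client_rate):
  """Microsecond delay before the next hunt flow is allowed to start."""
  # client_rate is clients per minute; 60e6 microseconds per minute.
  return int(num_clients_diff / client_rate * 60e6)


# With a rate of 20 clients/minute, the 10th extra client is scheduled
# 30 seconds (30000000 microseconds) after the hunt's last start time.
print(NextClientDelayMicros(10, 20))  # 30000000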
def ProcessMessages(self, msgs):
"""This is where messages get processed.
Override in derived classes.
Args:
msgs: The GrrMessages sent by the client.
""" | This is where messages get processed.
Override in derived classes.
Args:
msgs: The GrrMessages sent by the client. | ProcessMessages | python | google/grr | grr/server/grr_response_server/message_handlers.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/message_handlers.py | Apache-2.0 |
def __init__(self, source_urn=None):
"""OutputPlugin constructor.
Args:
source_urn: URN identifying source of the data (hunt or flow).
Raises:
ValueError: If one of the keyword arguments is empty.
"""
super().__init__()
if not source_urn:
raise ValueError("source_urn can't be empty.")
self.source_urn = source_urn | OutputPlugin constructor.
Args:
source_urn: URN identifying source of the data (hunt or flow).
Raises:
ValueError: If one of the keyword arguments is empty. | __init__ | python | google/grr | grr/server/grr_response_server/instant_output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/instant_output_plugin.py | Apache-2.0 |
def output_file_name(self):
"""Name of the file where plugin's output should be written to."""
safe_path = re.sub(r":|/", "_", self.source_urn.Path().lstrip("/"))
return "results_%s%s" % (safe_path, self.output_file_extension) | Name of the file where plugin's output should be written to. | output_file_name | python | google/grr | grr/server/grr_response_server/instant_output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/instant_output_plugin.py | Apache-2.0 |
def Start(self):
"""Start method is called in the beginning of the export.
Yields:
Chunks of bytes.
""" | Start method is called in the beginning of the export.
Yields:
Chunks of bytes. | Start | python | google/grr | grr/server/grr_response_server/instant_output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/instant_output_plugin.py | Apache-2.0 |
def ProcessValues(self, value_cls, values_generator_fn):
"""Processes a batch of values with the same type.
ProcessValues is called *once per value type* for each value type in
the flow/hunt results collection.
Args:
value_cls: Class identifying type of the values to be processed.
values_generator_fn: Function returning an iterable with values. Each
value is a GRRMessage wrapping a value of a value_cls type.
values_generator_fn may be called multiple times within 1
ProcessValues() call - for example, when multiple passes over the data
are required.
"""
raise NotImplementedError() | Processes a batch of values with the same type.
ProcessValues is called *once per value type* for each value type in
the flow/hunt results collection.
Args:
value_cls: Class identifying type of the values to be processed.
values_generator_fn: Function returning an iterable with values. Each
value is a GRRMessage wrapping a value of a value_cls type.
values_generator_fn may be called multiple times within 1
ProcessValues() call - for example, when multiple passes over the data
are required. | ProcessValues | python | google/grr | grr/server/grr_response_server/instant_output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/instant_output_plugin.py | Apache-2.0 |
def Finish(self):
"""Finish method is called at the very end of the export.
Yields:
Chunks of bytes.
""" | Finish method is called at the very end of the export.
Yields:
Chunks of bytes. | Finish | python | google/grr | grr/server/grr_response_server/instant_output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/instant_output_plugin.py | Apache-2.0 |
def _GetMetadataForClients(self, client_urns):
"""Fetches metadata for a given list of clients."""
result = {}
metadata_to_fetch = set()
for urn in client_urns:
try:
result[urn] = self._cached_metadata[urn]
except KeyError:
metadata_to_fetch.add(urn)
if metadata_to_fetch:
client_ids = set(urn.Basename() for urn in metadata_to_fetch)
infos = data_store.REL_DB.MultiReadClientFullInfo(client_ids)
fetched_metadata = [
export.GetMetadata(client_id, mig_objects.ToRDFClientFullInfo(info))
for client_id, info in infos.items()
]
for metadata in fetched_metadata:
metadata.source_urn = self.source_urn
self._cached_metadata[metadata.client_urn] = metadata
result[metadata.client_urn] = metadata
metadata_to_fetch.remove(metadata.client_urn)
for urn in metadata_to_fetch:
default_mdata = base.ExportedMetadata(source_urn=self.source_urn)
result[urn] = default_mdata
self._cached_metadata[urn] = default_mdata
return [result[urn] for urn in client_urns] | Fetches metadata for a given list of clients. | _GetMetadataForClients | python | google/grr | grr/server/grr_response_server/instant_output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/instant_output_plugin.py | Apache-2.0 |
def GetExportOptions(self):
"""Rerturns export options to be used by export converter."""
return base.ExportOptions() | Rerturns export options to be used by export converter. | GetExportOptions | python | google/grr | grr/server/grr_response_server/instant_output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/instant_output_plugin.py | Apache-2.0 |
def ProcessSingleTypeExportedValues(self, original_type, exported_values):
"""Processes exported values of the same type.
Exported_values are guaranteed to have the same type. Consequently, this
function may be called multiple times with the same original_type
argument. Typical example: when export converters generate multiple
kinds of exported values for a given source value (for example,
Process is converted to ExportedProcess and ExportedNetworkConnection
values).
Args:
original_type: Class of the original set of values that were converted to
exported_values.
exported_values: An iterator with exported value. All values are
guaranteed to have the same class.
Yields:
Chunks of bytes.
"""
raise NotImplementedError() | Processes exported values of the same type.
Exported_values are guaranteed to have the same type. Consequently, this
function may be called multiple times with the same original_type
argument. Typical example: when export converters generate multiple
kinds of exported values for a given source value (for example,
Process is converted to ExportedProcess and ExportedNetworkConnection
values).
Args:
original_type: Class of the original set of values that were converted to
exported_values.
exported_values: An iterator with exported value. All values are
guaranteed to have the same class.
Yields:
Chunks of bytes. | ProcessSingleTypeExportedValues | python | google/grr | grr/server/grr_response_server/instant_output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/instant_output_plugin.py | Apache-2.0 |
def _GenerateSingleTypeIteration(
self, next_types, processed_types, converted_responses
):
"""Yields responses of a given type only.
_GenerateSingleTypeIteration iterates through converted_responses and
only yields responses of the same type. The type is either popped from
next_types or inferred from the first item of converted_responses.
The type is added to a set of processed_types.
Along the way _GenerateSingleTypeIteration updates next_types set.
All newly encountered and not previously processed types are added to
next_types set.
Calling _GenerateSingleTypeIteration multiple times allows doing
multiple passes on converted responses and emitting converted responses
of the same type continuously (so that they can be written into
the same file by the plugin).
Args:
next_types: Set of value type classes that will be used in further
iterations.
processed_types: Set of value type classes that have been used already.
converted_responses: Iterable with values to iterate over.
Yields:
Values from converted_response with the same type. Type is either
popped from the next_types set or inferred from the first
converted_responses value.
"""
if not next_types:
current_type = None
else:
current_type = next_types.pop()
processed_types.add(current_type)
for converted_response in converted_responses:
if not current_type:
current_type = converted_response.__class__
processed_types.add(current_type)
if converted_response.__class__ != current_type:
if converted_response.__class__ not in processed_types:
next_types.add(converted_response.__class__)
continue
yield converted_response | Yields responses of a given type only.
_GenerateSingleTypeIteration iterates through converted_responses and
only yields responses of the same type. The type is either popped from
next_types or inferred from the first item of converted_responses.
The type is added to a set of processed_types.
Along the way _GenerateSingleTypeIteration updates next_types set.
All newly encountered and not previously processed types are added to
next_types set.
Calling _GenerateSingleTypeIteration multiple times allows doing
multiple passes on converted responses and emitting converted responses
of the same type continuously (so that they can be written into
the same file by the plugin).
Args:
next_types: Set of value type classes that will be used in further
iterations.
processed_types: Set of value type classes that have been used already.
converted_responses: Iterable with values to iterate over.
Yields:
Values from converted_response with the same type. Type is either
popped from the next_types set or inferred from the first
converted_responses value. | _GenerateSingleTypeIteration | python | google/grr | grr/server/grr_response_server/instant_output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/instant_output_plugin.py | Apache-2.0 |
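The behavior of _GenerateSingleTypeIteration is easiest to see with plain values: each pass over a mixed-type stream yields exactly one type and records the types it skipped for later passes. A standalone sketch of the same idea, with no GRR types involved:
def SingleTypePass(next_types, processed_types, values):
  """Yields values of one type per call, noting unseen types for later passes."""
  if not next_types:
    current_type = None
  else:
    current_type = next_types.pop()
    processed_types.add(current_type)
  for value in values:
    if current_type is None:
      current_type = type(value)
      processed_types.add(current_type)
    if type(value) is not current_type:
      if type(value) not in processed_types:
        next_types.add(type(value))
      continue
    yield value


mixed = [1, "a", 2, "b", 3.0]
next_types, processed_types = set(), set()
print(list(SingleTypePass(next_types, processed_types, mixed)))  # [1, 2]
while next_types:
  # Each extra pass re-reads the stream and emits one of the remaining types.
  print(list(SingleTypePass(next_types, processed_types, mixed)))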
def _GenerateConvertedValues(self, converter, grr_messages):
"""Generates converted values using given converter from given messages.
Groups values in batches of BATCH_SIZE size and applies the converter
to each batch.
Args:
converter: ExportConverter instance.
grr_messages: An iterable (a generator is assumed) with GRRMessage values.
Yields:
Values generated by the converter.
Raises:
ValueError: if any of the GrrMessage objects doesn't have "source" set.
"""
for batch in collection.Batch(grr_messages, self.BATCH_SIZE):
metadata_items = self._GetMetadataForClients([gm.source for gm in batch])
batch_with_metadata = zip(metadata_items, [gm.payload for gm in batch])
for result in converter.BatchConvert(batch_with_metadata):
yield result | Generates converted values using given converter from given messages.
Groups values in batches of BATCH_SIZE size and applies the converter
to each batch.
Args:
converter: ExportConverter instance.
grr_messages: An iterable (a generator is assumed) with GRRMessage values.
Yields:
Values generated by the converter.
Raises:
ValueError: if any of the GrrMessage objects doesn't have "source" set. | _GenerateConvertedValues | python | google/grr | grr/server/grr_response_server/instant_output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/instant_output_plugin.py | Apache-2.0 |
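The collection.Batch call above groups a possibly long generator of messages into fixed-size chunks so that each converter call sees at most BATCH_SIZE items. Below is a plain-Python stand-in for that helper; its exact behavior is an assumption based on how it is used here.
import itertools


def Batch(iterable, batch_size):
  """Yields successive lists of at most batch_size items from iterable."""
  iterator = iter(iterable)
  while True:
    chunk = list(itertools.islice(iterator, batch_size))
    if not chunk:
      return
    yield chunk


print(list(Batch(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]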
def ApplyPluginToMultiTypeCollection(
plugin, output_collection, source_urn=None
):
"""Applies instant output plugin to a multi-type collection.
Args:
plugin: InstantOutputPlugin instance.
output_collection: MultiTypeCollection instance.
source_urn: If not None, override source_urn for collection items. This has
to be used when exporting flow results - their GrrMessages don't have
"source" attribute set.
Yields:
Bytes chunks, as generated by the plugin.
"""
for chunk in plugin.Start():
yield chunk
for stored_type_name in sorted(output_collection.ListStoredTypes()):
stored_cls = rdfvalue.RDFValue.classes[stored_type_name]
# pylint: disable=cell-var-from-loop
def GetValues():
for timestamp, value in output_collection.ScanByType(stored_type_name):
_ = timestamp
if source_urn:
value.source = source_urn
yield value
# pylint: enable=cell-var-from-loop
for chunk in plugin.ProcessValues(stored_cls, GetValues):
yield chunk
for chunk in plugin.Finish():
yield chunk | Applies instant output plugin to a multi-type collection.
Args:
plugin: InstantOutputPlugin instance.
output_collection: MultiTypeCollection instance.
source_urn: If not None, override source_urn for collection items. This has
to be used when exporting flow results - their GrrMessages don't have
"source" attribute set.
Yields:
Bytes chunks, as generated by the plugin. | ApplyPluginToMultiTypeCollection | python | google/grr | grr/server/grr_response_server/instant_output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/instant_output_plugin.py | Apache-2.0 |
def ApplyPluginToTypedCollection(plugin, type_names, fetch_fn):
"""Applies instant output plugin to a collection of results.
Args:
plugin: InstantOutputPlugin instance.
type_names: List of type names (strings) to be processed.
fetch_fn: Function that takes a type name as an argument and returns
available items (FlowResult) corresponding to this type. Items are
returned as a generator.
Yields:
Bytes chunks, as generated by the plugin.
"""
for chunk in plugin.Start():
yield chunk
def GetValues(tn):
for v in fetch_fn(tn):
yield v
for type_name in sorted(type_names):
stored_cls = rdfvalue.RDFValue.classes[type_name]
for chunk in plugin.ProcessValues(
stored_cls, functools.partial(GetValues, type_name)
):
yield chunk
for chunk in plugin.Finish():
yield chunk | Applies instant output plugin to a collection of results.
Args:
plugin: InstantOutputPlugin instance.
type_names: List of type names (strings) to be processed.
fetch_fn: Function that takes a type name as an argument and returns
available items (FlowResult) corresponding to this type. Items are
returned as a generator.
Yields:
Bytes chunks, as generated by the plugin. | ApplyPluginToTypedCollection | python | google/grr | grr/server/grr_response_server/instant_output_plugin.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/instant_output_plugin.py | Apache-2.0 |
def GetClientVersion(client_id):
"""Returns last known GRR version that the client used."""
sinfo = data_store.REL_DB.ReadClientStartupInfo(client_id=client_id)
if sinfo is not None:
return sinfo.client_info.client_version
else:
return config.CONFIG["Source.version_numeric"] | Returns last known GRR version that the client used. | GetClientVersion | python | google/grr | grr/server/grr_response_server/data_store_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/data_store_utils.py | Apache-2.0 |
def GetClientOs(client_id: str) -> str:
"""Returns last known operating system name that the client used."""
if (snapshot := data_store.REL_DB.ReadClientSnapshot(client_id)) is not None:
return snapshot.knowledge_base.os
else:
return "" | Returns last known operating system name that the client used. | GetClientOs | python | google/grr | grr/server/grr_response_server/data_store_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/data_store_utils.py | Apache-2.0 |
def GetFileHashEntry(fd):
"""Returns an `rdf_crypto.Hash` instance for given AFF4 file descriptor."""
# Hash file store is not migrated to RELDB just yet, hence the first check.
client_id, vfs_path = fd.urn.Split(2)
path_type, components = rdf_objects.ParseCategorizedPath(vfs_path)
path_info = data_store.REL_DB.ReadPathInfo(client_id, path_type, components)
if path_info is None:
return None
return mig_objects.ToRDFPathInfo(path_info).hash_entry | Returns an `rdf_crypto.Hash` instance for given AFF4 file descriptor. | GetFileHashEntry | python | google/grr | grr/server/grr_response_server/data_store_utils.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/data_store_utils.py | Apache-2.0 |
def GetOutputPluginStates(output_plugins, source=None):
"""Initializes state for a list of output plugins."""
output_plugins_states = []
for plugin_descriptor in output_plugins:
plugin_class = plugin_descriptor.GetPluginClass()
try:
_, plugin_state = plugin_class.CreatePluginAndDefaultState(
source_urn=source, args=plugin_descriptor.args
)
except Exception as e: # pylint: disable=broad-except
raise ValueError(
"Plugin %s failed to initialize (%s)" % (plugin_class, e)
) from e
output_plugins_states.append(
rdf_flow_runner.OutputPluginState(
plugin_state=plugin_state, plugin_descriptor=plugin_descriptor
)
)
return output_plugins_states | Initializes state for a list of output plugins. | GetOutputPluginStates | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
def RandomFlowId() -> str:
"""Returns a random flow id encoded as a hex string."""
return "{:016X}".format(random.Id64()) | Returns a random flow id encoded as a hex string. | RandomFlowId | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
def __init__(
self,
parent_type: _ParentType,
parent_id: Optional[str] = None,
parent_flow_obj=None,
):
"""Instantiates a FlowParent. Use the class methods instead."""
self.type = parent_type
self.id = parent_id
self.flow_obj = parent_flow_obj | Instantiates a FlowParent. Use the class methods instead. | __init__ | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
def is_flow(self) -> bool:
"""True, if the flow is started as child-flow."""
return self.type == _ParentType.FLOW | True, if the flow is started as child-flow. | is_flow | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
def is_hunt(self) -> bool:
"""True, if the flow is started as part of a hunt."""
return self.type == _ParentType.HUNT | True, if the flow is started as part of a hunt. | is_hunt | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
def is_root(self) -> bool:
"""True, if the flow is started as top-level flow."""
return self.type == _ParentType.ROOT | True, if the flow is started as top-level flow. | is_root | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
def is_scheduled_flow(self) -> bool:
"""True, if the flow is started from a ScheduledFlow."""
return self.type == _ParentType.SCHEDULED_FLOW | True, if the flow is started from a ScheduledFlow. | is_scheduled_flow | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
def FromFlow(cls, flow_obj) -> "FlowParent":
"""References another flow (flow_base.FlowBase) as parent."""
return cls(_ParentType.FLOW, flow_obj.rdf_flow.flow_id, flow_obj) | References another flow (flow_base.FlowBase) as parent. | FromFlow | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
def FromHuntID(cls, hunt_id: str) -> "FlowParent":
"""References another hunt as parent by its ID."""
return cls(_ParentType.HUNT, hunt_id) | References another hunt as parent by its ID. | FromHuntID | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
def FromRoot(cls) -> "FlowParent":
"""References no parent to mark a flow as top-level flow."""
return cls(_ParentType.ROOT) | References no parent to mark a flow as top-level flow. | FromRoot | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
def FromScheduledFlowID(cls, scheduled_flow_id: str) -> "FlowParent":
"""References a ScheduledFlow as parent by its ID."""
return cls(_ParentType.SCHEDULED_FLOW, scheduled_flow_id) | References a ScheduledFlow as parent by its ID. | FromScheduledFlowID | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
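FlowParent illustrates the usual pattern of a constructor that callers are meant to avoid plus named classmethod factories. A self-contained sketch of the same pattern follows; the enum values are assumptions mirroring _ParentType, which is not shown in this excerpt.
import enum
from typing import Optional


class ParentType(enum.Enum):
  ROOT = "root"
  FLOW = "flow"
  HUNT = "hunt"
  SCHEDULED_FLOW = "scheduled_flow"


class Parent:
  """Minimal FlowParent-like reference built through classmethod factories."""

  def __init__(self, parent_type: ParentType, parent_id: Optional[str] = None):
    self.type = parent_type
    self.id = parent_id

  @classmethod
  def FromHuntID(cls, hunt_id: str) -> "Parent":
    return cls(ParentType.HUNT, hunt_id)

  @classmethod
  def FromRoot(cls) -> "Parent":
    return cls(ParentType.ROOT)

  @property
  def is_hunt(self) -> bool:
    return self.type is ParentType.HUNT


print(Parent.FromHuntID("ABCDEF0123456789").is_hunt)  # True
print(Parent.FromRoot().is_hunt)                      # False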
def StartFlow(
client_id: Optional[str] = None,
cpu_limit: Optional[int] = None,
creator: Optional[str] = None,
flow_args: Optional[rdf_structs.RDFStruct] = None,
flow_cls=None,
network_bytes_limit: Optional[int] = None,
original_flow: Optional[rdf_objects.FlowReference] = None,
output_plugins: Optional[
Sequence[rdf_output_plugin.OutputPluginDescriptor]
] = None,
start_at: Optional[rdfvalue.RDFDatetime] = None,
parent: Optional[FlowParent] = None,
runtime_limit: Optional[rdfvalue.Duration] = None,
) -> str:
"""The main factory function for creating and executing a new flow.
Args:
client_id: ID of the client this flow should run on.
cpu_limit: CPU limit in seconds for this flow.
creator: Username that requested this flow.
flow_args: An arg protocol buffer which is an instance of the required
flow's args_type class attribute.
flow_cls: Class of the flow that should be started.
network_bytes_limit: Limit on the network traffic this flow can generate.
original_flow: A FlowReference object in case this flow was copied from
another flow.
output_plugins: An OutputPluginDescriptor object indicating what output
plugins should be used for this flow.
start_at: If specified, flow will be started not immediately, but at a given
time.
parent: A FlowParent referencing the parent, or None for top-level flows.
runtime_limit: Runtime limit as Duration for all ClientActions.
Returns:
the flow id of the new flow.
Raises:
ValueError: Unknown or invalid parameters were provided.
"""
# Is the required flow a known flow?
try:
registry.FlowRegistry.FlowClassByName(flow_cls.__name__)
except ValueError:
GRR_FLOW_INVALID_FLOW_COUNT.Increment()
raise ValueError("Unable to locate flow %s" % flow_cls.__name__)
if not client_id:
raise ValueError("Client_id is needed to start a flow.")
# Now parse the flow args into the new object from the keywords.
if flow_args is None:
flow_args = flow_cls.args_type()
if not isinstance(flow_args, flow_cls.args_type):
raise TypeError(
f"Flow args must be of type {flow_cls.args_type}, got"
f" {type(flow_args)} with contents: {flow_args!r}."
)
# Check that the flow args are valid.
flow_args.Validate()
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_class_name=flow_cls.__name__,
args=flow_args,
creator=creator,
output_plugins=output_plugins,
original_flow=original_flow,
flow_state="RUNNING",
)
if parent is None:
parent = FlowParent.FromRoot()
if parent.is_hunt or parent.is_scheduled_flow:
# When starting a flow from a hunt or ScheduledFlow, re-use the parent's id
# to make it easy to find flows. For hunts, every client has a top-level
# flow with the hunt's id.
rdf_flow.flow_id = parent.id
else: # For new top-level and child flows, assign a random ID.
rdf_flow.flow_id = RandomFlowId()
# For better performance, only do conflicting IDs check for top-level flows.
if not parent.is_flow:
try:
data_store.REL_DB.ReadFlowObject(client_id, rdf_flow.flow_id)
raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)
except db.UnknownFlowError:
pass
if parent.is_flow: # A flow is a nested flow.
parent_rdf_flow = parent.flow_obj.rdf_flow
rdf_flow.long_flow_id = "%s/%s" % (
parent_rdf_flow.long_flow_id,
rdf_flow.flow_id,
)
rdf_flow.parent_flow_id = parent_rdf_flow.flow_id
rdf_flow.parent_hunt_id = parent_rdf_flow.parent_hunt_id
rdf_flow.parent_request_id = parent.flow_obj.GetCurrentOutboundId()
if parent_rdf_flow.creator:
rdf_flow.creator = parent_rdf_flow.creator
elif parent.is_hunt: # Root-level hunt-induced flow.
rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)
rdf_flow.parent_hunt_id = parent.id
elif parent.is_root or parent.is_scheduled_flow:
# A flow is a root-level non-hunt flow.
rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)
else:
raise ValueError(f"Unknown flow parent type {parent}")
if output_plugins:
rdf_flow.output_plugins_states = GetOutputPluginStates(
output_plugins, rdf_flow.long_flow_id
)
if network_bytes_limit is not None:
rdf_flow.network_bytes_limit = network_bytes_limit
if cpu_limit is not None:
rdf_flow.cpu_limit = cpu_limit
if runtime_limit is not None:
rdf_flow.runtime_limit_us = runtime_limit
logging.info(
"Starting %s(%s) on %s (%s)",
rdf_flow.long_flow_id,
rdf_flow.flow_class_name,
client_id,
start_at or "now",
)
rdf_flow.current_state = "Start"
flow_obj = flow_cls(rdf_flow)
# Prevent a race condition, where a flow is scheduled twice, because one
# worker inserts the row and another worker silently updates the existing row.
allow_update = False
if start_at is None:
# Store an initial version of the flow straight away. This is needed so the
# database doesn't raise consistency errors due to missing parent keys when
# writing logs / errors / results which might happen in Start().
try:
proto_flow = mig_flow_objects.ToProtoFlow(rdf_flow)
data_store.REL_DB.WriteFlowObject(proto_flow, allow_update=False)
except db.FlowExistsError:
raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)
allow_update = True
try:
# Just run the first state inline. NOTE: Running synchronously means
# that this runs on the thread that starts the flow. The advantage is
# that the Start method can raise any errors immediately.
flow_obj.Start()
# The flow does not need to actually remain running.
if not flow_obj.outstanding_requests:
flow_obj.RunStateMethod("End")
# Additional check for the correct state in case the End method raised
# and terminated the flow.
if flow_obj.IsRunning():
flow_obj.MarkDone()
except Exception as e: # pylint: disable=broad-except
# We catch all exceptions that happen in Start() and mark the flow as
# failed.
msg = str(e)
flow_obj.Error(error_message=msg, backtrace=traceback.format_exc())
else:
flow_obj.CallState("Start", start_time=start_at)
flow_obj.PersistState()
try:
proto_flow = mig_flow_objects.ToProtoFlow(rdf_flow)
data_store.REL_DB.WriteFlowObject(proto_flow, allow_update=allow_update)
except db.FlowExistsError:
raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)
if parent.is_flow:
# We can optimize here and not write requests/responses to the database
# since we have to do this for the parent flow at some point anyways.
parent.flow_obj.MergeQueuedMessages(flow_obj)
else:
flow_obj.FlushQueuedMessages()
return rdf_flow.flow_id | The main factory function for creating and executing a new flow.
Args:
client_id: ID of the client this flow should run on.
cpu_limit: CPU limit in seconds for this flow.
creator: Username that requested this flow.
flow_args: An arg protocol buffer which is an instance of the required
flow's args_type class attribute.
flow_cls: Class of the flow that should be started.
network_bytes_limit: Limit on the network traffic this flow can generate.
original_flow: A FlowReference object in case this flow was copied from
another flow.
output_plugins: An OutputPluginDescriptor object indicating what output
plugins should be used for this flow.
start_at: If specified, flow will be started not immediately, but at a given
time.
parent: A FlowParent referencing the parent, or None for top-level flows.
runtime_limit: Runtime limit as Duration for all ClientActions.
Returns:
the flow id of the new flow.
Raises:
ValueError: Unknown or invalid parameters were provided. | StartFlow | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
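The id assignment branches in StartFlow can be summarized in a few lines: hunt- and scheduled-flow parents reuse the parent's id, nested flows extend the parent's long id, and everything else gets a fresh random id under the client. A standalone sketch with made-up id strings:
import random


def AssignFlowIds(parent_kind, client_id, parent_id=None, parent_long_id=None):
  """Returns (flow_id, long_flow_id) following the branching shown above."""
  if parent_kind in ("hunt", "scheduled_flow"):
    flow_id = parent_id  # Re-use the parent's id for easy lookup.
  else:
    flow_id = "{:016X}".format(random.getrandbits(64))
  if parent_kind == "flow":
    long_flow_id = "%s/%s" % (parent_long_id, flow_id)  # Nested flow.
  else:
    long_flow_id = "%s/%s" % (client_id, flow_id)  # Top-level flow.
  return flow_id, long_flow_id


print(AssignFlowIds("hunt", "C.1000000000000000", parent_id="ABCDEF0123456789"))
print(AssignFlowIds("root", "C.1000000000000000"))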
def ScheduleFlow(
client_id: str,
creator: str,
flow_name: str,
flow_args: any_pb2.Any,
runner_args: flows_pb2.FlowRunnerArgs,
) -> flows_pb2.ScheduledFlow:
"""Schedules a Flow on the client, to be started upon approval grant."""
scheduled_flow = flows_pb2.ScheduledFlow()
scheduled_flow.client_id = client_id
scheduled_flow.creator = creator
scheduled_flow.scheduled_flow_id = RandomFlowId()
scheduled_flow.flow_name = flow_name
scheduled_flow.flow_args.CopyFrom(flow_args)
scheduled_flow.runner_args.CopyFrom(runner_args)
scheduled_flow.create_time = int(rdfvalue.RDFDatetime.Now())
data_store.REL_DB.WriteScheduledFlow(scheduled_flow)
return scheduled_flow | Schedules a Flow on the client, to be started upon approval grant. | ScheduleFlow | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
def UnscheduleFlow(
client_id: str,
creator: str,
scheduled_flow_id: str,
) -> None:
"""Unschedules and deletes a previously scheduled flow."""
data_store.REL_DB.DeleteScheduledFlow(
client_id=client_id, creator=creator, scheduled_flow_id=scheduled_flow_id
) | Unschedules and deletes a previously scheduled flow. | UnscheduleFlow | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
def ListScheduledFlows(
client_id: str,
creator: str,
) -> Sequence[rdf_flow_objects.ScheduledFlow]:
"""Lists all scheduled flows of a user on a client."""
return data_store.REL_DB.ListScheduledFlows(
client_id=client_id, creator=creator
) | Lists all scheduled flows of a user on a client. | ListScheduledFlows | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
def StartScheduledFlows(client_id: str, creator: str) -> None:
"""Starts all scheduled flows of a user on a client.
This function delegates to StartFlow() to start the actual flow. If an error
occurs during StartFlow(), the ScheduledFlow is not deleted, but it is
updated by writing the `error` field to the database. The exception is NOT
re-raised and the next ScheduledFlow is attempted to be started.
Args:
client_id: The ID of the client of the ScheduledFlows.
creator: The username of the user who created the ScheduledFlows.
Raises:
UnknownClientError: if no client with client_id exists.
UnknownGRRUserError: if creator does not exist as user.
"""
# Validate existence of Client and User. Data races are not an issue - no
# flows get started in any case.
data_store.REL_DB.ReadClientMetadata(client_id)
data_store.REL_DB.ReadGRRUser(creator)
scheduled_flows = ListScheduledFlows(client_id, creator)
for sf in scheduled_flows:
try:
sf = mig_flow_objects.ToRDFScheduledFlow(sf)
flow_id = _StartScheduledFlow(sf)
logging.info(
"Started Flow %s/%s from ScheduledFlow %s",
client_id,
flow_id,
sf.scheduled_flow_id,
)
except Exception: # pylint: disable=broad-except
logging.exception(
"Cannot start ScheduledFlow %s %s/%s from %s",
sf.flow_name,
sf.client_id,
sf.scheduled_flow_id,
sf.creator,
) | Starts all scheduled flows of a user on a client.
This function delegates to StartFlow() to start the actual flow. If an error
occurs during StartFlow(), the ScheduledFlow is not deleted, but it is
updated by writing the `error` field to the database. The exception is NOT
re-raised and the next ScheduledFlow is attempted to be started.
Args:
client_id: The ID of the client of the ScheduledFlows.
creator: The username of the user who created the ScheduledFlows.
Raises:
UnknownClientError: if no client with client_id exists.
UnknownGRRUserError: if creator does not exist as user. | StartScheduledFlows | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
def _StartScheduledFlow(scheduled_flow: rdf_flow_objects.ScheduledFlow) -> str:
"""Starts a Flow from a ScheduledFlow and deletes the ScheduledFlow."""
sf = scheduled_flow
ra = scheduled_flow.runner_args
try:
flow_id = StartFlow(
client_id=sf.client_id,
creator=sf.creator,
flow_args=sf.flow_args,
flow_cls=registry.FlowRegistry.FlowClassByName(sf.flow_name),
output_plugins=ra.output_plugins,
start_at=rdfvalue.RDFDatetime.Now(),
parent=FlowParent.FromScheduledFlowID(sf.scheduled_flow_id),
cpu_limit=ra.cpu_limit,
network_bytes_limit=ra.network_bytes_limit,
# runtime_limit is missing in FlowRunnerArgs.
)
except Exception as e:
scheduled_flow = mig_flow_objects.ToProtoScheduledFlow(scheduled_flow)
scheduled_flow.error = str(e)
data_store.REL_DB.WriteScheduledFlow(scheduled_flow)
raise
data_store.REL_DB.DeleteScheduledFlow(
client_id=scheduled_flow.client_id,
creator=scheduled_flow.creator,
scheduled_flow_id=scheduled_flow.scheduled_flow_id,
)
return flow_id | Starts a Flow from a ScheduledFlow and deletes the ScheduledFlow. | _StartScheduledFlow | python | google/grr | grr/server/grr_response_server/flow.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow.py | Apache-2.0 |
def EnrollFleetspeakClientIfNeeded(
self,
client_id: str,
fleetspeak_validation_tags: Mapping[str, str],
) -> Optional[rdf_objects.ClientMetadata]:
"""Enrols a Fleetspeak-enabled client for use with GRR.
Args:
client_id: GRR client-id for the client.
fleetspeak_validation_tags: Validation tags supplied by Fleetspeak.
Returns:
None if the client is new, and actually got enrolled. This method
is a no-op if the client already exists (in which case the existing
client metadata is returned).
"""
client_urn = rdf_client.ClientURN(client_id)
# If already enrolled, return.
try:
return mig_objects.ToRDFClientMetadata(
data_store.REL_DB.ReadClientMetadata(client_id)
)
except db.UnknownClientError:
pass
logging.info("Enrolling a new Fleetspeak client: %r", client_id)
now = rdfvalue.RDFDatetime.Now()
data_store.REL_DB.WriteClientMetadata(
client_id,
first_seen=now,
last_ping=now,
fleetspeak_validation_info=fleetspeak_validation_tags,
)
# Publish the client enrollment message.
events.Events.PublishEvent(
"ClientEnrollment", client_urn, username=FRONTEND_USERNAME
)
return None | Enrols a Fleetspeak-enabled client for use with GRR.
Args:
client_id: GRR client-id for the client.
fleetspeak_validation_tags: Validation tags supplied by Fleetspeak.
Returns:
None if the client is new, and actually got enrolled. This method
is a no-op if the client already exists (in which case the existing
client metadata is returned). | EnrollFleetspeakClientIfNeeded | python | google/grr | grr/server/grr_response_server/frontend_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/frontend_lib.py | Apache-2.0 |
def ReceiveMessages(
self,
client_id: str,
messages: Sequence[rdf_flows.GrrMessage],
) -> None:
"""Receives and processes the messages.
For each message we update the request object, and place the
response in that request's queue. If the request is complete, we
send a message to the worker.
Args:
client_id: The client which sent the messages.
messages: A list of GrrMessage RDFValues.
"""
now = time.time()
unprocessed_msgs = []
worker_message_handler_requests = []
frontend_message_handler_requests = []
dropped_count = 0
# TODO: Remove `fixed_messages` once old clients
# have been migrated.
fixed_messages = []
for message in messages:
if message.type != rdf_flows.GrrMessage.Type.STATUS:
fixed_messages.append(message)
continue
stat = rdf_flows.GrrStatus(message.payload)
if not stat.HasField("cpu_time_used"):
fixed_messages.append(message)
continue
if stat.cpu_time_used.HasField("deprecated_user_cpu_time"):
stat.cpu_time_used.user_cpu_time = (
stat.cpu_time_used.deprecated_user_cpu_time
)
stat.cpu_time_used.deprecated_user_cpu_time = None
if stat.cpu_time_used.HasField("deprecated_system_cpu_time"):
stat.cpu_time_used.system_cpu_time = (
stat.cpu_time_used.deprecated_system_cpu_time
)
stat.cpu_time_used.deprecated_system_cpu_time = None
message.payload = stat
fixed_messages.append(message)
messages = fixed_messages
msgs_by_session_id = collection.Group(messages, lambda m: m.session_id)
for session_id, msgs in msgs_by_session_id.items():
try:
for msg in msgs:
if (
msg.auth_state != msg.AuthorizationState.AUTHENTICATED
):
dropped_count += 1
continue
session_id_str = str(session_id)
if session_id_str in message_handlers.session_id_map:
request = rdf_objects.MessageHandlerRequest(
client_id=msg.source.Basename(),
handler_name=message_handlers.session_id_map[session_id],
request_id=msg.response_id or random.UInt32(),
request=msg.payload,
)
if request.handler_name in self._SHORTCUT_HANDLERS:
frontend_message_handler_requests.append(request)
else:
worker_message_handler_requests.append(request)
elif session_id_str in self.legacy_well_known_session_ids:
logging.debug(
"Dropping message for legacy well known session id %s",
session_id,
)
else:
unprocessed_msgs.append(msg)
except ValueError:
logging.exception(
"Unpacking error in at least one of %d messages for session id %s",
len(msgs),
session_id,
)
raise
if dropped_count:
logging.info(
"Dropped %d unauthenticated messages for %s", dropped_count, client_id
)
if unprocessed_msgs:
flow_responses = []
for message in unprocessed_msgs:
try:
response = rdf_flow_objects.FlowResponseForLegacyResponse(message)
except ValueError as e:
logging.warning(
"Failed to parse legacy FlowResponse:\n%s\n%s", e, message
)
else:
if isinstance(response, rdf_flow_objects.FlowStatus):
response = mig_flow_objects.ToProtoFlowStatus(response)
if isinstance(response, rdf_flow_objects.FlowIterator):
response = mig_flow_objects.ToProtoFlowIterator(response)
if isinstance(response, rdf_flow_objects.FlowResponse):
response = mig_flow_objects.ToProtoFlowResponse(response)
flow_responses.append(response)
data_store.REL_DB.WriteFlowResponses(flow_responses)
for msg in unprocessed_msgs:
if msg.type == rdf_flows.GrrMessage.Type.STATUS:
stat = rdf_flows.GrrStatus(msg.payload)
if stat.status == rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED:
# A client crashed while performing an action, fire an event.
crash_details = rdf_client.ClientCrash(
client_id=client_id,
session_id=msg.session_id,
backtrace=stat.backtrace,
crash_message=stat.error_message,
timestamp=rdfvalue.RDFDatetime.Now(),
)
events.Events.PublishEvent(
"ClientCrash", crash_details, username=FRONTEND_USERNAME
)
if worker_message_handler_requests:
worker_message_handler_requests = [
mig_objects.ToProtoMessageHandlerRequest(r)
for r in worker_message_handler_requests
]
data_store.REL_DB.WriteMessageHandlerRequests(
worker_message_handler_requests
)
if frontend_message_handler_requests:
frontend_message_handler_requests = [
mig_objects.ToProtoMessageHandlerRequest(r)
for r in frontend_message_handler_requests
]
worker_lib.ProcessMessageHandlerRequests(
frontend_message_handler_requests
)
logging.debug(
"Received %s messages from %s in %s sec",
len(messages),
client_id,
time.time() - now,
) | Receives and processes the messages.
For each message we update the request object, and place the
response in that request's queue. If the request is complete, we
send a message to the worker.
Args:
client_id: The client which sent the messages.
messages: A list of GrrMessage RDFValues. | ReceiveMessages | python | google/grr | grr/server/grr_response_server/frontend_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/frontend_lib.py | Apache-2.0 |
def ReceiveRRGResponse(
self,
client_id: str,
response: rrg_pb2.Response,
) -> None:
"""Receives and processes a single response from the RRG agent.
Args:
client_id: An identifier of the client for which we process the response.
response: A response to process.
"""
self.ReceiveRRGResponses(client_id, [response]) | Receives and processes a single response from the RRG agent.
Args:
client_id: An identifier of the client for which we process the response.
response: A response to process. | ReceiveRRGResponse | python | google/grr | grr/server/grr_response_server/frontend_lib.py | https://github.com/google/grr/blob/master/grr/server/grr_response_server/frontend_lib.py | Apache-2.0 |