Fields per record: code · docstring · func_name · language · repo · path · url · license
def ReceiveRRGResponses(
    self,
    client_id: str,
    responses: Sequence[rrg_pb2.Response],
) -> None:
  """Receives and processes multiple responses from the RRG agent.

  Args:
    client_id: An identifier of the client for which we process the response.
    responses: Responses to process.
  """
  flow_responses = []
  flow_rrg_logs: dict[tuple[int, int], dict[int, rrg_pb2.Log]] = {}

  for response in responses:
    flow_response: Union[
        flows_pb2.FlowResponse,
        flows_pb2.FlowStatus,
        flows_pb2.FlowIterator,
    ]

    if response.HasField("status"):
      flow_response = flows_pb2.FlowStatus()
      flow_response.network_bytes_sent = response.status.network_bytes_sent
      # TODO: Populate `cpu_time_used` and `runtime_us`.

      if response.status.HasField("error"):
        # TODO: Convert RRG error types to GRR error types.
        flow_response.status = flows_pb2.FlowStatus.Status.ERROR
        flow_response.error_message = response.status.error.message
      else:
        flow_response.status = flows_pb2.FlowStatus.Status.OK
    elif response.HasField("result"):
      flow_response = flows_pb2.FlowResponse()
      flow_response.any_payload.CopyFrom(response.result)
    elif response.HasField("log"):
      request_rrg_logs = flow_rrg_logs.setdefault(
          (response.flow_id, response.request_id), {}
      )
      request_rrg_logs[response.response_id] = response.log
      continue
    else:
      raise ValueError(f"Unexpected response: {response}")

    flow_response.client_id = client_id
    flow_response.flow_id = f"{response.flow_id:016X}"
    flow_response.request_id = response.request_id
    flow_response.response_id = response.response_id
    flow_responses.append(flow_response)

  data_store.REL_DB.WriteFlowResponses(flow_responses)

  for (flow_id, request_id), logs in flow_rrg_logs.items():
    data_store.REL_DB.WriteFlowRRGLogs(
        client_id=client_id,
        flow_id=f"{flow_id:016X}",
        request_id=request_id,
        logs=logs,
    )
ReceiveRRGResponses · python · google/grr · grr/server/grr_response_server/frontend_lib.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/frontend_lib.py
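A note on the flow ID conversion above: RRG reports flow IDs as integers, while GRR addresses flows by zero-padded, 16-digit uppercase hex strings, hence the `f"{response.flow_id:016X}"` formatting. A minimal sketch with an invented ID:

# Hypothetical integer flow ID as an RRG agent would report it.
rrg_flow_id = 0x1234ABCD

# The frontend's canonical string form, as used for the database writes above.
grr_flow_id = f"{rrg_flow_id:016X}"
assert grr_flow_id == "000000001234ABCD"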
def ReceiveRRGParcel(
    self,
    client_id: str,
    parcel: rrg_pb2.Parcel,
) -> None:
  """Receives and processes a single parcel from the RRG agent.

  Args:
    client_id: An identifier of the client for which we process the response.
    parcel: A parcel to process.
  """
  self.ReceiveRRGParcels(client_id, [parcel])
ReceiveRRGParcel · python · google/grr · grr/server/grr_response_server/frontend_lib.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/frontend_lib.py
def ReceiveRRGParcels(
    self,
    client_id: str,
    parcels: Sequence[rrg_pb2.Parcel],
) -> None:
  """Receives and processes multiple parcels from the RRG agent.

  Args:
    client_id: An identifier of the client for which we process the response.
    parcels: Parcels to process.
  """
  parcels_by_sink_name = {}
  for parcel in parcels:
    sink_name = rrg_pb2.Sink.Name(parcel.sink)
    parcels_by_sink_name.setdefault(sink_name, []).append(parcel)

  for sink_name, sink_parcels in parcels_by_sink_name.items():
    RRG_PARCEL_COUNT.Increment(fields=[sink_name], delta=len(sink_parcels))

  try:
    sinks.AcceptMany(client_id, parcels)
  except Exception:  # pylint: disable=broad-exception-caught
    # TODO: `AcceptMany` should raise an error that specifies
    # which sink caused the exception. Then we don't have to increment the
    # count for all sinks.
    for sink_name in parcels_by_sink_name:
      RRG_PARCEL_ACCEPT_ERRORS.Increment(fields=[sink_name])

    logging.exception("Failed to process parcels for '%s'", client_id)
ReceiveRRGParcels · python · google/grr · grr/server/grr_response_server/frontend_lib.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/frontend_lib.py
def Start(self) -> None:
  """The first state of the flow."""
Start · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
def End(self) -> None:
  """Final state.

  This method is called prior to destruction of the flow.
  """
End · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
def CallState(
    self,
    next_state: str = "",
    start_time: Optional[rdfvalue.RDFDatetime] = None,
    responses: Optional[Sequence[rdf_structs.RDFStruct]] = None,
):
  """This method is used to schedule a new state on a different worker.

  This is basically the same as CallFlow() except we are calling ourselves.
  The state will be invoked at a later time.

  Args:
    next_state: The state in this flow to be invoked.
    start_time: Start the flow at this time. This delays notification for
      flow processing into the future. Note that the flow may still be
      processed earlier if there are client responses waiting.
    responses: If specified, responses to be passed to the next state.

  Raises:
    ValueError: The next state specified does not exist.
    FlowError: Method shouldn't be used in this flow (only_protos_allowed).
  """
  # The Start method is special and not run with `RunStateMethod` by
  # `StartFlow`. Rather, we call `CallState` directly because it can be
  # scheduled for the future (`start_time`), unlike `RunStateMethod` which
  # runs now.
  if self.only_protos_allowed and next_state != "Start":
    raise FlowError(
        "`CallState` is not allowed for flows that only allow protos. Use"
        " `CallStateProto` instead."
    )

  if not getattr(self, next_state):
    raise ValueError("Next state %s is invalid." % next_state)

  request_id = self.GetNextOutboundId()
  if responses:
    for index, r in enumerate(responses):
      wrapped_response = rdf_flow_objects.FlowResponse(
          client_id=self.rdf_flow.client_id,
          flow_id=self.rdf_flow.flow_id,
          request_id=request_id,
          response_id=index,
          payload=r,
      )
      self.flow_responses.append(wrapped_response)
    self.flow_responses.append(
        rdf_flow_objects.FlowStatus(
            client_id=self.rdf_flow.client_id,
            flow_id=self.rdf_flow.flow_id,
            request_id=request_id,
            response_id=len(responses) + 1,
            status=rdf_flow_objects.FlowStatus.Status.OK,
        )
    )
    nr_responses_expected = len(responses) + 1
  else:
    nr_responses_expected = 0

  flow_request = rdf_flow_objects.FlowRequest(
      client_id=self.rdf_flow.client_id,
      flow_id=self.rdf_flow.flow_id,
      request_id=request_id,
      next_state=next_state,
      start_time=start_time,
      nr_responses_expected=nr_responses_expected,
      needs_processing=True,
  )

  self.flow_requests.append(flow_request)
CallState · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
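For orientation, a hedged sketch of how a flow might use `CallState` to defer work: `ExampleFlow`, the state names, and the one-hour delay are invented for illustration and are not part of the GRR sources.

class ExampleFlow(flow_base.FlowBase):
  """Hypothetical flow demonstrating deferred state scheduling."""

  def Start(self) -> None:
    # Schedule `Resume` roughly an hour from now; the worker may still run
    # it earlier if client responses arrive in the meantime.
    self.CallState(
        next_state="Resume",
        start_time=rdfvalue.RDFDatetime.Now()
        + rdfvalue.Duration.From(1, rdfvalue.HOURS),
    )

  def Resume(self, responses) -> None:
    self.Log("Deferred state executed.")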
def CallStateProto(
    self,
    next_state: str = "",
    start_time: Optional[rdfvalue.RDFDatetime] = None,
    responses: Optional[Sequence[pb_message.Message]] = None,
):
  """This method is used to schedule a new state on a different worker.

  This is basically the same as CallFlow() except we are calling ourselves.
  The state will be invoked at a later time.

  Args:
    next_state: The state in this flow to be invoked.
    start_time: Start the flow at this time. This delays notification for
      flow processing into the future. Note that the flow may still be
      processed earlier if there are client responses waiting.
    responses: If specified, responses to be passed to the next state.

  Raises:
    ValueError: The next state specified does not exist.
  """
  if not getattr(self, next_state):
    raise ValueError("Next state %s is invalid." % next_state)

  request_id = self.GetNextOutboundId()
  if responses:
    for index, r in enumerate(responses):
      _ValidateProto(r)
      wrapped_response = flows_pb2.FlowResponse(
          client_id=self.rdf_flow.client_id,
          flow_id=self.rdf_flow.flow_id,
          request_id=request_id,
          response_id=index,
      )
      wrapped_response.any_payload.Pack(r)
      # TODO: Remove dynamic `payload` field.
      wrapped_response.payload.Pack(r)
      self.proto_flow_responses.append(wrapped_response)
    self.proto_flow_responses.append(
        flows_pb2.FlowStatus(
            client_id=self.rdf_flow.client_id,
            flow_id=self.rdf_flow.flow_id,
            request_id=request_id,
            response_id=len(responses) + 1,
            status=flows_pb2.FlowStatus.Status.OK,
        )
    )
    nr_responses_expected = len(responses) + 1
  else:
    nr_responses_expected = 0

  flow_request = flows_pb2.FlowRequest(
      client_id=self.rdf_flow.client_id,
      flow_id=self.rdf_flow.flow_id,
      request_id=request_id,
      next_state=next_state,
      nr_responses_expected=nr_responses_expected,
      needs_processing=True,
  )
  if start_time is not None:
    flow_request.start_time = int(start_time)

  self.proto_flow_requests.append(flow_request)
CallStateProto · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
def CallStateInline(
    self,
    messages: Optional[
        Sequence[
            Union[
                rdf_flow_objects.FlowResponse,
                rdf_flow_objects.FlowStatus,
                rdf_flow_objects.FlowIterator,
            ],
        ]
    ] = None,
    next_state: str = "",
    request_data: Optional[Mapping[str, Any]] = None,
    responses: Optional[flow_responses.Responses] = None,
):
  """Calls a state inline (immediately).

  If `responses` is not specified, `messages` and `request_data` are used to
  create a `flow_responses.Responses` object. Otherwise `responses` is used
  as is.

  Args:
    messages: responses to be passed to the state (only used if `responses`
      is not provided).
    next_state: The state to be called.
    request_data: An arbitrary dict to be passed to the called state (only
      used if `responses` is not provided).
    responses: Responses to pass to the state (as is). If not specified,
      `messages` and `request_data` are used to create a
      `flow_responses.Responses` object.

  Raises:
    FlowError: Method shouldn't be used in this flow (only_protos_allowed).
  """
  if self.only_protos_allowed:
    raise FlowError(
        "`CallStateInline` is not allowed for flows that only allow protos."
        " Use `CallStateInlineProtoWithResponses` or"
        " `CallStateInlineProto` instead."
    )

  if responses is None:
    responses = flow_responses.FakeResponses(messages, request_data)

  getattr(self, next_state)(responses)
CallStateInline · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
def CallStateInlineProtoWithResponses(
    self,
    next_state: str = "",
    responses: Optional[flow_responses.Responses[any_pb2.Any]] = None,
):
  """Calls a state inline (immediately).

  The state must be annotated with `@UseProto2AnyResponses`.

  Args:
    next_state: The state to be called.
    responses: Responses to pass to the state (as is).
  """
  method = getattr(self, next_state)
  # Raise if the method is not annotated with `@UseProto2AnyResponses`.
  # This means it still expects RDFValues, we should use `CallStateInline`.
  if (
      not hasattr(method, "_proto2_any_responses")
      or not method._proto2_any_responses  # pylint: disable=protected-access
  ):
    raise ValueError(
        f"Method {method.__name__} is not annotated with"
        " `@UseProto2AnyResponses`. Please use `CallStateInline` instead."
    )

  # Method expects Responses[any_pb2.Any].
  if responses is not None:
    # TODO: Remove this check once flow targets use pytype.
    for r in responses:
      if not isinstance(r, any_pb2.Any):
        raise ValueError(
            f"Expected Responses[any_pb2.Any] but got Responses[{type(r)}]"
        )

  method(responses)
CallStateInlineProtoWithResponses · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
def CallStateInlineProto(
    self,
    next_state: str = "",
    messages: Optional[Sequence[pb_message.Message]] = None,
    request_data: Optional[Mapping[str, Any]] = None,
) -> None:
  """Calls a state inline (immediately).

  The state must be annotated with `@UseProto2AnyResponses`.

  Args:
    next_state: The state to be called.
    messages: responses to be passed to the state.
    request_data: An arbitrary dict to be passed to the called state.
  """
  method = getattr(self, next_state)
  # Raise if the method is not annotated with `@UseProto2AnyResponses`.
  # This means it still expects RDFValues, we should use `CallStateInline`.
  if (
      not hasattr(method, "_proto2_any_responses")
      or not method._proto2_any_responses  # pylint: disable=protected-access
  ):
    raise ValueError(
        f"Method {method.__name__} is not annotated with"
        " `@UseProto2AnyResponses`. Please use `CallStateInline` instead."
    )

  # Use `messages` and make sure they're packed into `any_pb2.Any`s.
  any_msgs: list[any_pb2.Any] = []
  if messages is not None:
    for r in messages:
      _ValidateProto(r)
      if isinstance(r, any_pb2.Any):
        raise ValueError(
            f"Expected unpacked proto message but got an any_pb2.Any: {r}"
        )
      any_msg = any_pb2.Any()
      any_msg.Pack(r)
      any_msgs.append(any_msg)
  responses = flow_responses.FakeResponses(any_msgs, request_data)

  method(responses)
CallStateInlineProto · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
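Both inline-proto variants require the target state to be decorated with `@UseProto2AnyResponses` (the decorator named in the error messages above) and to unpack `any_pb2.Any` payloads itself. A hedged sketch; the decorator's module and the `StatEntry` payload type are assumptions made for illustration:

@flow_base.UseProto2AnyResponses  # Decorator location assumed.
def ProcessResults(
    self, responses: flow_responses.Responses[any_pb2.Any]
) -> None:
  for packed in responses:
    result = jobs_pb2.StatEntry()  # Payload type chosen for illustration.
    if packed.Unpack(result):  # `Unpack` returns False on a type mismatch.
      self.Log("Processed result for %s", result.pathspec.path)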
def _GetAndCheckResourceLimits(self) -> _ResourceLimits:
  """Calculates and checks if the flow has exceeded any resource limits.

  Returns:
    A _ResourceLimits object with the calculated limits.

  Raises:
    FlowResourcesExceededError: If any resource limit has been exceeded.
  """
  cpu_limit_ms = None
  network_bytes_limit = None
  runtime_limit_us = self.rdf_flow.runtime_limit_us

  if self.rdf_flow.cpu_limit:
    cpu_usage = self.rdf_flow.cpu_time_used
    cpu_limit_ms = 1000 * max(
        self.rdf_flow.cpu_limit
        - cpu_usage.user_cpu_time
        - cpu_usage.system_cpu_time,
        0,
    )

    if cpu_limit_ms == 0:
      raise flow.FlowResourcesExceededError(
          "CPU limit exceeded for {} {}.".format(
              self.rdf_flow.flow_class_name, self.rdf_flow.flow_id
          )
      )

  if self.rdf_flow.network_bytes_limit:
    network_bytes_limit = max(
        self.rdf_flow.network_bytes_limit - self.rdf_flow.network_bytes_sent,
        0,
    )
    if network_bytes_limit == 0:
      raise flow.FlowResourcesExceededError(
          "Network limit exceeded for {} {}.".format(
              self.rdf_flow.flow_class_name, self.rdf_flow.flow_id
          )
      )

  if runtime_limit_us and self.rdf_flow.runtime_us:
    if self.rdf_flow.runtime_us < runtime_limit_us:
      runtime_limit_us -= self.rdf_flow.runtime_us
    else:
      raise flow.FlowResourcesExceededError(
          "Runtime limit exceeded for {} {}.".format(
              self.rdf_flow.flow_class_name, self.rdf_flow.flow_id
          )
      )

  return self._ResourceLimits(
      cpu_limit_ms=cpu_limit_ms,
      network_bytes_limit=network_bytes_limit,
      runtime_limit_us=runtime_limit_us,
  )
_GetAndCheckResourceLimits · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
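To make the CPU budget arithmetic concrete: the remaining allowance is the configured limit minus user and system CPU time already consumed, converted to milliseconds and clamped at zero; a zero result means the budget is exhausted and the flow is terminated. A worked example with invented numbers:

cpu_limit = 60.0        # seconds granted to the flow in total
user_cpu_time = 40.5    # seconds already spent in user mode
system_cpu_time = 10.0  # seconds already spent in kernel mode

cpu_limit_ms = 1000 * max(cpu_limit - user_cpu_time - system_cpu_time, 0)
assert cpu_limit_ms == 9500.0  # 9.5 CPU-seconds left for the next request.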
def CallClient(
    self,
    action_cls: Type[server_stubs.ClientActionStub],
    request: Optional[rdfvalue.RDFValue] = None,
    next_state: Optional[str] = None,
    callback_state: Optional[str] = None,
    request_data: Optional[Mapping[str, Any]] = None,
):
  """Calls the client asynchronously.

  This sends a message to the client to invoke an Action. The run action
  may send back many responses that will be queued by the framework until
  a status message is sent by the client. The status message will cause the
  entire transaction to be committed to the specified state.

  Args:
    action_cls: The function to call on the client.
    request: The request to send to the client. Must be of the correct type
      for the action.
    next_state: The state in this flow, that responses to this message
      should go to.
    callback_state: (optional) The state to call whenever a new response is
      arriving.
    request_data: A dict which will be available in the RequestState
      protobuf. The Responses object maintains a reference to this protobuf
      for use in the execution of the state method. (so you can access this
      data by responses.request).

  Raises:
    ValueError: The request passed to the client does not have the correct
      type.
    FlowError: Method shouldn't be used in this flow (only_protos_allowed).
  """
  if self.only_protos_allowed:
    raise FlowError(
        "`CallClient` is not allowed for flows that only allow protos. Use"
        " `CallClientProto` instead."
    )

  try:
    action_identifier = action_registry.ID_BY_ACTION_STUB[action_cls]
  except KeyError:
    raise ValueError("Action class %s not known." % action_cls)

  if action_cls.in_rdfvalue is None:
    if request:
      raise ValueError("Client action %s does not expect args." % action_cls)
  else:
    # Verify that the request type matches the client action requirements.
    if not isinstance(request, action_cls.in_rdfvalue):
      raise ValueError(
          "Client action expected %s but got %s"
          % (action_cls.in_rdfvalue, type(request))
      )

  outbound_id = self.GetNextOutboundId()

  # Create a flow request.
  flow_request = rdf_flow_objects.FlowRequest(
      client_id=self.rdf_flow.client_id,
      flow_id=self.rdf_flow.flow_id,
      request_id=outbound_id,
      next_state=next_state,
      callback_state=callback_state,
  )

  if request_data is not None:
    flow_request.request_data = rdf_protodict.Dict().FromDict(request_data)

  limits = self._GetAndCheckResourceLimits()

  stub = action_registry.ACTION_STUB_BY_ID[action_identifier]
  client_action_request = rdf_flows.GrrMessage(
      session_id="%s/%s" % (self.rdf_flow.client_id, self.rdf_flow.flow_id),
      name=stub.__name__,
      request_id=outbound_id,
      payload=request,
      network_bytes_limit=limits.network_bytes_limit,
      runtime_limit_us=limits.runtime_limit_us,
  )
  if limits.cpu_limit_ms is not None:
    client_action_request.cpu_limit = limits.cpu_limit_ms / 1000.0

  self.flow_requests.append(flow_request)
  self.client_action_requests.append(client_action_request)
CallClient · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
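A hedged usage sketch of the request/response cycle this method sets up: a state issues a client action and names the state that will receive the responses. `ListDirectory` and the request types are assumed from GRR's standard client action stubs; treat them as illustrative rather than a verified API.

def Start(self) -> None:
  self.CallClient(
      server_stubs.ListDirectory,  # Assumed standard client action stub.
      request=rdf_client_action.ListDirRequest(  # Assumed request type.
          pathspec=rdf_paths.PathSpec(
              path="/tmp", pathtype=rdf_paths.PathSpec.PathType.OS
          )
      ),
      next_state="ProcessListing",
  )

def ProcessListing(self, responses) -> None:
  # `responses.success` reflects the status message sent by the client.
  if not responses.success:
    raise FlowError(responses.status.error_message)
  for stat_entry in responses:
    self.SendReply(stat_entry)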
def CallClientProto(
    self,
    action_cls: Type[server_stubs.ClientActionStub],
    action_args: Optional[pb_message.Message] = None,
    next_state: Optional[str] = None,
    callback_state: Optional[str] = None,
    request_data: Optional[dict[str, Any]] = None,
):
  """Calls the client asynchronously.

  This sends a message to the client to invoke an Action. The run action
  may send back many responses that will be queued by the framework until
  a status message is sent by the client. The status message will cause the
  entire transaction to be committed to the specified state.

  Args:
    action_cls: The function to call on the client.
    action_args: The arguments to send to the client. Must be of the correct
      type for the action.
    next_state: The state in this flow, that responses to this message
      should go to.
    callback_state: (optional) The state to call whenever a new response is
      arriving.
    request_data: A dict which will be available in the RequestState
      protobuf. The Responses object maintains a reference to this protobuf
      for use in the execution of the state method. (so you can access this
      data by responses.request).

  Raises:
    ValueError: The client action does not exist/is not registered.
    TypeError: The arguments passed to the client do not have the correct
      type.
  """
  try:
    action_registry.ID_BY_ACTION_STUB[action_cls]
  except KeyError:
    raise ValueError("Action class %s not known." % action_cls) from None

  if action_cls.in_proto is None and action_args:
    raise ValueError(
        f"Client action {action_cls.__name__} does not expect args yet some"
        f" were provided: {action_args}"
    )
  elif action_cls.in_proto is not None:
    if action_args is None:
      raise ValueError(
          f"Client action {action_cls.__name__} expects args, but none were"
          " provided."
      )
    # Verify that the action_args type matches the client action
    # requirements.
    if not isinstance(action_args, action_cls.in_proto):
      raise ValueError(
          "Client action expected %s but got %s"
          % (action_cls.in_proto, type(action_args))
      )

  outbound_id = self.GetNextOutboundId()

  # Create a flow request.
  flow_request = flows_pb2.FlowRequest(
      client_id=self.rdf_flow.client_id,
      flow_id=self.rdf_flow.flow_id,
      request_id=outbound_id,
      next_state=next_state,
      callback_state=callback_state,
  )

  if request_data is not None:
    flow_request.request_data.CopyFrom(
        mig_protodict.FromNativeDictToProtoDict(request_data)
    )

  limits = self._GetAndCheckResourceLimits()

  client_action_request = jobs_pb2.GrrMessage(
      session_id="%s/%s" % (self.rdf_flow.client_id, self.rdf_flow.flow_id),
      name=action_cls.__name__,
      request_id=outbound_id,
      network_bytes_limit=limits.network_bytes_limit,
      runtime_limit_us=limits.runtime_limit_us,
  )
  if action_args:
    # We rely on the fact that the in_proto and in_rdfvalue fields in the
    # stub represent the same type. That is:
    #     cls.in_rdfvalue.protobuf == cls.in_proto
    # We use that to manually build the proto as prescribed by the
    # GrrMessage RDF class.
    models_clients.SetGrrMessagePayload(
        client_action_request, action_cls.in_rdfvalue.__name__, action_args
    )

  self.proto_flow_requests.append(flow_request)
  self.proto_client_action_requests.append(client_action_request)
CallClientProto · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
def CallFlow(
    self,
    flow_name: Optional[str] = None,
    next_state: Optional[str] = None,
    request_data: Optional[Mapping[str, Any]] = None,
    output_plugins: Optional[
        Sequence[rdf_output_plugin.OutputPluginDescriptor]
    ] = None,
    flow_args: Optional[rdf_structs.RDFStruct] = None,
) -> str:
  """Creates a new flow and sends its responses to a state.

  This creates a new flow. The flow may send back many responses which will
  be queued by the framework until the flow terminates. The final status
  message will cause the entire transaction to be committed to the
  specified state.

  Args:
    flow_name: The name of the flow to invoke.
    next_state: The state in this flow, that responses to this message
      should go to.
    request_data: Any dict provided here will be available in the
      RequestState protobuf. The Responses object maintains a reference to
      this protobuf for use in the execution of the state method. (so you
      can access this data by responses.request). There is no format
      mandated on this data but it may be a serialized protobuf.
    output_plugins: A list of output plugins to use for this flow.
    flow_args: Arguments for the child flow.

  Returns:
    The flow_id of the child flow which was created.

  Raises:
    ValueError: The requested next state does not exist.
    FlowError: Method shouldn't be used in this flow (only_protos_allowed).
  """
  if self.only_protos_allowed:
    raise FlowError(
        "`CallFlow` is not allowed for flows that only allow protos. Use"
        " `CallFlowProto` instead."
    )
  if not getattr(self, next_state):
    raise ValueError("Next state %s is invalid." % next_state)

  flow_request = rdf_flow_objects.FlowRequest(
      client_id=self.rdf_flow.client_id,
      flow_id=self.rdf_flow.flow_id,
      request_id=self.GetNextOutboundId(),
      next_state=next_state,
  )

  if request_data is not None:
    flow_request.request_data = rdf_protodict.Dict().FromDict(request_data)

  self.flow_requests.append(flow_request)

  flow_cls = FlowRegistry.FlowClassByName(flow_name)

  return flow.StartFlow(
      client_id=self.rdf_flow.client_id,
      flow_cls=flow_cls,
      parent=flow.FlowParent.FromFlow(self),
      output_plugins=output_plugins,
      flow_args=flow_args,
  )
CallFlow · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
def CallFlowProto(
    self,
    flow_name: Optional[str] = None,
    next_state: Optional[str] = None,
    request_data: Optional[dict[str, Any]] = None,
    output_plugins: Optional[
        Sequence[rdf_output_plugin.OutputPluginDescriptor]
    ] = None,
    flow_args: Optional[pb_message.Message] = None,
) -> str:
  """Creates a new flow and sends its responses to a state.

  This creates a new flow. The flow may send back many responses which will
  be queued by the framework until the flow terminates. The final status
  message will cause the entire transaction to be committed to the
  specified state.

  Args:
    flow_name: The name of the flow to invoke.
    next_state: The state in this flow, that responses to this message
      should go to.
    request_data: Any dict provided here will be available in the
      RequestState protobuf. The Responses object maintains a reference to
      this protobuf for use in the execution of the state method. (so you
      can access this data by responses.request). There is no format
      mandated on this data but it may be a serialized protobuf.
    output_plugins: A list of output plugins to use for this flow.
    flow_args: Arguments for the child flow.

  Returns:
    The flow_id of the child flow which was created.

  Raises:
    ValueError: The requested next state does not exist.
  """
  if not getattr(self, next_state):
    raise ValueError("Next state %s is invalid." % next_state)

  flow_request = flows_pb2.FlowRequest(
      client_id=self.rdf_flow.client_id,
      flow_id=self.rdf_flow.flow_id,
      request_id=self.GetNextOutboundId(),
      next_state=next_state,
  )

  if request_data is not None:
    flow_request.request_data.CopyFrom(
        mig_protodict.FromNativeDictToProtoDict(request_data)
    )

  self.proto_flow_requests.append(flow_request)

  flow_cls = FlowRegistry.FlowClassByName(flow_name)

  rdf_flow_args = None
  if flow_args:
    if flow_cls.args_type.protobuf != type(flow_args):
      raise ValueError(
          f"Flow {flow_name} expects args of type"
          f" {flow_cls.args_type.protobuf} but got {type(flow_args)}"
      )
    # We try on a best-effort basis to convert the flow args to RDFValue.
    rdf_flow_args = flow_cls.args_type.FromSerializedBytes(
        flow_args.SerializeToString()
    )

  # TODO: Allow `StartFlow` to take proto args in.
  return flow.StartFlow(
      client_id=self.rdf_flow.client_id,
      flow_cls=flow_cls,
      parent=flow.FlowParent.FromFlow(self),
      output_plugins=output_plugins,
      flow_args=rdf_flow_args,
  )
CallFlowProto · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
def SendReply(
    self, response: rdfvalue.RDFValue, tag: Optional[str] = None
) -> None:
  """Allows this flow to send a message to its parent flow.

  If this flow does not have a parent, the message is saved to the database
  as flow result.

  Args:
    response: An RDFValue() instance to be sent to the parent.
    tag: If specified, tag the result with this tag.

  Raises:
    ValueError: If responses is not of the correct type.
    FlowError: Method shouldn't be used in this flow (only_protos_allowed).
  """
  if self.only_protos_allowed:
    raise FlowError(
        "`SendReply` is not allowed for flows that only allow protos. Use"
        " `SendReplyProto` instead."
    )

  if not isinstance(response, rdfvalue.RDFValue):
    raise ValueError(
        f"SendReply can only send RDFValues, got {type(response)}"
    )

  if not any(isinstance(response, t) for t in self.result_types):
    logging.warning(
        "Flow %s sends response of unexpected type %s.",
        type(self).__name__,
        type(response).__name__,
    )

  reply = rdf_flow_objects.FlowResult(
      client_id=self.rdf_flow.client_id,
      flow_id=self.rdf_flow.flow_id,
      hunt_id=self.rdf_flow.parent_hunt_id,
      payload=response,
      tag=tag,
  )
  if self.rdf_flow.parent_flow_id:
    if isinstance(response, rdf_structs.RDFProtoStruct):
      rdf_packed_payload = rdf_structs.AnyValue.Pack(response)
    else:
      # Should log for `GetMBR` flow which returns `RDFBytes`.
      # Might fail for others that we're unaware but also return primitives.
      logging.error(
          "Flow %s sends response of unexpected type %s.",
          self.__class__.__name__,
          type(response),
      )
      rdf_packed_payload = None

    flow_response = rdf_flow_objects.FlowResponse(
        client_id=self.rdf_flow.client_id,
        request_id=self.rdf_flow.parent_request_id,
        response_id=self.GetNextResponseId(),
        payload=response,
        any_payload=rdf_packed_payload,
        flow_id=self.rdf_flow.parent_flow_id,
        tag=tag,
    )

    self.flow_responses.append(flow_response)
    # For nested flows we want the replies to be written,
    # but not to be processed by output plugins.
    self.replies_to_write.append(reply)
  else:
    self.replies_to_write.append(reply)
    self.replies_to_process.append(reply)

  self.rdf_flow.num_replies_sent += 1

  # Keeping track of result types/tags in a plain Python
  # _num_replies_per_type_tag dict. In RDFValues/proto2 we have to represent
  # dictionaries as lists of key-value pairs (i.e. there's no library
  # support for dicts as data structures). Hence, updating a key would
  # require iterating over the pairs - which might get expensive for
  # hundreds of thousands of results. To avoid the issue we keep a
  # non-serialized Python dict to be later accumulated into a serializable
  # FlowResultCount in PersistState().
  key = (type(response).__name__, tag or "")
  self._num_replies_per_type_tag[key] += 1
SendReply · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
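A small hedged sketch of tagged replies: a state can attach a `tag` so results can later be filtered by tag; the connection message and the condition are invented for illustration.

def ProcessConnections(self, responses) -> None:
  for conn in responses:
    if conn.state == "LISTEN":  # Illustrative condition.
      self.SendReply(conn, tag="listening")
    else:
      self.SendReply(conn)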
def SendReplyProto(
    self,
    response: pb_message.Message,
    tag: Optional[str] = None,
) -> None:
  """Allows this flow to save a flow result to the database.

  In case of a child flow, results are also returned to the parent flow.

  Args:
    response: A protobuf instance to be sent to the parent.
    tag: If specified, tag the result with this tag.

  Raises:
    TypeError: If responses is not of the correct type.
  """
  if not isinstance(response, pb_message.Message):
    raise TypeError(
        f"SendReplyProto can only send Protobufs, got {type(response)}"
    )

  if not any(isinstance(response, t) for t in self.proto_result_types):
    raise TypeError(
        f"Flow {type(self).__name__} sends response of unexpected type"
        f" {type(response).__name__}. Expected one of"
        f" {self.proto_result_types}",
    )

  reply = flows_pb2.FlowResult(
      client_id=self.rdf_flow.client_id,
      flow_id=self.rdf_flow.flow_id,
      hunt_id=self.rdf_flow.parent_hunt_id,
      tag=tag,
  )
  reply.payload.Pack(response)
  self.proto_replies_to_write.append(reply)

  if self.rdf_flow.parent_flow_id:
    res = flows_pb2.FlowResponse(
        client_id=self.rdf_flow.client_id,
        request_id=self.rdf_flow.parent_request_id,
        response_id=self.GetNextResponseId(),
        flow_id=self.rdf_flow.parent_flow_id,
        tag=tag,
    )
    res.payload.Pack(response)
    res.any_payload.Pack(response)
    self.proto_flow_responses.append(res)
  else:
    # We only want to process replies with output plugins if this is
    # a parent flow (not nested).
    self.proto_replies_to_process.append(reply)

  self.rdf_flow.num_replies_sent += 1

  # Keeping track of result types/tags in a plain Python
  # _num_replies_per_type_tag dict. In RDFValues/proto2 we have to represent
  # dictionaries as lists of key-value pairs (i.e. there's no library
  # support for dicts as data structures). Hence, updating a key would
  # require iterating over the pairs - which might get expensive for
  # hundreds of thousands of results. To avoid the issue we keep a
  # non-serialized Python dict to be later accumulated into a serializable
  # FlowResultCount in PersistState().
  key = (type(response).__name__, tag or "")
  self._num_replies_per_type_tag[key] += 1
SendReplyProto · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
def SaveResourceUsage(self, status: rdf_flow_objects.FlowStatus) -> None:
  """Method to tally resources."""
  user_cpu = status.cpu_time_used.user_cpu_time
  system_cpu = status.cpu_time_used.system_cpu_time
  self.rdf_flow.cpu_time_used.user_cpu_time += user_cpu
  self.rdf_flow.cpu_time_used.system_cpu_time += system_cpu

  self.rdf_flow.network_bytes_sent += status.network_bytes_sent

  if not self.rdf_flow.runtime_us:
    self.rdf_flow.runtime_us = rdfvalue.Duration(0)

  if status.runtime_us:
    self.rdf_flow.runtime_us += status.runtime_us

  if self.rdf_flow.cpu_limit:
    user_cpu_total = self.rdf_flow.cpu_time_used.user_cpu_time
    system_cpu_total = self.rdf_flow.cpu_time_used.system_cpu_time
    if self.rdf_flow.cpu_limit < (user_cpu_total + system_cpu_total):
      # We have exceeded our CPU time limit, stop this flow.
      raise flow.FlowResourcesExceededError(
          "CPU limit exceeded for {} {}.".format(
              self.rdf_flow.flow_class_name, self.rdf_flow.flow_id
          )
      )

  if (
      self.rdf_flow.network_bytes_limit
      and self.rdf_flow.network_bytes_limit < self.rdf_flow.network_bytes_sent
  ):
    # We have exceeded our byte limit, stop this flow.
    raise flow.FlowResourcesExceededError(
        "Network bytes limit exceeded {} {}.".format(
            self.rdf_flow.flow_class_name, self.rdf_flow.flow_id
        )
    )

  if (
      self.rdf_flow.runtime_limit_us
      and self.rdf_flow.runtime_limit_us < self.rdf_flow.runtime_us
  ):
    raise flow.FlowResourcesExceededError(
        "Runtime limit exceeded {} {}.".format(
            self.rdf_flow.flow_class_name, self.rdf_flow.flow_id
        )
    )
SaveResourceUsage · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
def Error(
    self,
    error_message: Optional[str] = None,
    backtrace: Optional[str] = None,
    status: Optional[rdf_structs.EnumNamedValue] = None,
) -> None:
  """Terminates this flow with an error."""
  flow_name = self.__class__.__name__
  is_child = bool(self.rdf_flow.parent_flow_id)
  exception_name = _ExtractExceptionName(error_message)
  FLOW_ERRORS.Increment(fields=[flow_name, is_child, exception_name])

  client_id = self.rdf_flow.client_id
  flow_id = self.rdf_flow.flow_id

  # `backtrace` is set for unexpected failures caught in a wildcard except
  # branch, so these are logged as errors. `backtrace` is None for faults
  # that are anticipated in flows, so these are only logged as warnings.
  if backtrace:
    logging.error(
        "Error in flow %s on %s: %s, %s",
        flow_id,
        client_id,
        error_message,
        backtrace,
    )
  else:
    logging.warning(
        "Error in flow %s on %s: %s:", flow_id, client_id, error_message
    )

  if self.rdf_flow.parent_flow_id or self.rdf_flow.parent_hunt_id:
    status_msg = rdf_flow_objects.FlowStatus(
        client_id=client_id,
        request_id=self.rdf_flow.parent_request_id,
        response_id=self.GetNextResponseId(),
        cpu_time_used=self.rdf_flow.cpu_time_used,
        network_bytes_sent=self.rdf_flow.network_bytes_sent,
        runtime_us=self.rdf_flow.runtime_us,
        error_message=error_message,
        flow_id=self.rdf_flow.parent_flow_id,
        backtrace=backtrace,
    )

    if status is not None:
      status_msg.status = status
    else:
      status_msg.status = rdf_flow_objects.FlowStatus.Status.ERROR

    if self.rdf_flow.parent_flow_id:
      self.flow_responses.append(status_msg)
    elif self.rdf_flow.parent_hunt_id:
      hunt.StopHuntIfCPUOrNetworkLimitsExceeded(self.rdf_flow.parent_hunt_id)

  self.rdf_flow.flow_state = self.rdf_flow.FlowState.ERROR
  if backtrace is not None:
    self.rdf_flow.backtrace = backtrace
  if error_message is not None:
    self.rdf_flow.error_message = error_message

  self.NotifyCreatorOfError()
Error · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
def _ClearAllRequestsAndResponses(self) -> None:
  """Clears all requests and responses."""
  client_id = self.rdf_flow.client_id
  flow_id = self.rdf_flow.flow_id

  # Remove all requests queued for deletion that we delete in the call
  # below.
  self.completed_requests = [
      r
      for r in self.completed_requests
      if r.client_id != client_id or r.flow_id != flow_id
  ]

  data_store.REL_DB.DeleteAllFlowRequestsAndResponses(client_id, flow_id)
_ClearAllRequestsAndResponses · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
def NotifyAboutEnd(self) -> None:
  """Notify about the end of the flow."""
  # Sum up number of replies to write with the number of already
  # written results.
  num_results = (
      len(self.replies_to_write)
      + len(self.proto_replies_to_write)
      + data_store.REL_DB.CountFlowResults(
          self.rdf_flow.client_id, self.rdf_flow.flow_id
      )
  )
  flow_ref = objects_pb2.FlowReference(
      client_id=self.rdf_flow.client_id, flow_id=self.rdf_flow.flow_id
  )
  notification_lib.Notify(
      self.creator,
      objects_pb2.UserNotification.Type.TYPE_FLOW_RUN_COMPLETED,
      "Flow %s completed with %d %s"
      % (
          self.__class__.__name__,
          num_results,
          num_results == 1 and "result" or "results",
      ),
      objects_pb2.ObjectReference(
          reference_type=objects_pb2.ObjectReference.Type.FLOW, flow=flow_ref
      ),
  )
NotifyAboutEnd · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
def MarkDone(self, status=None):
  """Marks this flow as done."""
  FLOW_COMPLETIONS.Increment(fields=[self.__class__.__name__])

  # Notify our parent flow or hunt that we are done (if there's a parent
  # flow or hunt).
  if self.rdf_flow.parent_flow_id or self.rdf_flow.parent_hunt_id:
    status = rdf_flow_objects.FlowStatus(
        client_id=self.rdf_flow.client_id,
        request_id=self.rdf_flow.parent_request_id,
        response_id=self.GetNextResponseId(),
        status=rdf_flow_objects.FlowStatus.Status.OK,
        cpu_time_used=self.rdf_flow.cpu_time_used,
        network_bytes_sent=self.rdf_flow.network_bytes_sent,
        runtime_us=self.rdf_flow.runtime_us,
        flow_id=self.rdf_flow.parent_flow_id,
    )
    if self.rdf_flow.parent_flow_id:
      self.flow_responses.append(status)
    elif self.rdf_flow.parent_hunt_id:
      hunt.StopHuntIfCPUOrNetworkLimitsExceeded(self.rdf_flow.parent_hunt_id)

  self.rdf_flow.flow_state = self.rdf_flow.FlowState.FINISHED

  if self.ShouldSendNotifications():
    self.NotifyAboutEnd()
MarkDone · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
def Log(self, format_str: str, *args: object) -> None:
  """Logs the message using the flow's standard logging.

  Args:
    format_str: Format string
    *args: arguments to the format string
  """
  # If there are no formatting arguments given, we do not format the
  # message. This behaviour is in-line with `logging.*` functions and
  # allows one to log messages with `%` without weird workarounds.
  if not args:
    message = format_str
  else:
    message = format_str % args

  log_entry = flows_pb2.FlowLogEntry(
      client_id=self.rdf_flow.client_id,
      flow_id=self.rdf_flow.flow_id,
      hunt_id=self.rdf_flow.parent_hunt_id,
      message=message,
  )
  data_store.REL_DB.WriteFlowLogEntry(log_entry)
Log · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
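The no-args branch matters whenever a message legitimately contains `%`. Two calls, both safe under the implementation above:

self.Log("Download 100% complete")          # No args: string is not %-formatted.
self.Log("Fetched %d of %d files", 10, 25)  # Args given: normal %-formatting.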
def RunStateMethod(
    self,
    method_name: str,
    request: Optional[rdf_flow_objects.FlowRequest] = None,
    responses: Optional[
        Sequence[
            Union[
                rdf_flow_objects.FlowResponse,
                rdf_flow_objects.FlowStatus,
                rdf_flow_objects.FlowIterator,
            ]
        ]
    ] = None,
) -> None:
  """Completes the request by calling the state method.

  Args:
    method_name: The name of the state method to call.
    request: A RequestState protobuf.
    responses: A list of FlowResponses, FlowStatuses, and FlowIterators
      responding to the request.

  Raises:
    FlowError: Processing time for the flow has expired.
  """
  client_id = self.rdf_flow.client_id
  deadline = self.rdf_flow.processing_deadline
  if deadline and rdfvalue.RDFDatetime.Now() > deadline:
    raise FlowError(
        "Processing time for flow %s on %s expired."
        % (self.rdf_flow.flow_id, self.rdf_flow.client_id)
    )

  self.rdf_flow.current_state = method_name
  if request and responses:
    logging.debug(
        "Running %s for flow %s on %s, %d responses.",
        method_name,
        self.rdf_flow.flow_id,
        client_id,
        len(responses),
    )
  else:
    logging.debug(
        "Running %s for flow %s on %s",
        method_name,
        self.rdf_flow.flow_id,
        client_id,
    )

  try:
    try:
      method = getattr(self, method_name)
    except AttributeError:
      raise ValueError(
          "Flow %s has no state method %s"
          % (self.__class__.__name__, method_name)
      ) from None

    # Prepare a responses object for the state method to use:
    if responses is not None and (
        hasattr(method, "_proto2_any_responses")
        and method._proto2_any_responses  # pylint: disable=protected-access
    ):
      responses = flow_responses.Responses.FromResponsesProto2Any(
          responses, request
      )
    else:
      responses = flow_responses.Responses.FromResponses(
          request=request, responses=responses
      )

    if responses.status is not None:
      self.SaveResourceUsage(responses.status)

    GRR_WORKER_STATES_RUN.Increment()

    if method_name == "Start":
      FLOW_STARTS.Increment(fields=[self.rdf_flow.flow_class_name])
      method()
    elif method_name == "End":
      method()
    else:
      method(responses)

    # TODO: Refactor output plugins to be internally proto-based.
    if self.proto_replies_to_process:
      rdf_replies = [
          mig_flow_objects.ToRDFFlowResult(r)
          for r in self.proto_replies_to_process
      ]
      self.replies_to_process.extend(rdf_replies)
      self.proto_replies_to_process = []

    if self.replies_to_process:
      if self.rdf_flow.parent_hunt_id and not self.rdf_flow.parent_flow_id:
        self._ProcessRepliesWithHuntOutputPlugins(self.replies_to_process)
      else:
        self._ProcessRepliesWithFlowOutputPlugins(self.replies_to_process)

      self.replies_to_process = []
  except flow.FlowResourcesExceededError as e:
    logging.info(
        "Flow %s on %s exceeded resource limits: %s.",
        self.rdf_flow.flow_id,
        client_id,
        str(e),
    )
    self.Error(error_message=str(e))
  # We don't know here what exceptions can be thrown in the flow but we
  # have to continue. Thus, we catch everything.
  except Exception as e:  # pylint: disable=broad-except
    msg = str(e)
    self.Error(error_message=msg, backtrace=traceback.format_exc())
RunStateMethod · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
def ProcessAllReadyRequests(self) -> tuple[int, int]:
  """Processes all requests that are due to run.

  Returns:
    (processed, incrementally_processed) The number of completed processed
    requests and the number of incrementally processed ones.
  """
  request_dict = data_store.REL_DB.ReadFlowRequests(
      self.rdf_flow.client_id,
      self.rdf_flow.flow_id,
  )

  completed_requests = FindCompletedRequestsToProcess(
      request_dict,
      self.rdf_flow.next_request_to_process,
  )

  incremental_requests = FindIncrementalRequestsToProcess(
      request_dict,
      self.rdf_flow.next_request_to_process,
  )
  # When dealing with a callback flow, count all incremental requests even
  # if `incremental_requests` is empty, as it's expected that messages
  # might arrive in the wrong order and therefore not always be suitable
  # for processing.
  num_incremental = sum(
      [1 for _, (req, _) in request_dict.items() if req.callback_state]
  )

  next_response_id_map = {}
  # Process incremental requests' updates first. Incremental requests have
  # the 'callback_state' attribute set and the callback state is called
  # every time new responses arrive. Note that the id of the next expected
  # response is kept in request's 'next_response_id' attribute to guarantee
  # that responses are going to be processed in the right order.
  for request, responses in incremental_requests:
    request = mig_flow_objects.ToRDFFlowRequest(request)
    if not self.IsRunning():
      break

    # Responses have to be processed in the correct order, no response
    # can be skipped.
    rdf_responses = []
    for r in responses:
      if isinstance(r, flows_pb2.FlowResponse):
        rdf_responses.append(mig_flow_objects.ToRDFFlowResponse(r))
      if isinstance(r, flows_pb2.FlowStatus):
        rdf_responses.append(mig_flow_objects.ToRDFFlowStatus(r))
      if isinstance(r, flows_pb2.FlowIterator):
        rdf_responses.append(mig_flow_objects.ToRDFFlowIterator(r))

    if rdf_responses:
      # We do not send incremental updates for FlowStatus updates.
      # TODO: Check if the id of last message in to_process, the
      # FlowStatus, is important to keep for the next_response_id map, as
      # the flow is anyways complete then. If not we can skip adding the
      # FlowStatus to the `to_process` list instead of filtering it out
      # here.
      flow_updates = [
          r
          for r in rdf_responses
          if not isinstance(r, rdf_flow_objects.FlowStatus)
      ]

      if flow_updates:
        self.RunStateMethod(request.callback_state, request, flow_updates)

      # If the request was processed, update the next_response_id.
      next_response_id_map[request.request_id] = (
          rdf_responses[-1].response_id + 1
      )

  if next_response_id_map:
    data_store.REL_DB.UpdateIncrementalFlowRequests(
        self.rdf_flow.client_id, self.rdf_flow.flow_id, next_response_id_map
    )

  # Process completed requests.
  #
  # If the flow gets a bunch of requests to process and processing one of
  # them leads to flow termination, other requests should be ignored.
  # Hence: self.IsRunning check in the loop's condition.
  for request, responses in completed_requests:
    if not self.IsRunning():
      break

    rdf_request = mig_flow_objects.ToRDFFlowRequest(request)
    rdf_responses = []
    for r in responses:
      if isinstance(r, flows_pb2.FlowResponse):
        rdf_responses.append(mig_flow_objects.ToRDFFlowResponse(r))
      if isinstance(r, flows_pb2.FlowStatus):
        rdf_responses.append(mig_flow_objects.ToRDFFlowStatus(r))
      if isinstance(r, flows_pb2.FlowIterator):
        rdf_responses.append(mig_flow_objects.ToRDFFlowIterator(r))
    # If there's not even a `Status` response, we send `None` as response.
    if not rdf_responses:
      rdf_responses = None

    self.RunStateMethod(request.next_state, rdf_request, rdf_responses)
    self.rdf_flow.next_request_to_process += 1
    self.completed_requests.append(request)

  if (
      completed_requests
      and self.IsRunning()
      and not self.outstanding_requests
  ):
    self.RunStateMethod("End")
    if (
        self.rdf_flow.flow_state == self.rdf_flow.FlowState.RUNNING
        and not self.outstanding_requests
    ):
      self.MarkDone()

  self.PersistState()

  if not self.IsRunning():
    # All requests and responses can now be deleted.
    self._ClearAllRequestsAndResponses()

  return len(completed_requests), num_incremental
ProcessAllReadyRequests · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
def outstanding_requests(self) -> int:
  """Returns the number of all outstanding requests.

  This is used to determine if the flow needs to be destroyed yet.

  Returns:
    the number of all outstanding requests.
  """
  return (
      self.rdf_flow.next_outbound_id - self.rdf_flow.next_request_to_process
  )
outstanding_requests · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
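The count is a plain difference of two monotonically increasing counters. A worked example with invented values:

# Request IDs 1..6 have been handed out (next_outbound_id == 7) and all
# requests below ID 4 are already processed (next_request_to_process == 4),
# so requests 4, 5 and 6 are still outstanding.
next_outbound_id = 7
next_request_to_process = 4
assert next_outbound_id - next_request_to_process == 3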
def FlushQueuedMessages(self) -> None:
  """Flushes queued messages."""
  # TODO(amoser): This could be done in a single db call, might be worth
  # optimizing.

  if self.flow_requests or self.proto_flow_requests:
    all_requests = [
        mig_flow_objects.ToProtoFlowRequest(r) for r in self.flow_requests
    ] + self.proto_flow_requests
    # We make a single DB call to write all requests. Contrary to what the
    # name suggests, this method does more than writing the requests to the
    # DB. It also tallies the flows that need processing and updates the
    # next request to process. Writing the requests in separate calls can
    # interfere with this process.
    data_store.REL_DB.WriteFlowRequests(all_requests)
    self.flow_requests = []
    self.proto_flow_requests = []

  if self.flow_responses:
    flow_responses_proto = []
    for r in self.flow_responses:
      if isinstance(r, rdf_flow_objects.FlowResponse):
        flow_responses_proto.append(mig_flow_objects.ToProtoFlowResponse(r))
      if isinstance(r, rdf_flow_objects.FlowStatus):
        flow_responses_proto.append(mig_flow_objects.ToProtoFlowStatus(r))
      if isinstance(r, rdf_flow_objects.FlowIterator):
        flow_responses_proto.append(mig_flow_objects.ToProtoFlowIterator(r))
    data_store.REL_DB.WriteFlowResponses(flow_responses_proto)
    self.flow_responses = []

  if self.proto_flow_responses:
    data_store.REL_DB.WriteFlowResponses(self.proto_flow_responses)
    self.proto_flow_responses = []

  if self.client_action_requests:
    client_id = self.rdf_flow.client_id
    for request in self.client_action_requests:
      fleetspeak_utils.SendGrrMessageThroughFleetspeak(client_id, request)
    self.client_action_requests = []

  if self.proto_client_action_requests:
    client_id = self.rdf_flow.client_id
    for request in self.proto_client_action_requests:
      fleetspeak_utils.SendGrrMessageProtoThroughFleetspeak(
          client_id, request
      )
    self.proto_client_action_requests = []

  for request in self.rrg_requests:
    fleetspeak_utils.SendRrgRequest(self.rdf_flow.client_id, request)

  self.rrg_requests = []

  if self.completed_requests:
    data_store.REL_DB.DeleteFlowRequests(self.completed_requests)
    self.completed_requests = []

  if self.proto_replies_to_write or self.replies_to_write:
    all_results = self.proto_replies_to_write + [
        mig_flow_objects.ToProtoFlowResult(r) for r in self.replies_to_write
    ]
    # Write flow results to REL_DB, even if the flow is a nested flow.
    data_store.REL_DB.WriteFlowResults(all_results)
    if self.rdf_flow.parent_hunt_id:
      hunt.StopHuntIfCPUOrNetworkLimitsExceeded(self.rdf_flow.parent_hunt_id)
    self.proto_replies_to_write = []
    self.replies_to_write = []
FlushQueuedMessages · python · google/grr · grr/server/grr_response_server/flow_base.py · Apache-2.0
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
def _ProcessRepliesWithHuntOutputPlugins(
    self, replies: Sequence[rdf_flow_objects.FlowResult]
) -> None:
  """Applies output plugins to hunt results."""
  hunt_obj = data_store.REL_DB.ReadHuntObject(self.rdf_flow.parent_hunt_id)
  hunt_obj = mig_hunt_objects.ToRDFHunt(hunt_obj)
  self.rdf_flow.output_plugins = hunt_obj.output_plugins
  hunt_output_plugins_states = data_store.REL_DB.ReadHuntOutputPluginsStates(
      self.rdf_flow.parent_hunt_id
  )
  hunt_output_plugins_states = [
      mig_flow_runner.ToRDFOutputPluginState(s)
      for s in hunt_output_plugins_states
  ]
  self.rdf_flow.output_plugins_states = hunt_output_plugins_states

  created_plugins = self._ProcessRepliesWithFlowOutputPlugins(replies)

  for index, (plugin, state) in enumerate(
      zip(created_plugins, hunt_output_plugins_states)
  ):
    if plugin is None:
      continue

    # Only do the REL_DB call if the plugin state has actually changed.
    s = state.plugin_state.Copy()
    plugin.UpdateState(s)
    if s != state.plugin_state:

      def UpdateFn(
          plugin_state: jobs_pb2.AttributedDict,
      ) -> jobs_pb2.AttributedDict:
        plugin_state_rdf = mig_protodict.ToRDFAttributedDict(plugin_state)
        plugin.UpdateState(plugin_state_rdf)  # pylint: disable=cell-var-from-loop
        plugin_state = mig_protodict.ToProtoAttributedDict(plugin_state_rdf)
        return plugin_state

      data_store.REL_DB.UpdateHuntOutputPluginState(
          hunt_obj.hunt_id, index, UpdateFn
      )

  for plugin_def, created_plugin in zip(
      hunt_obj.output_plugins, created_plugins
  ):
    if created_plugin is not None:
      HUNT_RESULTS_RAN_THROUGH_PLUGIN.Increment(
          len(replies), fields=[plugin_def.plugin_name]
      )
    else:
      HUNT_OUTPUT_PLUGIN_ERRORS.Increment(fields=[plugin_def.plugin_name])
Applies output plugins to hunt results.
_ProcessRepliesWithHuntOutputPlugins
python
google/grr
grr/server/grr_response_server/flow_base.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
Apache-2.0
def _ProcessRepliesWithFlowOutputPlugins( self, replies: Sequence[rdf_flow_objects.FlowResult] ) -> Sequence[Optional[output_plugin_lib.OutputPlugin]]: """Processes replies with output plugins.""" created_output_plugins = [] for index, output_plugin_state in enumerate( self.rdf_flow.output_plugins_states ): plugin_descriptor = output_plugin_state.plugin_descriptor output_plugin_cls = plugin_descriptor.GetPluginClass() args = plugin_descriptor.args output_plugin = output_plugin_cls( source_urn=self.rdf_flow.long_flow_id, args=args ) try: output_plugin.ProcessResponses( output_plugin_state.plugin_state, replies, ) output_plugin.Flush(output_plugin_state.plugin_state) output_plugin.UpdateState(output_plugin_state.plugin_state) data_store.REL_DB.WriteFlowOutputPluginLogEntry( flows_pb2.FlowOutputPluginLogEntry( client_id=self.rdf_flow.client_id, flow_id=self.rdf_flow.flow_id, hunt_id=self.rdf_flow.parent_hunt_id, output_plugin_id="%d" % index, log_entry_type=flows_pb2.FlowOutputPluginLogEntry.LogEntryType.LOG, message="Processed %d replies." % len(replies), ) ) self.Log( "Plugin %s successfully processed %d flow replies.", plugin_descriptor, len(replies), ) created_output_plugins.append(output_plugin) except Exception as e: # pylint: disable=broad-except logging.exception( "Plugin %s failed to process %d replies.", plugin_descriptor, len(replies), ) created_output_plugins.append(None) data_store.REL_DB.WriteFlowOutputPluginLogEntry( flows_pb2.FlowOutputPluginLogEntry( client_id=self.rdf_flow.client_id, flow_id=self.rdf_flow.flow_id, hunt_id=self.rdf_flow.parent_hunt_id, output_plugin_id="%d" % index, log_entry_type=flows_pb2.FlowOutputPluginLogEntry.LogEntryType.ERROR, message="Error while processing %d replies: %s" % (len(replies), str(e)), ) ) self.Log( "Plugin %s failed to process %d replies due to: %s", plugin_descriptor, len(replies), e, ) return created_output_plugins
Processes replies with output plugins.
_ProcessRepliesWithFlowOutputPlugins
python
google/grr
grr/server/grr_response_server/flow_base.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
Apache-2.0
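The per-plugin try/except above is what keeps one misbehaving output plugin from blocking the others: a failure is recorded as None in the returned list while the remaining plugins still see the full batch of replies. A minimal, self-contained sketch of that isolation pattern, with plain callables as hypothetical stand-ins for output plugins:

from typing import Callable, List, Optional, Sequence

Plugin = Callable[[Sequence[str]], None]


def process_with_plugins(
    plugins: Sequence[Plugin], replies: Sequence[str]
) -> List[Optional[Plugin]]:
  created = []
  for plugin in plugins:
    try:
      plugin(replies)  # May raise; the remaining plugins must still run.
      created.append(plugin)
    except Exception:  # Mirrors the broad catch in the flow code above.
      created.append(None)
  return created


def ok(replies):
  print("processed %d replies" % len(replies))


def broken(replies):
  raise RuntimeError("plugin failure")


print(process_with_plugins([ok, broken], ["a", "b"]))  # [ok, None]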
def MergeQueuedMessages(self, flow_obj: "FlowBase") -> None: """Merges queued messages.""" self.flow_requests.extend(flow_obj.flow_requests) flow_obj.flow_requests = [] self.proto_flow_requests.extend(flow_obj.proto_flow_requests) flow_obj.proto_flow_requests = [] self.flow_responses.extend(flow_obj.flow_responses) flow_obj.flow_responses = [] self.proto_flow_responses.extend(flow_obj.proto_flow_responses) flow_obj.proto_flow_responses = [] self.rrg_requests.extend(flow_obj.rrg_requests) flow_obj.rrg_requests = [] self.client_action_requests.extend(flow_obj.client_action_requests) flow_obj.client_action_requests = [] self.proto_client_action_requests.extend( flow_obj.proto_client_action_requests ) flow_obj.proto_client_action_requests = [] self.completed_requests.extend(flow_obj.completed_requests) flow_obj.completed_requests = [] self.replies_to_write.extend(flow_obj.replies_to_write) flow_obj.replies_to_write = [] self.proto_replies_to_write.extend(flow_obj.proto_replies_to_write) flow_obj.proto_replies_to_write = []
Merges queued messages.
MergeQueuedMessages
python
google/grr
grr/server/grr_response_server/flow_base.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
Apache-2.0
def GetFilesArchiveMappings( self, flow_results: Iterator[rdf_flow_objects.FlowResult] ) -> Iterator[ClientPathArchiveMapping]: """Returns a mapping used to generate a flow results archive. If this is implemented by a flow, then instead of generating a general-purpose archive with all files referenced in the results present, an archive would be generated with just the files referenced in the mappings. Args: flow_results: An iterator for flow results. Returns: An iterator of mappings from REL_DB's ClientPaths to archive paths. Raises: NotImplementedError: if not implemented by a subclass. """ raise NotImplementedError("GetFilesArchiveMappings() not implemented")
Returns a mapping used to generate a flow results archive. If this is implemented by a flow, then instead of generating a general-purpose archive with all files referenced in the results present, an archive would be generated with just the files referenced in the mappings. Args: flow_results: An iterator for flow results. Returns: An iterator of mappings from REL_DB's ClientPaths to archive paths. Raises: NotImplementedError: if not implemented by a subclass.
GetFilesArchiveMappings
python
google/grr
grr/server/grr_response_server/flow_base.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
Apache-2.0
def _AccountForProtoResultMetadata(self): """Merges `_num_replies_per_type_tag` Counter with current ResultMetadata.""" self._result_metadata.is_metadata_set = True for r in self._result_metadata.num_results_per_type_tag: key = (r.type, r.tag) # This removes the item from _num_replies_per_type_tag if it's present in # result_metadata. count = self._num_replies_per_type_tag.pop(key, 0) r.count = r.count + count # Iterate over remaining items - i.e. items that were not present in # result_metadata. for ( result_type, result_tag, ), count in self._num_replies_per_type_tag.items(): self._result_metadata.num_results_per_type_tag.append( flows_pb2.FlowResultCount( type=result_type, tag=result_tag, count=count ) ) self._num_replies_per_type_tag = collections.Counter() self.rdf_flow.result_metadata = ( rdf_flow_objects.FlowResultMetadata().FromSerializedBytes( self._result_metadata.SerializeToString() ) )
Merges `_num_replies_per_type_tag` Counter with current ResultMetadata.
_AccountForProtoResultMetadata
python
google/grr
grr/server/grr_response_server/flow_base.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
Apache-2.0
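The merge above folds an in-memory Counter keyed by (type, tag) into the already-persisted counts: existing entries are incremented (and their keys popped from the Counter), then whatever keys remain are appended as new entries. A self-contained sketch of the same bookkeeping, with plain dicts standing in for the FlowResultCount protos:

import collections

persisted = [{"type": "FileFinderResult", "tag": "", "count": 3}]
pending = collections.Counter(
    {("FileFinderResult", ""): 2, ("StatEntry", "x"): 1}
)

for entry in persisted:
  key = (entry["type"], entry["tag"])
  entry["count"] += pending.pop(key, 0)  # Pop removes the key if present.

# Only keys never seen in the persisted list remain in the Counter.
for (result_type, tag), count in pending.items():
  persisted.append({"type": result_type, "tag": tag, "count": count})

print(persisted)
# [{'type': 'FileFinderResult', 'tag': '', 'count': 5},
#  {'type': 'StatEntry', 'tag': 'x', 'count': 1}]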
def PersistState(self) -> None: """Persists flow state.""" self._AccountForProtoResultMetadata() self.rdf_flow.persistent_data = self.state if self._store is not None: self.rdf_flow.store = rdf_structs.AnyValue.PackProto2(self._store) if self._progress is not None: self.rdf_flow.progress = rdf_structs.AnyValue.PackProto2(self._progress)
Persists flow state.
PersistState
python
google/grr
grr/server/grr_response_server/flow_base.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
Apache-2.0
def args(self, args: rdfvalue.RDFValue) -> None: """Updates both rdf and proto args.""" if not isinstance(args, self.args_type): raise TypeError( f"args must be of type {self.args_type}, got {type(args)} instead." ) self.rdf_flow.args = args self._proto_args = self.proto_args_type() self._proto_args.ParseFromString(args.SerializeToBytes())
Updates both rdf and proto args.
args
python
google/grr
grr/server/grr_response_server/flow_base.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
Apache-2.0
def proto_args(self) -> _ProtoArgsT: """Returns the proto args.""" if self._proto_args is not None: return self._proto_args # We use `rdf_flow.args` as source of truth for now. if self.rdf_flow.HasField("args"): # Assumes the RDF and proto types share a compatible wire format. args = self.proto_args_type() args.ParseFromString(self.args.SerializeToBytes()) self._proto_args = args else: self._proto_args = self.proto_args_type() return self._proto_args
Returns the proto args.
proto_args
python
google/grr
grr/server/grr_response_server/flow_base.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
Apache-2.0
def UseProto2AnyResponses( state_method: Callable[ [FlowBase, flow_responses.Responses[any_pb2.Any]], None ], ) -> Callable[[FlowBase, flow_responses.Responses[any_pb2.Any]], None]: """Instructs flow execution not to use RDF magic for unpacking responses. The current default behaviour of the flow execution is to do type lookup and automagically unpack flow responses to "appropriate" type. This behaviour is problematic for many reasons and methods that do not need to rely on it should use this annotation. Args: state_method: A flow state method to annotate. Returns: A flow state method that will not have the problematic behaviour. """ @functools.wraps(state_method) def Wrapper(self, responses: flow_responses.Responses) -> None: return state_method(self, responses) Wrapper._proto2_any_responses = True # pylint: disable=protected-access return Wrapper
Instructs flow execution not to use RDF magic for unpacking responses. The current default behaviour of the flow execution is to do type lookup and automagically unpack flow responses to "appropriate" type. This behaviour is problematic for many reasons and methods that do not need to rely on it should use this annotation. Args: state_method: A flow state method to annotate. Returns: A flow state method that will not have the problematic behaviour.
UseProto2AnyResponses
python
google/grr
grr/server/grr_response_server/flow_base.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
Apache-2.0
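The decorator above is a pure marker: the functools.wraps wrapper changes nothing at call time, it only attaches an attribute that the flow execution engine inspects later to decide whether to skip response unpacking. A self-contained sketch of that marker-decorator pattern (the names here are illustrative, not GRR APIs):

import functools


def use_raw_responses(method):
  @functools.wraps(method)
  def wrapper(self, responses):
    return method(self, responses)  # Behaviour is unchanged.

  wrapper._raw_responses = True  # The marker the engine looks for.
  return wrapper


class DemoFlow:

  @use_raw_responses
  def ProcessResponses(self, responses):
    return list(responses)


# The engine-side check: does this state method want raw responses?
print(getattr(DemoFlow.ProcessResponses, "_raw_responses", False))  # True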
def _TerminateFlow( proto_flow: flows_pb2.Flow, reason: Optional[str] = None, flow_state: rdf_structs.EnumNamedValue = rdf_flow_objects.Flow.FlowState.ERROR, ) -> None: """Does the actual termination.""" flow_cls = FlowRegistry.FlowClassByName(proto_flow.flow_class_name) rdf_flow = mig_flow_objects.ToRDFFlow(proto_flow) flow_obj = flow_cls(rdf_flow) if not flow_obj.IsRunning(): # Nothing to do. return logging.info( "Terminating flow %s on %s, reason: %s", rdf_flow.flow_id, rdf_flow.client_id, reason, ) rdf_flow.flow_state = flow_state rdf_flow.error_message = reason flow_obj.NotifyCreatorOfError() proto_flow = mig_flow_objects.ToProtoFlow(rdf_flow) data_store.REL_DB.UpdateFlow( proto_flow.client_id, proto_flow.flow_id, flow_obj=proto_flow, processing_on=None, processing_since=None, processing_deadline=None, ) data_store.REL_DB.DeleteAllFlowRequestsAndResponses( proto_flow.client_id, proto_flow.flow_id )
Does the actual termination.
_TerminateFlow
python
google/grr
grr/server/grr_response_server/flow_base.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
Apache-2.0
def TerminateFlow( client_id: str, flow_id: str, reason: Optional[str] = None, flow_state: rdf_structs.EnumNamedValue = rdf_flow_objects.Flow.FlowState.ERROR, ) -> None: """Terminates a flow and all of its children. Args: client_id: Client ID of a flow to terminate. flow_id: Flow ID of a flow to terminate. reason: String with a termination reason. flow_state: Flow state to be assigned to a flow after termination. Defaults to FlowState.ERROR. """ to_terminate = [data_store.REL_DB.ReadFlowObject(client_id, flow_id)] while to_terminate: next_to_terminate = [] for proto_flow in to_terminate: _TerminateFlow(proto_flow, reason=reason, flow_state=flow_state) next_to_terminate.extend( data_store.REL_DB.ReadChildFlowObjects( proto_flow.client_id, proto_flow.flow_id ) ) to_terminate = next_to_terminate
Terminates a flow and all of its children. Args: client_id: Client ID of a flow to terminate. flow_id: Flow ID of a flow to terminate. reason: String with a termination reason. flow_state: Flow state to be assigned to a flow after termination. Defaults to FlowState.ERROR.
TerminateFlow
python
google/grr
grr/server/grr_response_server/flow_base.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flow_base.py
Apache-2.0
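TerminateFlow walks the flow tree level by level, so a parent is always terminated before any of its children. A self-contained sketch of that breadth-first cascade, with a plain dict standing in for ReadChildFlowObjects:

children = {"root": ["a", "b"], "a": ["a1"], "b": [], "a1": []}

to_terminate = ["root"]
while to_terminate:
  next_level = []
  for flow_id in to_terminate:
    print("terminating %s" % flow_id)  # Terminate this level first.
    next_level.extend(children[flow_id])  # Then collect its children.
  to_terminate = next_level
# Prints: root, a, b, a1 - parents always before their children.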
def GetKnowledgeBase(rdf_client_obj, allow_uninitialized=False): """Returns a knowledgebase from an rdf client object.""" if not allow_uninitialized: if rdf_client_obj is None: raise artifact_utils.KnowledgeBaseUninitializedError( "No client snapshot given." ) if rdf_client_obj.knowledge_base is None: raise artifact_utils.KnowledgeBaseUninitializedError( "KnowledgeBase empty for %s." % rdf_client_obj.client_id ) kb = rdf_client_obj.knowledge_base if not kb.os: raise artifact_utils.KnowledgeBaseAttributesMissingError( "KnowledgeBase missing OS for %s. Knowledgebase content: %s" % (rdf_client_obj.client_id, kb) ) if rdf_client_obj is None or rdf_client_obj.knowledge_base is None: return rdf_client.KnowledgeBase() version = rdf_client_obj.os_version.split(".") kb = rdf_client_obj.knowledge_base try: kb.os_major_version = int(version[0]) if len(version) > 1: kb.os_minor_version = int(version[1]) except ValueError: pass return kb
Returns a knowledgebase from an rdf client object.
GetKnowledgeBase
python
google/grr
grr/server/grr_response_server/artifact.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact.py
Apache-2.0
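The version handling above is deliberately tolerant: whatever prefix of `os_version` parses as integers is kept, and a ValueError leaves the remaining fields untouched rather than failing knowledgebase construction. The same logic as a self-contained function:

def parse_os_version(os_version: str) -> tuple[int, int]:
  major = minor = 0
  parts = os_version.split(".")
  try:
    major = int(parts[0])
    if len(parts) > 1:
      minor = int(parts[1])
  except ValueError:
    pass  # e.g. "buster/sid" or "10.x" - keep whatever parsed so far.
  return major, minor


print(parse_os_version("10.0.19045"))  # (10, 0)
print(parse_os_version("buster/sid"))  # (0, 0)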
def Start(self): """For each artifact, create subflows for each collector.""" self.state.knowledge_base = None self.InitializeKnowledgeBase() if self.client_os == "Linux": self.CallFlow( distro.CollectDistroInfo.__name__, next_state=self._ProcessLinuxDistroInfo.__name__, ) self.CallClient( server_stubs.EnumerateUsers, next_state=self._ProcessLinuxEnumerateUsers.__name__, ) elif self.client_os == "Darwin": list_users_dir_request = rdf_client_action.ListDirRequest() list_users_dir_request.pathspec.pathtype = rdf_paths.PathSpec.PathType.OS list_users_dir_request.pathspec.path = "/Users" self.CallClient( server_stubs.ListDirectory, request=list_users_dir_request, next_state=self._ProcessMacosListUsersDirectory.__name__, ) elif self.client_os == "Windows": # pylint: disable=line-too-long # fmt: off if self.rrg_support: self.CallRRG( action=rrg_pb2.GET_WINREG_VALUE, args=rrg_get_winreg_value_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, key=r"SOFTWARE\Microsoft\Windows NT\CurrentVersion", name="SystemRoot", ), next_state=self._ProcessRRGWindowsEnvSystemRoot.__name__, ) self.CallRRG( action=rrg_pb2.GET_WINREG_VALUE, args=rrg_get_winreg_value_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, key=r"SOFTWARE\Microsoft\Windows\CurrentVersion", name="ProgramFilesDir", ), next_state=self._ProcessRRGWindowsEnvProgramFilesDir.__name__, ) self.CallRRG( action=rrg_pb2.GET_WINREG_VALUE, args=rrg_get_winreg_value_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, key=r"SOFTWARE\Microsoft\Windows\CurrentVersion", name="ProgramFilesDir (x86)", ), next_state=self._ProcessRRGWindowsEnvProgramFilesDirX86.__name__, ) self.CallRRG( action=rrg_pb2.GET_WINREG_VALUE, args=rrg_get_winreg_value_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, key=r"SOFTWARE\Microsoft\Windows\CurrentVersion", name="CommonFilesDir", ), next_state=self._ProcessRRGWindowsEnvCommonFilesDir.__name__, ) self.CallRRG( action=rrg_pb2.GET_WINREG_VALUE, args=rrg_get_winreg_value_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, key=r"SOFTWARE\Microsoft\Windows\CurrentVersion", name="CommonFilesDir (x86)", ), next_state=self._ProcessRRGWindowsEnvCommonFilesDirX86.__name__, ) self.CallRRG( action=rrg_pb2.GET_WINREG_VALUE, args=rrg_get_winreg_value_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, key=r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList", name="ProgramData", ), next_state=self._ProcessRRGWindowsEnvProgramData.__name__, ) self.CallRRG( action=rrg_pb2.GET_WINREG_VALUE, args=rrg_get_winreg_value_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, key=r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment", name="DriverData", ), next_state=self._ProcessRRGWindowsEnvDriverData.__name__, ) self.CallRRG( action=rrg_pb2.GET_WINREG_VALUE, args=rrg_get_winreg_value_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, key=r"SYSTEM\Select", name="Current", ), next_state=self._ProcessRRGWindowsCurrentControlSet.__name__, ) self.CallRRG( action=rrg_pb2.GET_WINREG_VALUE, args=rrg_get_winreg_value_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, key=r"SYSTEM\CurrentControlSet\Control\Nls\CodePage", name="ACP", ), next_state=self._ProcessRRGWindowsCodePage.__name__, ) self.CallRRG( action=rrg_pb2.GET_WINREG_VALUE, args=rrg_get_winreg_value_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, key=r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters", name="Domain", ), next_state=self._ProcessRRGWindowsDomain.__name__, ) self.CallRRG( action=rrg_pb2.GET_WINREG_VALUE, args=rrg_get_winreg_value_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, 
key=r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation", name="TimeZoneKeyName", ), next_state=self._ProcessRRGWindowsTimeZoneKeyName.__name__, ) self.CallRRG( action=rrg_pb2.GET_WINREG_VALUE, args=rrg_get_winreg_value_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, key=r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment", name="TEMP", ), next_state=self._ProcessRRGWindowsEnvTemp.__name__, ) self.CallRRG( action=rrg_pb2.GET_WINREG_VALUE, args=rrg_get_winreg_value_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, key=r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment", name="Path", ), next_state=self._ProcessRRGWindowsEnvPath.__name__, ) self.CallRRG( action=rrg_pb2.GET_WINREG_VALUE, args=rrg_get_winreg_value_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, key=r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment", name="ComSpec", ), next_state=self._ProcessRRGWindowsEnvComSpec.__name__, ) self.CallRRG( action=rrg_pb2.GET_WINREG_VALUE, args=rrg_get_winreg_value_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, key=r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment", name="windir", ), next_state=self._ProcessRRGWindowsEnvWindir.__name__, ) self.CallRRG( action=rrg_pb2.GET_WINREG_VALUE, args=rrg_get_winreg_value_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, key=r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList", name="ProfilesDirectory", ), next_state=self._ProcessRRGWindowsProfilesDirectory.__name__, ) self.CallRRG( action=rrg_pb2.GET_WINREG_VALUE, args=rrg_get_winreg_value_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, key=r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList", name="AllUsersProfile", ), next_state=self._ProcessRRGWindowsEnvAllUsersProfile.__name__, ) self.CallRRG( action=rrg_pb2.LIST_WINREG_KEYS, args=rrg_list_winreg_keys_pb2.Args( root=rrg_winreg_pb2.LOCAL_MACHINE, key=r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList", ), next_state=self._ProcessRRGWindowsProfileList.__name__, ) # WMI queries are slow, so we consider them "heavyweight". if not self.args.lightweight: users = self.state.knowledge_base.users self.CallRRG( action=rrg_pb2.QUERY_WMI, args=rrg_query_wmi_pb2.Args( query=f""" SELECT SID, Name, Domain FROM Win32_UserAccount WHERE LocalAccount = TRUE AND ({" OR ".join(f"SID = '{user.sid}'" for user in users)}) """, ), next_state=self._ProcessRRGWindowsWMIUserAccount.__name__, ) else: # TODO: There is no dedicated action for obtaining registry # values. The existing artifact collector uses `GetFileStat` action for # this which is horrible. 
args = rdf_client_action.GetFileStatRequest() args.pathspec.pathtype = rdf_paths.PathSpec.PathType.REGISTRY args.pathspec.path = r"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\SystemRoot" self.CallClient( server_stubs.GetFileStat, args, next_state=self._ProcessWindowsEnvSystemRoot.__name__, ) args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\ProgramFilesDir" self.CallClient( server_stubs.GetFileStat, args, next_state=self._ProcessWindowsEnvProgramFilesDir.__name__, ) args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\ProgramFilesDir (x86)" self.CallClient( server_stubs.GetFileStat, args, next_state=self._ProcessWindowsEnvProgramFilesDirX86.__name__, ) args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\CommonFilesDir" self.CallClient( server_stubs.GetFileStat, args, next_state=self._ProcessWindowsEnvCommonFilesDir.__name__, ) args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\CommonFilesDir (x86)" self.CallClient( server_stubs.GetFileStat, args, next_state=self._ProcessWindowsEnvCommonFilesDirX86.__name__, ) args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion\ProfileList\ProgramData" self.CallClient( server_stubs.GetFileStat, args, next_state=self._ProcessWindowsEnvProgramData.__name__, ) args.pathspec.path = r"HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment\DriverData" self.CallClient( server_stubs.GetFileStat, args, next_state=self._ProcessWindowsEnvDriverData.__name__, ) args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\Select\Current" self.CallClient( server_stubs.GetFileStat, args, next_state=self._ProcessWindowsCurrentControlSet.__name__, ) args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Nls\CodePage\ACP" self.CallClient( server_stubs.GetFileStat, args, next_state=self._ProcessWindowsCodePage.__name__, ) args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Services\Tcpip\Parameters\Domain" self.CallClient( server_stubs.GetFileStat, args, next_state=self._ProcessWindowsDomain.__name__, ) args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\TimeZoneInformation\TimeZoneKeyName" self.CallClient( server_stubs.GetFileStat, args, next_state=self._ProcessWindowsTimeZoneKeyName.__name__, ) args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment\TEMP" self.CallClient( server_stubs.GetFileStat, args, next_state=self._ProcessWindowsEnvTemp.__name__, ) args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment\Path" self.CallClient( server_stubs.GetFileStat, args, next_state=self._ProcessWindowsEnvPath.__name__, ) args.pathspec.path = r"HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment\ComSpec" self.CallClient( server_stubs.GetFileStat, args, next_state=self._ProcessWindowsEnvComSpec.__name__, ) args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment\windir" self.CallClient( server_stubs.GetFileStat, args, next_state=self._ProcessWindowsEnvWindir.__name__, ) args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion\ProfileList\ProfilesDirectory" self.CallClient( server_stubs.GetFileStat, args, next_state=self._ProcessWindowsProfilesDirectory.__name__, ) args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion\ProfileList\AllUsersProfile" self.CallClient( server_stubs.GetFileStat, args, next_state=self._ProcessWindowsEnvAllUsersProfile.__name__, ) args = rdf_file_finder.FileFinderArgs() # TODO: There is no dedicated action for obtaining registry # values but `STAT` action of the file-finder will get it. This should be # refactored once registry-specific actions are available. args.action.action_type = rdf_file_finder.FileFinderAction.Action.STAT args.pathtype = rdf_paths.PathSpec.PathType.REGISTRY args.paths = [r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion\ProfileList\*\ProfileImagePath"] # TODO: remove this when the registry+sandboxing bug # is fixed. args.implementation_type = rdf_paths.PathSpec.ImplementationType.DIRECT self.CallClient( server_stubs.VfsFileFinder, args, next_state=self._ProcessWindowsProfiles.__name__, )
For each artifact, create subflows for each collector.
Start
python
google/grr
grr/server/grr_response_server/artifact.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact.py
Apache-2.0
def End(self) -> None: """Finish up.""" if self.client_os == "Windows": self.state.knowledge_base = mig_client.ToRDFKnowledgeBase( artifact_utils.ExpandKnowledgebaseWindowsEnvVars( mig_client.ToProtoKnowledgeBase(self.state.knowledge_base), ), ) # TODO: `%LOCALAPPDATA%` is a frequently used variable that we # potentially do not collect due to limitations of the Windows registry. For # now, in case we did not collect it, we set it to the default Windows value # (which should be the case almost always but is nevertheless not the most # correct way of handling it). # # Alternatively, we could develop a more general way of handling default # environment variable values in case they are missing. if self.client_os == "Windows": for user in self.state.knowledge_base.users: if not user.localappdata: self.Log( "Missing `%%LOCALAPPDATA%%` for '%s', using Windows default", user.username, ) user.localappdata = rf"{user.userprofile}\AppData\Local" self.SendReply(self.state.knowledge_base)
Finish up.
End
python
google/grr
grr/server/grr_response_server/artifact.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact.py
Apache-2.0
def InitializeKnowledgeBase(self): """Get the existing KB or create a new one if none exists.""" # Always create a new KB to override any old values but keep os and # version so we know which artifacts we can run. self.state.knowledge_base = rdf_client.KnowledgeBase() snapshot = data_store.REL_DB.ReadClientSnapshot(self.client_id) if not snapshot or not snapshot.knowledge_base: return kb = snapshot.knowledge_base state_kb = self.state.knowledge_base state_kb.os = kb.os state_kb.os_major_version = kb.os_major_version state_kb.os_minor_version = kb.os_minor_version if not state_kb.os_major_version and snapshot.os_version: version = snapshot.os_version.split(".") try: state_kb.os_major_version = int(version[0]) if len(version) > 1: state_kb.os_minor_version = int(version[1]) except ValueError: pass
Get the existing KB or create a new one if none exists.
InitializeKnowledgeBase
python
google/grr
grr/server/grr_response_server/artifact.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact.py
Apache-2.0
def UploadArtifactYamlFile( file_content, overwrite=True, overwrite_system_artifacts=False, ): """Upload a yaml or json file as an artifact to the datastore.""" loaded_artifacts = [] registry_obj = artifact_registry.REGISTRY # Make sure all artifacts are loaded so we don't accidentally overwrite one. registry_obj.GetArtifacts(reload_datastore_artifacts=True) new_artifacts = registry_obj.ArtifactsFromYaml(file_content) # A quick syntax check before we upload anything. for artifact_value in new_artifacts: artifact_registry.ValidateSyntax(artifact_value) for artifact_value in new_artifacts: registry_obj.RegisterArtifact( artifact_value, source="datastore", overwrite_if_exists=overwrite, overwrite_system_artifacts=overwrite_system_artifacts, ) data_store.REL_DB.WriteArtifact( mig_artifacts.ToProtoArtifact(artifact_value) ) loaded_artifacts.append(artifact_value) name = artifact_value.name logging.info("Uploaded artifact %s.", name) # Once all artifacts are loaded we can validate dependencies. Note that we do # not have to perform a syntax validation because it is already done after # YAML is parsed. for artifact_value in loaded_artifacts: artifact_registry.ValidateDependencies(artifact_value)
Upload a yaml or json file as an artifact to the datastore.
UploadArtifactYamlFile
python
google/grr
grr/server/grr_response_server/artifact.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact.py
Apache-2.0
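A usage sketch for UploadArtifactYamlFile. It assumes an initialized GRR server (configured data store and artifact registry); the artifact definition itself is illustrative, not one of GRR's shipped artifacts:

from grr_response_server import artifact

ARTIFACT_YAML = """
name: IllustrativeHostsFile
doc: Example artifact that collects /etc/hosts.
sources:
  - type: FILE
    attributes:
      paths: ['/etc/hosts']
supported_os: [Linux]
"""

artifact.UploadArtifactYamlFile(
    ARTIFACT_YAML,
    overwrite=True,  # Replace an existing datastore copy of the artifact.
    overwrite_system_artifacts=False,  # Never clobber shipped artifacts.
)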
def LoadArtifactsOnce(): """Loads artifacts from the datastore and from the filesystem. Datastore gets loaded second so it can override Artifacts in the files. """ artifact_registry.REGISTRY.AddDefaultSources()
Loads artifacts from the datastore and from the filesystem. Datastore gets loaded second so it can override Artifacts in the files.
LoadArtifactsOnce
python
google/grr
grr/server/grr_response_server/artifact.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/artifact.py
Apache-2.0
def _HostPrefix(client_id): """Build a host prefix for a notification message based on a client id.""" if not client_id: return "" hostname = None client_snapshot = data_store.REL_DB.ReadClientSnapshot(client_id) if client_snapshot: hostname = client_snapshot.knowledge_base.fqdn if hostname: return "%s: " % hostname else: return ""
Build a host prefix for a notification message based on a client id.
_HostPrefix
python
google/grr
grr/server/grr_response_server/notification.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/notification.py
Apache-2.0
def ClientIdFromObjectReference( object_reference: objects_pb2.ObjectReference, ) -> Optional[str]: """Returns the client ID from the given object reference, or None.""" if object_reference.reference_type == objects_pb2.ObjectReference.CLIENT: return object_reference.client.client_id elif object_reference.reference_type == objects_pb2.ObjectReference.FLOW: return object_reference.flow.client_id elif object_reference.reference_type == objects_pb2.ObjectReference.VFS_FILE: return object_reference.vfs_file.client_id elif ( object_reference.reference_type == objects_pb2.ObjectReference.APPROVAL_REQUEST and object_reference.approval_request.approval_type == objects_pb2.ApprovalRequest.ApprovalType.APPROVAL_TYPE_CLIENT ): return object_reference.approval_request.subject_id else: return None
Returns the client ID from the given object reference, or None.
ClientIdFromObjectReference
python
google/grr
grr/server/grr_response_server/notification.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/notification.py
Apache-2.0
def Notify( username: str, notification_type: "objects_pb2.UserNotification.Type", message: str, object_reference: Optional[objects_pb2.ObjectReference], ) -> None: """Schedules a new-style REL_DB user notification.""" # Do not try to notify system users (e.g. Cron). if username in access_control.SYSTEM_USERS: return if object_reference: client_id = ClientIdFromObjectReference(object_reference) if client_id: message = _HostPrefix(client_id) + message n = objects_pb2.UserNotification( username=username, notification_type=notification_type, state=objects_pb2.UserNotification.State.STATE_PENDING, message=message, reference=object_reference, ) data_store.REL_DB.WriteUserNotification(n)
Schedules a new-style REL_DB user notification.
Notify
python
google/grr
grr/server/grr_response_server/notification.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/notification.py
Apache-2.0
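A usage sketch for Notify, assuming an initialized data store. The ObjectReference fields match the ones read by ClientIdFromObjectReference above; the notification type enum value is an assumption chosen for illustration:

from grr_response_proto import objects_pb2
from grr_response_server import notification

ref = objects_pb2.ObjectReference(
    reference_type=objects_pb2.ObjectReference.CLIENT,
    client=objects_pb2.ClientReference(client_id="C.1000000000000000"),
)
notification.Notify(
    username="analyst",
    # Enum value name is illustrative; pick the type matching your event.
    notification_type=objects_pb2.UserNotification.Type.TYPE_CLIENT_INTERROGATED,
    message="Interrogation finished.",  # Gets a hostname prefix, if known.
    object_reference=ref,
)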
def InitGRRRootAPI(): """Initializes the GRR root API.""" return api.GrrApi( connector=api_shell_raw_access_lib.RawConnector( context=api_call_context.ApiCallContext(username="GRRConfigUpdater"), page_size=_GRR_API_PAGE_SIZE, ) ).root
Initializes the GRR root API.
InitGRRRootAPI
python
google/grr
grr/server/grr_response_server/maintenance_utils.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/maintenance_utils.py
Apache-2.0
def UploadSignedConfigBlob(content, aff4_path, client_context=None, limit=None): """Upload a signed blob into the datastore. Args: content: File content to upload. aff4_path: aff4 path to upload to. client_context: The configuration contexts to use. limit: The maximum size of the chunk to use. Raises: IOError: On failure to write. """ if limit is None: limit = config.CONFIG["Datastore.maximum_blob_size"] # Get the values of these parameters which apply to the client running on the # target platform. if client_context is None: # Default to the windows client. client_context = ["Platform:Windows", "Client Context"] config.CONFIG.Validate( parameters="PrivateKeys.executable_signing_private_key" ) signing_key = config.CONFIG.Get( "PrivateKeys.executable_signing_private_key", context=client_context ) verification_key = config.CONFIG.Get( "Client.executable_signing_public_key", context=client_context ) signed_binary_utils.WriteSignedBinary( rdfvalue.RDFURN(aff4_path), content, signing_key, public_key=verification_key, chunk_size=limit, ) logging.info("Uploaded to %s", aff4_path)
Upload a signed blob into the datastore. Args: content: File content to upload. aff4_path: aff4 path to upload to. client_context: The configuration contexts to use. limit: The maximum size of the chunk to use. Raises: IOError: On failure to write.
UploadSignedConfigBlob
python
google/grr
grr/server/grr_response_server/maintenance_utils.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/maintenance_utils.py
Apache-2.0
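A usage sketch for UploadSignedConfigBlob, assuming a server configured with executable-signing keys; the local file and the aff4 path are illustrative:

from grr_response_server import maintenance_utils

with open("/tmp/tool.exe", "rb") as fd:
  content = fd.read()

maintenance_utils.UploadSignedConfigBlob(
    content,
    aff4_path="aff4:/config/executables/windows/installers/tool.exe",
    client_context=["Platform:Windows", "Client Context"],  # The default.
)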
def _CheckIfHuntTaskWasAssigned(self, client_id, hunt_id): """Returns True if the hunt's task was already assigned to this client.""" flow_id = hunt_id try: cur_flow = data_store.REL_DB.ReadFlowObject(client_id, flow_id) except db.UnknownFlowError: return False if cur_flow.parent_hunt_id != hunt_id: raise RuntimeError( "Cannot start Hunt {} on {} because unrelated {} already exists." .format(hunt_id, client_id, cur_flow.long_flow_id) ) return True
Returns True if the hunt's task was already assigned to this client.
_CheckIfHuntTaskWasAssigned
python
google/grr
grr/server/grr_response_server/foreman.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/foreman.py
Apache-2.0
def _RunAction(self, rule, client_id): """Run all the actions specified in the rule. Args: rule: Rule whose actions are to be executed. client_id: Id of the client where the rule's actions are to be executed. Returns: Number of actions started. """ actions_count = 0 try: if self._CheckIfHuntTaskWasAssigned(client_id, rule.hunt_id): raise flow.CanNotStartFlowWithExistingIdError(client_id, rule.hunt_id) hunt.StartHuntFlowOnClient(client_id, rule.hunt_id) logging.info( "Foreman: Started hunt %s on client %s.", rule.hunt_id, client_id ) actions_count += 1 except flow.CanNotStartFlowWithExistingIdError: logging.info( "Foreman: ignoring hunt %s on client %s: was started here before", rule.hunt_id, client_id, ) # There could be all kinds of errors we don't know about when starting the # hunt so we catch everything here. except Exception as e: # pylint: disable=broad-except logging.exception( "Failure running hunt %s on client %s: %s", rule.hunt_id, client_id, e ) return actions_count
Run all the actions specified in the rule. Args: rule: Rule whose actions are to be executed. client_id: Id of the client where the rule's actions are to be executed. Returns: Number of actions started.
_RunAction
python
google/grr
grr/server/grr_response_server/foreman.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/foreman.py
Apache-2.0
def AssignTasksToClient(self, client_id): """Examines our rules and starts up flows based on the client. Args: client_id: Client id of the client for tasks to be assigned. Returns: Number of assigned tasks. """ proto_rules = data_store.REL_DB.ReadAllForemanRules() rules = [ mig_foreman_rules.ToRDFForemanCondition(cond) for cond in proto_rules ] if not rules: return 0 last_foreman_run = self._GetLastForemanRunTime(client_id) latest_rule_creation_time = max(rule.creation_time for rule in rules) if latest_rule_creation_time > last_foreman_run: # Update the latest checked rule on the client. self._SetLastForemanRunTime(client_id, latest_rule_creation_time) relevant_rules = [] expired_rules = [] now = rdfvalue.RDFDatetime.Now() for rule in rules: if rule.expiration_time < now: expired_rules.append(rule) elif rule.creation_time > last_foreman_run: relevant_rules.append(rule) actions_count = 0 if relevant_rules: client_data = data_store.REL_DB.ReadClientFullInfo(client_id) if client_data is None: return client_data = mig_objects.ToRDFClientFullInfo(client_data) for rule in relevant_rules: if rule.Evaluate(client_data): actions_count += self._RunAction(rule, client_id) if expired_rules: for rule in expired_rules: hunt.CompleteHuntIfExpirationTimeReached(rule.hunt_id) data_store.REL_DB.RemoveExpiredForemanRules() return actions_count
Examines our rules and starts up flows based on the client. Args: client_id: Client id of the client for tasks to be assigned. Returns: Number of assigned tasks.
AssignTasksToClient
python
google/grr
grr/server/grr_response_server/foreman.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/foreman.py
Apache-2.0
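The rule triage above partitions foreman rules into three buckets: expired rules (set aside for cleanup), rules created since the client's last foreman check (evaluated against the client), and everything else (skipped). A self-contained restatement with plain datetimes:

import datetime

now = datetime.datetime(2024, 1, 10)
last_run = datetime.datetime(2024, 1, 1)
rules = [
    {"id": "old", "created": datetime.datetime(2023, 12, 1),
     "expires": datetime.datetime(2023, 12, 31)},
    {"id": "new", "created": datetime.datetime(2024, 1, 5),
     "expires": datetime.datetime(2024, 2, 1)},
]

expired = [r for r in rules if r["expires"] < now]
relevant = [
    r for r in rules if r["expires"] >= now and r["created"] > last_run
]
print([r["id"] for r in relevant], [r["id"] for r in expired])
# ['new'] ['old']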
def GetNewEventId(self, event_time=None): """Return a unique Event ID string.""" if event_time is None: event_time = int(time.time() * 1e6) return "%s:%s:%s" % (event_time, socket.gethostname(), os.getpid())
Return a unique Event ID string.
GetNewEventId
python
google/grr
grr/server/grr_response_server/server_logging.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py
Apache-2.0
def LogHttpAdminUIAccess(self, request, response): """Log an http based api call. Args: request: A WSGI request object. response: A WSGI response object. """ # TODO(user): generate event_id elsewhere and use it for all the log # messages that have to do with handling corresponding request. event_id = self.GetNewEventId() api_method = response.headers.get("X-API-Method", "unknown") api_reason = "none" if response.context: approval = response.context.approval if approval: api_reason = approval.reason log_msg = "%s API call [%s] by %s (reason: %s): %s [%d]" % ( event_id, api_method, request.user, api_reason, request.full_path, response.status_code, ) logging.info(log_msg) if response.headers.get("X-No-Log") != "True": entry = models_events.APIAuditEntryFromHttpRequestResponse( request, response ) data_store.REL_DB.WriteAPIAuditEntry(entry)
Log an http based api call. Args: request: A WSGI request object. response: A WSGI response object.
LogHttpAdminUIAccess
python
google/grr
grr/server/grr_response_server/server_logging.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py
Apache-2.0
def LogHttpFrontendAccess(self, request, source=None, message_count=None): """Write a log entry for a Frontend or UI Request. Args: request: A HttpRequest protobuf. source: Client id of the client initiating the request. Optional. message_count: Number of messages received from the client. Optional. """ # TODO(user): generate event_id elsewhere and use it for all the log # messages that have to do with handling corresponding request. event_id = self.GetNewEventId() log_msg = "%s-%s [%s]: %s %s %s %s (%d)" % ( event_id, request.source_ip, source or "<unknown>", request.method, request.url, request.user_agent, request.user, message_count or 0, ) logging.info(log_msg)
Write a log entry for a Frontend or UI Request. Args: request: A HttpRequest protobuf. source: Client id of the client initiating the request. Optional. message_count: Number of messages received from the client. Optional.
LogHttpFrontendAccess
python
google/grr
grr/server/grr_response_server/server_logging.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py
Apache-2.0
def flush(self): """Flush the buffer. This is called when the buffer is really full; we just drop the oldest messages and keep the most recent ones. """ self.buffer = self.buffer[-self.capacity :]
Flush the buffer. This is called when the buffer is really full; we just drop the oldest messages and keep the most recent ones.
flush
python
google/grr
grr/server/grr_response_server/server_logging.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py
Apache-2.0
def handleError(self, record): """Just ignore socket errors - the syslog server might come back."""
Just ignore socket errors - the syslog server might come back.
handleError
python
google/grr
grr/server/grr_response_server/server_logging.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py
Apache-2.0
def __init__(self, *args, **kwargs): """Initializes LogMetricsHandler.""" super().__init__(*args, **kwargs) self.setLevel(logging.ERROR)
Initializes LogMetricsHandler.
__init__
python
google/grr
grr/server/grr_response_server/server_logging.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py
Apache-2.0
def emit(self, record: logging.LogRecord): """Overrides Handler.emit().""" # From https://docs.python.org/3/library/logging.html#logging.Logger # logging.error() and logging.exception() log with level ERROR. # logging.critical() logs with level CRITICAL. if record.levelno == logging.ERROR: LOG_CALLS_COUNTER.Increment(fields=["ERROR"]) elif record.levelno == logging.CRITICAL: LOG_CALLS_COUNTER.Increment(fields=["CRITICAL"])
Overrides Handler.emit().
emit
python
google/grr
grr/server/grr_response_server/server_logging.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py
Apache-2.0
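The handler above never writes log output; its emit() only bumps counters for ERROR and CRITICAL records. A self-contained sketch of that counting-handler pattern, with a plain dict standing in for the metrics library:

import logging

LOG_CALLS = {"ERROR": 0, "CRITICAL": 0}


class CountingHandler(logging.Handler):

  def __init__(self):
    super().__init__()
    self.setLevel(logging.ERROR)  # Ignore anything below ERROR.

  def emit(self, record: logging.LogRecord) -> None:
    if record.levelno == logging.ERROR:
      LOG_CALLS["ERROR"] += 1
    elif record.levelno == logging.CRITICAL:
      LOG_CALLS["CRITICAL"] += 1


logging.getLogger().addHandler(CountingHandler())
logging.error("boom")      # Counted as ERROR (logging.exception too).
logging.critical("worse")  # Counted as CRITICAL.
print(LOG_CALLS)  # {'ERROR': 1, 'CRITICAL': 1}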
def InitErrorLogsMonitoring(): """Sets up error logs monitoring.""" logging.root.addHandler(ErrorLogsHandler()) logging.info("Initialized ErrorLogsHandler.")
Sets up error logs monitoring.
InitErrorLogsMonitoring
python
google/grr
grr/server/grr_response_server/server_logging.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py
Apache-2.0
def LogInit(): """Configure the logging subsystem.""" logging.debug("Initializing Logging subsystem.") # The root logger. logger = logging.getLogger() memory_handlers = [ m for m in logger.handlers if m.__class__.__name__ == "PreLoggingMemoryHandler" ] # Replace all handlers. logger.handlers = list(GetLogHandlers()) SetLogLevels() # Now flush the old messages into the log files. for handler in memory_handlers: for record in handler.buffer: logger.handle(record)
Configure the logging subsystem.
LogInit
python
google/grr
grr/server/grr_response_server/server_logging.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py
Apache-2.0
def AppLogInit(): """Initialize the Application Log. This log is what will be used whenever someone does a log.LOGGER call. These are used for more detailed application or event logs. Returns: GrrApplicationLogger object """ logging.debug("Initializing Application Logger.") return GrrApplicationLogger()
Initialize the Application Log. This log is what will be used whenever someone does a log.LOGGER call. These are used for more detailed application or event logs. Returns: GrrApplicationLogger object
AppLogInit
python
google/grr
grr/server/grr_response_server/server_logging.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py
Apache-2.0
def ServerLoggingStartupInit(): """Initialize the server logging configuration.""" global LOGGER # `local_log` requires `Logging.path` configuration variable to be set. If it # is not, we fallback to normal logging (as specified in the config or flags). if local_log and config.CONFIG["Logging.path"]: logging.debug("Using local LogInit from %s", local_log) local_log.LogInit() logging.debug("Using local AppLogInit from %s", local_log) LOGGER = local_log.AppLogInit() else: LogInit() LOGGER = AppLogInit() InitErrorLogsMonitoring()
Initialize the server logging configuration.
ServerLoggingStartupInit
python
google/grr
grr/server/grr_response_server/server_logging.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/server_logging.py
Apache-2.0
def Run(self): """The actual cron job logic goes into this method."""
The actual cron job logic goes into this method.
Run
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
def StartRun(self, wait_for_start_event, signal_event, wait_for_write_event): """Starts a new run for the given cron job.""" # Signal that the cron thread has started. This way the cron scheduler # will know that the task is not sitting in a threadpool queue, but is # actually executing. wait_for_start_event.set() # Wait until the cron scheduler acknowledges the run. If it doesn't # acknowledge, just return (it means that the cron scheduler considers # this task as "not started" and has returned the lease so that another # worker can pick it up). if not signal_event.wait(TASK_STARTUP_WAIT): return try: logging.info("Processing cron job: %s", self.job.cron_job_id) self.run_state.started_at = rdfvalue.RDFDatetime.Now() self.run_state.status = rdf_cronjobs.CronJobRun.CronJobRunStatus.RUNNING data_store.REL_DB.WriteCronJobRun( mig_cronjobs.ToProtoCronJobRun(self.run_state) ) data_store.REL_DB.UpdateCronJob( self.job.cron_job_id, last_run_time=rdfvalue.RDFDatetime.Now(), current_run_id=self.run_state.run_id, forced_run_requested=False, ) finally: # Notify the cron scheduler that all the DB updates are done. At this # point the cron scheduler can safely return this job's lease. wait_for_write_event.set() try: self.Run() self.run_state.status = rdf_cronjobs.CronJobRun.CronJobRunStatus.FINISHED except LifetimeExceededError: self.run_state.status = ( rdf_cronjobs.CronJobRun.CronJobRunStatus.LIFETIME_EXCEEDED ) CRON_JOB_FAILURE.Increment(fields=[self.job.cron_job_id]) except Exception as e: # pylint: disable=broad-except logging.exception( "Cronjob %s failed with an error: %s", self.job.cron_job_id, e ) CRON_JOB_FAILURE.Increment(fields=[self.job.cron_job_id]) self.run_state.status = rdf_cronjobs.CronJobRun.CronJobRunStatus.ERROR self.run_state.backtrace = "{}\n\n{}".format(e, traceback.format_exc()) finally: self.run_state.finished_at = rdfvalue.RDFDatetime.Now() elapsed = self.run_state.finished_at - self.run_state.started_at CRON_JOB_LATENCY.RecordEvent( elapsed.ToFractional(rdfvalue.SECONDS), fields=[self.job.cron_job_id] ) if self.job.lifetime: expiration_time = self.run_state.started_at + self.job.lifetime if self.run_state.finished_at > expiration_time: self.run_state.status = ( rdf_cronjobs.CronJobRun.CronJobRunStatus.LIFETIME_EXCEEDED ) CRON_JOB_TIMEOUT.Increment(fields=[self.job.cron_job_id]) data_store.REL_DB.WriteCronJobRun( mig_cronjobs.ToProtoCronJobRun(self.run_state) ) current_job = data_store.REL_DB.ReadCronJob(self.job.cron_job_id) # If no other job was started while we were running, update last status # information. if current_job.current_run_id == self.run_state.run_id: data_store.REL_DB.UpdateCronJob( self.job.cron_job_id, current_run_id=None, last_run_status=int(self.run_state.status), )
Starts a new run for the given cron job.
StartRun
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
def HeartBeat(self): """Terminates a cronjob-run if it has exceeded its maximum runtime. This is a no-op for cronjobs that allow overruns. Raises: LifetimeExceededError: If the cronjob has exceeded its maximum runtime. """ # In prod, self.job.lifetime is guaranteed to always be set, and is # always equal to self.__class__.lifetime. Some tests however, do not # set the job lifetime, which isn't great. if self.allow_overruns or not self.job.lifetime: return runtime = rdfvalue.RDFDatetime.Now() - self.run_state.started_at if runtime > self.lifetime: raise LifetimeExceededError( "Cronjob run has exceeded the maximum runtime of %s." % self.lifetime )
Terminates a cronjob-run if it has exceeded its maximum runtime. This is a no-op for cronjobs that allow overruns. Raises: LifetimeExceededError: If the cronjob has exceeded its maximum runtime.
HeartBeat
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
def CreateJob(self, cron_args=None, job_id=None, enabled=True): """Creates a cron job that runs the given flow with a given frequency. Args: cron_args: A protobuf of type rdf_cronjobs.CreateCronJobArgs. job_id: Use this job_id instead of an autogenerated unique name (used for system cron jobs - we want them to have a well-defined, persistent name). enabled: If False, the job object will be created, but will be disabled. Returns: ID of the cron job created. Raises: ValueError: This function expects an arg protobuf that starts a CreateAndRunGenericHuntFlow flow. If the args specify something else, ValueError is raised. """ if not cron_args.flow_name: raise ValueError("Unspecified flow name") if not job_id: # TODO: UInt16 is too small for randomly generated IDs. uid = random.UInt16() job_id = "%s_%s" % (cron_args.flow_name, uid) args = rdf_cronjobs.CronJobAction( action_type=rdf_cronjobs.CronJobAction.ActionType.HUNT_CRON_ACTION, hunt_cron_action=rdf_cronjobs.HuntCronAction( flow_name=cron_args.flow_name, flow_args=cron_args.flow_args, hunt_runner_args=cron_args.hunt_runner_args, ), ) # TODO: Refactor to proto-only. rdf_job = rdf_cronjobs.CronJob( cron_job_id=job_id, description=cron_args.description, frequency=cron_args.frequency, lifetime=cron_args.lifetime, allow_overruns=cron_args.allow_overruns, args=args, enabled=enabled, created_at=rdfvalue.RDFDatetime.Now(), ) proto_job = mig_cronjobs.ToProtoCronJob(rdf_job) data_store.REL_DB.WriteCronJob(proto_job) return job_id
Creates a cron job that runs the given flow with a given frequency. Args: cron_args: A protobuf of type rdf_cronjobs.CreateCronJobArgs. job_id: Use this job_id instead of an autogenerated unique name (used for system cron jobs - we want them to have a well-defined, persistent name). enabled: If False, the job object will be created, but will be disabled. Returns: ID of the cron job created. Raises: ValueError: This function expects an arg protobuf that starts a CreateAndRunGenericHuntFlow flow. If the args specify something else, ValueError is raised.
CreateJob
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
def ListJobs(self) -> Sequence[str]: """Returns a list of ids of all currently running cron jobs.""" return [job.cron_job_id for job in data_store.REL_DB.ReadCronJobs()]
Returns a list of ids of all currently running cron jobs.
ListJobs
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
def ReadJobs(self) -> Sequence[rdf_cronjobs.CronJob]: """Returns a list of all currently running cron jobs.""" protos = data_store.REL_DB.ReadCronJobs() return [mig_cronjobs.ToRDFCronJob(job) for job in protos]
Returns a list of all currently running cron jobs.
ReadJobs
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
def EnableJob(self, job_id: str) -> None: """Enable cron job with the given id.""" return data_store.REL_DB.EnableCronJob(job_id)
Enable cron job with the given id.
EnableJob
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
def DisableJob(self, job_id: str) -> None: """Disable cron job with the given id.""" return data_store.REL_DB.DisableCronJob(job_id)
Disable cron job with the given id.
DisableJob
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
def DeleteJob(self, job_id: str) -> None: """Deletes cron job with the given URN.""" return data_store.REL_DB.DeleteCronJob(job_id)
Deletes cron job with the given URN.
DeleteJob
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
def RunOnce(self, names: Optional[Sequence[str]] = None) -> None: """Tries to lock and run cron jobs. Args: names: List of cron jobs to run. If unset, run them all. Raises: OneOrMoreCronJobsFailedError: if one or more individual cron jobs fail. Note: a failure of a single cron job doesn't preclude other cron jobs from running. """ proto_leased_jobs = data_store.REL_DB.LeaseCronJobs( cronjob_ids=names, lease_time=rdfvalue.Duration.From(10, rdfvalue.MINUTES), ) logging.info("Leased %d cron jobs for processing.", len(proto_leased_jobs)) if not proto_leased_jobs: return rdf_leased_jobs = [ mig_cronjobs.ToRDFCronJob(job) for job in proto_leased_jobs ] errors = {} processed_count = 0 for job in sorted(rdf_leased_jobs, key=lambda j: j.cron_job_id): if self.TerminateStuckRunIfNeeded(job): continue if not self.JobDueToRun(job): continue try: if self.RunJob(job): processed_count += 1 else: logging.info( "Can't schedule cron job %s on a thread pool " "(all threads are busy or CPU load is high)", job.cron_job_id, ) break except Exception as e: # pylint: disable=broad-except logging.exception("Cron job %s has failed: %s", job.cron_job_id, e) errors[job.cron_job_id] = e logging.info("Processed %d cron jobs.", processed_count) updated_proto_leased_jobs = [ mig_cronjobs.ToProtoCronJob(job) for job in rdf_leased_jobs ] data_store.REL_DB.ReturnLeasedCronJobs(updated_proto_leased_jobs) if errors: raise OneOrMoreCronJobsFailedError(errors)
Tries to lock and run cron jobs. Args: names: List of cron jobs to run. If unset, run them all. Raises: OneOrMoreCronJobsFailedError: if one or more individual cron jobs fail. Note: a failure of a single cron job doesn't preclude other cron jobs from running.
RunOnce
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
def TerminateStuckRunIfNeeded(self, job: rdf_cronjobs.CronJob) -> bool: """Cleans up job state if the last run is stuck. Returns: True if a stuck run was terminated, False otherwise. """ if job.current_run_id and job.last_run_time and job.lifetime: now = rdfvalue.RDFDatetime.Now() # We add an additional 10 minutes to give the job run a chance to kill # itself during one of the HeartBeat calls (HeartBeat checks if a cron # job run is running too long and raises if it is). expiration_time = ( job.last_run_time + job.lifetime + rdfvalue.Duration.From(10, rdfvalue.MINUTES) ) if now > expiration_time: proto_run = data_store.REL_DB.ReadCronJobRun( job.cron_job_id, job.current_run_id ) proto_run.status = ( flows_pb2.CronJobRun.CronJobRunStatus.LIFETIME_EXCEEDED ) proto_run.finished_at = now.AsMicrosecondsSinceEpoch() data_store.REL_DB.WriteCronJobRun(proto_run) data_store.REL_DB.UpdateCronJob( job.cron_job_id, current_run_id=None, last_run_status=proto_run.status, ) CRON_JOB_LATENCY.RecordEvent( (now - job.last_run_time).ToFractional(rdfvalue.SECONDS), fields=[job.cron_job_id], ) CRON_JOB_TIMEOUT.Increment(fields=[job.cron_job_id]) return True return False
Cleans up job state if the last run is stuck. Returns: True if a stuck run was terminated, False otherwise.
TerminateStuckRunIfNeeded
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
def RunJob(self, job: rdf_cronjobs.CronJob) -> bool: """Does the actual work of the Cron, if the job is due to run. Args: job: The cronjob rdfvalue that should be run. Must be leased. Returns: A boolean indicating if this cron job was started or not. False may be returned when the threadpool is already full. Raises: LockError: if the object is not locked. ValueError: If the job argument is invalid. """ if not job.leased_until: raise LockError("CronJob must be leased for Run() to be called.") if job.leased_until < rdfvalue.RDFDatetime.Now(): raise LockError("CronJob lease expired for %s." % job.cron_job_id) logging.info("Starting cron job: %s", job.cron_job_id) if job.args.action_type == job.args.ActionType.SYSTEM_CRON_ACTION: cls_name = job.args.system_cron_action.job_class_name job_cls = SystemCronJobRegistry.CronJobClassByName(cls_name) name = "%s runner" % cls_name elif job.args.action_type == job.args.ActionType.HUNT_CRON_ACTION: job_cls = CronJobRegistry.CronJobClassByName("RunHunt") name = "Hunt runner" else: raise ValueError( "CronJob %s doesn't have a valid args type set." % job.cron_job_id ) run_state = rdf_cronjobs.CronJobRun( cron_job_id=job.cron_job_id, status="RUNNING" ) run_state.GenerateRunId() run_obj = job_cls(run_state, job) wait_for_start_event, signal_event, wait_for_write_event = ( threading.Event(), threading.Event(), threading.Event(), ) try: self._GetThreadPool().AddTask( target=run_obj.StartRun, args=(wait_for_start_event, signal_event, wait_for_write_event), name=name, blocking=False, inline=False, ) if not wait_for_start_event.wait(TASK_STARTUP_WAIT): logging.error( "Cron job run task for %s is too slow to start.", job.cron_job_id ) # Most likely the thread pool is full and the task is sitting on the # queue. Make sure we don't put more things on the queue by returning # False. return False # We know that the cron job task has started, unblock it by setting # the signal event. If signal_event is not set (this happens if the # task sits on a ThreadPool's queue doing nothing, see the # if-statement above) the task will just be a no-op when ThreadPool # finally gets to it. This way we can ensure that we can safely return # the lease and let another worker schedule the same job. signal_event.set() wait_for_write_event.wait(TASK_STARTUP_WAIT) return True except threadpool.Full: return False
Does the actual work of the Cron, if the job is due to run. Args: job: The cronjob rdfvalue that should be run. Must be leased. Returns: A boolean indicating if this cron job was started or not. False may be returned when the threadpool is already full. Raises: LockError: if the object is not locked. ValueError: If the job argument is invalid.
RunJob
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
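The three threading.Event objects in RunJob form a small handshake: the task announces it started, the scheduler unblocks it, and the task confirms its state was written before the lease is returned. A self-contained sketch of that pattern using only the standard threading module; worker, started, proceed, and state_written are assumed names, and this is not GRR's ThreadPool API.

import threading

TASK_STARTUP_WAIT = 1.0  # Seconds; illustrative value.


def worker(started, proceed, state_written):
  started.set()  # 1. Tell the scheduler the task actually began running.
  if not proceed.wait(TASK_STARTUP_WAIT):
    return  # The scheduler gave up on us; become a no-op.
  # ... persist run state here ...
  state_written.set()  # 3. Let the scheduler safely return the lease.


started, proceed, state_written = (threading.Event() for _ in range(3))
t = threading.Thread(target=worker, args=(started, proceed, state_written))
t.start()

if started.wait(TASK_STARTUP_WAIT):  # The pool was not full; the task is live.
  proceed.set()  # 2. Unblock the running task.
  state_written.wait(TASK_STARTUP_WAIT)
t.join()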
def JobIsRunning(self, job): """Returns True if there's a currently running iteration of this job.""" return bool(job.current_run_id)
Returns True if there's a currently running iteration of this job.
JobIsRunning
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
def JobDueToRun(self, job: rdf_cronjobs.CronJob) -> bool:
    """Determines if the given job is due for another run.

    Args:
      job: The cron job rdfvalue object.

    Returns:
      True if it is time to run based on the specified frequency.
    """
    if not job.enabled:
      return False

    if job.forced_run_requested:
      return True

    now = rdfvalue.RDFDatetime.Now()

    if (
        job.last_run_time is not None
        and job.last_run_time + job.frequency > now
    ):
      return False

    # No currently executing job - let's go.
    if not job.current_run_id:
      return True

    # There is a job executing but we allow overruns.
    if job.allow_overruns:
      return True

    return False
Determines if the given job is due for another run. Args: job: The cron job rdfvalue object. Returns: True if it is time to run based on the specified frequency.
JobDueToRun
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
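The scheduling decision in JobDueToRun can be restated as a pure function over plain numbers, which makes the precedence of the checks easy to test. A hypothetical sketch; the parameter names mirror the job attributes above, but nothing here is GRR code.

from typing import Optional


def job_due_to_run(
    enabled: bool,
    forced: bool,
    last_run_time: Optional[float],
    frequency: float,
    now: float,
    current_run_id: Optional[int],
    allow_overruns: bool,
) -> bool:
  """Pure-function restatement of the scheduling decision above."""
  if not enabled:
    return False
  if forced:
    return True
  if last_run_time is not None and last_run_time + frequency > now:
    return False
  # Due by the clock: run unless another run is active and overruns are off.
  return current_run_id is None or allow_overruns


# A job that ran 50s ago with a 60s frequency is not yet due...
assert not job_due_to_run(True, False, 0.0, 60.0, 50.0, None, False)
# ...but a forced run ignores the frequency entirely.
assert job_due_to_run(True, True, 0.0, 60.0, 50.0, None, False)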
def DeleteOldRuns( self, cutoff_timestamp: Optional[rdfvalue.RDFDatetime] = None ) -> None: """Deletes runs that were started before the timestamp given.""" if cutoff_timestamp is None: raise ValueError("cutoff_timestamp can't be None") return data_store.REL_DB.DeleteOldCronJobRuns( cutoff_timestamp=cutoff_timestamp )
Deletes runs that were started before the timestamp given.
DeleteOldRuns
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
def ScheduleSystemCronJobs(names: Optional[Sequence[str]] = None) -> None: """Schedules all system cron jobs.""" errors = [] disabled_classes = config.CONFIG["Cron.disabled_cron_jobs"] for name in disabled_classes: try: SystemCronJobRegistry.CronJobClassByName(name) except ValueError: errors.append("Cron job not found: %s." % name) continue if names is None: names = SystemCronJobRegistry.SYSTEM_CRON_REGISTRY.keys() for name in names: cls = SystemCronJobRegistry.CronJobClassByName(name) enabled = cls.enabled and name not in disabled_classes system = rdf_cronjobs.CronJobAction.ActionType.SYSTEM_CRON_ACTION args = rdf_cronjobs.CronJobAction( action_type=system, system_cron_action=rdf_cronjobs.SystemCronAction(job_class_name=name), ) # TODO: Refactor to proto-only. rdf_job = rdf_cronjobs.CronJob( cron_job_id=name, args=args, enabled=enabled, frequency=cls.frequency, lifetime=cls.lifetime, allow_overruns=cls.allow_overruns, created_at=rdfvalue.RDFDatetime.Now(), ) proto_job = mig_cronjobs.ToProtoCronJob(rdf_job) data_store.REL_DB.WriteCronJob(proto_job) if errors: raise ValueError( "Error(s) while parsing Cron.disabled_cron_jobs: %s" % errors )
Schedules all system cron jobs.
ScheduleSystemCronJobs
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
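The validation loop in ScheduleSystemCronJobs collects every unknown name before raising once at the end, so a single bad config entry does not hide the others. A minimal sketch of that collect-then-raise pattern, with a plain dict standing in for SystemCronJobRegistry; names are illustrative.

from typing import Sequence

# A plain dict standing in for SystemCronJobRegistry; entries are illustrative.
REGISTRY = {"CleanupCronJob": object, "StatsCronJob": object}


def validate_disabled(disabled: Sequence[str]) -> None:
  """Collects all unknown names first, then raises once, like the code above."""
  errors = [name for name in disabled if name not in REGISTRY]
  if errors:
    raise ValueError(
        "Error(s) while parsing Cron.disabled_cron_jobs: %s" % errors
    )


validate_disabled(["StatsCronJob"])  # Known name: passes silently.
# validate_disabled(["NoSuchJob"])   # Unknown name: would raise ValueError.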
def Run(self): """Runs a working thread and waits for it to finish.""" self.RunAsync().join()
Runs a working thread and waits for it to finish.
Run
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
def RunAsync(self): """Runs a working thread and returns immediately.""" self.running_thread = threading.Thread( name=self.thread_name, target=self._RunLoop ) self.running_thread.daemon = True self.running_thread.start() return self.running_thread
Runs a working thread and returns immediately.
RunAsync
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
def InitializeCronWorkerOnce(): """Init hook for cron job worker.""" global _cron_worker # Start the cron thread if configured to. if config.CONFIG["Cron.active"]: _cron_worker = CronWorker() _cron_worker.RunAsync()
Init hook for cron job worker.
InitializeCronWorkerOnce
python
google/grr
grr/server/grr_response_server/cronjobs.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/cronjobs.py
Apache-2.0
def SendGrrMessageThroughFleetspeak( grr_id: str, grr_msg: rdf_flows.GrrMessage, ) -> None: """Sends the given GrrMessage through FS with retrying. The send operation is retried if a `grpc.RpcError` occurs. The maximum number of retries corresponds to the config value `Server.fleetspeak_send_retry_attempts`. A retry is delayed by the number of seconds specified in the config value `Server.fleetspeak_send_retry_sleep_time_secs`. Args: grr_id: ID of grr client to send message to. grr_msg: GRR message to send. """ fs_msg = fs_common_pb2.Message( message_type="GrrMessage", destination=fs_common_pb2.Address( client_id=GRRIDToFleetspeakID(grr_id), service_name="GRR" ), ) fs_msg.data.Pack(grr_msg.AsPrimitiveProto()) if grr_msg.session_id is not None: annotation = fs_msg.annotations.entries.add() annotation.key, annotation.value = "flow_id", grr_msg.session_id.Basename() if grr_msg.request_id is not None: annotation = fs_msg.annotations.entries.add() annotation.key, annotation.value = "request_id", str(grr_msg.request_id) fleetspeak_connector.CONN.outgoing.InsertMessage( fs_msg, single_try_timeout=WRITE_SINGLE_TRY_TIMEOUT, timeout=WRITE_TOTAL_TIMEOUT, )
Sends the given GrrMessage through FS with retrying. The send operation is retried if a `grpc.RpcError` occurs. The maximum number of retries corresponds to the config value `Server.fleetspeak_send_retry_attempts`. A retry is delayed by the number of seconds specified in the config value `Server.fleetspeak_send_retry_sleep_time_secs`. Args: grr_id: ID of grr client to send message to. grr_msg: GRR message to send.
SendGrrMessageThroughFleetspeak
python
google/grr
grr/server/grr_response_server/fleetspeak_utils.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_utils.py
Apache-2.0
def SendRrgRequest( client_id: str, request: rrg_pb2.Request, ) -> None: """Sends a RRG action request to the specified endpoint. Args: client_id: A unique endpoint identifier as recognized by GRR. request: A request to send to the endpoint. """ message = fs_common_pb2.Message() message.message_type = "rrg.Request" message.destination.service_name = "RRG" message.destination.client_id = GRRIDToFleetspeakID(client_id) message.data.Pack(request) # It is not entirely clear to me why we set these annotations below, but # messages sent to Python agents do it, so we should do it as well. message.annotations.entries.add( key="flow_id", value=str(request.flow_id), ) message.annotations.entries.add( key="request_id", value=str(request.request_id), ) fleetspeak_connector.CONN.outgoing.InsertMessage( message, single_try_timeout=WRITE_SINGLE_TRY_TIMEOUT, timeout=WRITE_TOTAL_TIMEOUT, )
Sends a RRG action request to the specified endpoint. Args: client_id: A unique endpoint identifier as recognized by GRR. request: A request to send to the endpoint.
SendRrgRequest
python
google/grr
grr/server/grr_response_server/fleetspeak_utils.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_utils.py
Apache-2.0
def KillFleetspeak(grr_id: str, force: bool) -> None:
  """Kills Fleetspeak on the given client."""
  die_req = fs_system_pb2.DieRequest(force=force)
  fs_msg = fs_common_pb2.Message()
  fs_msg.message_type = "Die"
  fs_msg.destination.client_id = GRRIDToFleetspeakID(grr_id)
  fs_msg.destination.service_name = "system"
  fs_msg.data.Pack(die_req)

  fleetspeak_connector.CONN.outgoing.InsertMessage(
      fs_msg,
      single_try_timeout=WRITE_SINGLE_TRY_TIMEOUT,
      timeout=WRITE_TOTAL_TIMEOUT,
  )
Kills Fleetspeak on the given client.
KillFleetspeak
python
google/grr
grr/server/grr_response_server/fleetspeak_utils.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_utils.py
Apache-2.0
def RestartFleetspeakGrrService(grr_id: str) -> None: """Restarts the GRR service on the given client.""" restart_req = fs_system_pb2.RestartServiceRequest(name="GRR") fs_msg = fs_common_pb2.Message() fs_msg.message_type = "RestartService" fs_msg.destination.client_id = GRRIDToFleetspeakID(grr_id) fs_msg.destination.service_name = "system" fs_msg.data.Pack(restart_req) fleetspeak_connector.CONN.outgoing.InsertMessage( fs_msg, single_try_timeout=WRITE_SINGLE_TRY_TIMEOUT, timeout=WRITE_TOTAL_TIMEOUT, )
Restarts the GRR service on the given client.
RestartFleetspeakGrrService
python
google/grr
grr/server/grr_response_server/fleetspeak_utils.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_utils.py
Apache-2.0
def TSToRDFDatetime(ts): """Convert a protobuf.Timestamp to an RDFDatetime.""" return rdfvalue.RDFDatetime(ts.seconds * 1000000 + ts.nanos // 1000)
Convert a protobuf.Timestamp to an RDFDatetime.
TSToRDFDatetime
python
google/grr
grr/server/grr_response_server/fleetspeak_utils.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_utils.py
Apache-2.0
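The conversion above is seconds * 10**6 + nanos // 10**3: whole microseconds, with sub-microsecond precision truncated. A small self-contained check using only the protobuf Timestamp type already in use here:

from google.protobuf import timestamp_pb2

ts = timestamp_pb2.Timestamp(seconds=1_700_000_000, nanos=123_456_789)
micros = ts.seconds * 1_000_000 + ts.nanos // 1_000
assert micros == 1_700_000_000_123_456  # The trailing 789 ns are dropped.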
def GetLabelsFromFleetspeak(client_id): """Returns labels for a Fleetspeak-enabled client. Fleetspeak-enabled clients delegate labeling to Fleetspeak, as opposed to using labels in the GRR config. Args: client_id: Id of the client to fetch Fleetspeak labels for. Returns: A list of client labels. """ res = fleetspeak_connector.CONN.outgoing.ListClients( admin_pb2.ListClientsRequest(client_ids=[GRRIDToFleetspeakID(client_id)]), single_try_timeout=READ_SINGLE_TRY_TIMEOUT, timeout=READ_TOTAL_TIMEOUT, ) if not res.clients or not res.clients[0].labels: return [] grr_labels = [] label_prefix = config.CONFIG["Server.fleetspeak_label_prefix"] for fs_label in res.clients[0].labels: if fs_label.service_name != "client" or ( label_prefix and not fs_label.label.startswith(label_prefix) ): continue try: grr_labels.append(fleetspeak_connector.label_map[fs_label.label]) except KeyError: grr_labels.append(fs_label.label) return grr_labels
Returns labels for a Fleetspeak-enabled client. Fleetspeak-enabled clients delegate labeling to Fleetspeak, as opposed to using labels in the GRR config. Args: client_id: Id of the client to fetch Fleetspeak labels for. Returns: A list of client labels.
GetLabelsFromFleetspeak
python
google/grr
grr/server/grr_response_server/fleetspeak_utils.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_utils.py
Apache-2.0
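The filtering in GetLabelsFromFleetspeak applies three rules: keep only labels attached by the "client" service, require the configured prefix when one is set, and translate known Fleetspeak labels to GRR names with a raw-label fallback. A pure-Python sketch of those rules under assumed names:

def filter_labels(fs_labels, label_prefix, label_map):
  """Mirrors the filtering rules above on (service_name, label) pairs."""
  grr_labels = []
  for service_name, label in fs_labels:
    if service_name != "client":
      continue  # Only labels attached by the "client" service count.
    if label_prefix and not label.startswith(label_prefix):
      continue  # A configured prefix acts as an allowlist filter.
    grr_labels.append(label_map.get(label, label))  # Fall back to raw label.
  return grr_labels


labels = [("client", "fs-linux"), ("system", "fs-linux"), ("client", "other")]
assert filter_labels(labels, "fs-", {"fs-linux": "linux"}) == ["linux"]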
def FetchClientResourceUsageRecords( client_id: str, start_range: timestamp_pb2.Timestamp, end_range: timestamp_pb2.Timestamp, ) -> List[resource_pb2.ClientResourceUsageRecord]: """Returns aggregated resource usage metrics of a client from Fleetspeak. Args: client_id: Id of the client to fetch Fleetspeak resource usage records for. start_range: Start timestamp of range. end_range: end timestamp of range. Returns: A list of client resource usage records retrieved from Fleetspeak. """ res = fleetspeak_connector.CONN.outgoing.FetchClientResourceUsageRecords( admin_pb2.FetchClientResourceUsageRecordsRequest( client_id=GRRIDToFleetspeakID(client_id), start_timestamp=start_range, end_timestamp=end_range, ), single_try_timeout=READ_SINGLE_TRY_TIMEOUT, timeout=READ_TOTAL_TIMEOUT, ) if not res.records: return [] return list(res.records)
Returns aggregated resource usage metrics of a client from Fleetspeak. Args: client_id: Id of the client to fetch Fleetspeak resource usage records for. start_range: Start timestamp of range. end_range: end timestamp of range. Returns: A list of client resource usage records retrieved from Fleetspeak.
FetchClientResourceUsageRecords
python
google/grr
grr/server/grr_response_server/fleetspeak_utils.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/fleetspeak_utils.py
Apache-2.0
def __init__(self, certificate=None, private_key=None): """Creates a communicator. Args: certificate: Our own certificate. private_key: Our own private key. """ self.private_key = private_key self.certificate = certificate self._ClearServerCipherCache() # A cache for encrypted ciphers self.encrypted_cipher_cache = utils.FastStore(max_size=50000)
Creates a communicator. Args: certificate: Our own certificate. private_key: Our own private key.
__init__
python
google/grr
grr/server/grr_response_server/communicator.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/communicator.py
Apache-2.0
def EncodeMessageList(cls, message_list, packed_message_list):
    """Encode the MessageList into the packed_message_list rdfvalue."""
    # By default, store the serialized message list uncompressed.
    uncompressed_data = message_list.SerializeToBytes()
    packed_message_list.message_list = uncompressed_data

    compressed_data = zlib.compress(uncompressed_data)

    # Only compress if it buys us something.
    if len(compressed_data) < len(uncompressed_data):
      packed_message_list.compression = (
          rdf_flows.PackedMessageList.CompressionType.ZCOMPRESSION
      )
      packed_message_list.message_list = compressed_data
Encode the MessageList into the packed_message_list rdfvalue.
EncodeMessageList
python
google/grr
grr/server/grr_response_server/communicator.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/communicator.py
Apache-2.0
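The compress-only-if-it-helps policy above is easy to isolate: write the uncompressed bytes first, then overwrite them only when zlib actually shrinks the payload. A minimal sketch on raw bytes, assuming nothing beyond the standard zlib module:

import zlib


def maybe_compress(data: bytes) -> tuple:
  """Returns (payload, was_compressed), compressing only if it shrinks."""
  compressed = zlib.compress(data)
  if len(compressed) < len(data):
    return compressed, True
  return data, False


payload, was_compressed = maybe_compress(b"abc" * 1000)  # Highly repetitive.
assert was_compressed and zlib.decompress(payload) == b"abc" * 1000
payload, was_compressed = maybe_compress(b"\x01\x02")  # Too small to shrink.
assert not was_compressed and payload == b"\x01\x02"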
def _GetServerCipher(self): """Returns the cipher for self.server_name.""" if self.server_cipher is not None: expiry = self.server_cipher_age + rdfvalue.Duration.From(1, rdfvalue.DAYS) if expiry > rdfvalue.RDFDatetime.Now(): return self.server_cipher remote_public_key = self._GetRemotePublicKey(self.server_name) self.server_cipher = communicator.Cipher( self.common_name, self.private_key, remote_public_key ) self.server_cipher_age = rdfvalue.RDFDatetime.Now() return self.server_cipher
Returns the cipher for self.server_name.
_GetServerCipher
python
google/grr
grr/server/grr_response_server/communicator.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/communicator.py
Apache-2.0
def EncodeMessages( self, message_list, result, destination=None, timestamp=None, api_version=3, ): """Accepts a list of messages and encodes for transmission. This function signs and then encrypts the payload. Args: message_list: A MessageList rdfvalue containing a list of GrrMessages. result: A ClientCommunication rdfvalue which will be filled in. destination: The CN of the remote system this should go to. timestamp: A timestamp to use for the signed messages. If None - use the current time. api_version: The api version which this should be encoded in. Returns: A nonce (based on time) which is inserted to the encrypted payload. The client can verify that the server is able to decrypt the message and return the nonce. Raises: RuntimeError: If we do not support this api version. """ if api_version not in [3]: raise RuntimeError( "Unsupported api version: %s, expected 3." % api_version ) remote_public_key = self._GetRemotePublicKey(destination) cipher = communicator.Cipher( self.common_name, self.private_key, remote_public_key ) # Make a nonce for this transaction if timestamp is None: self.timestamp = timestamp = int(time.time() * 1000000) packed_message_list = rdf_flows.PackedMessageList(timestamp=timestamp) self.EncodeMessageList(message_list, packed_message_list) result.encrypted_cipher_metadata = cipher.encrypted_cipher_metadata # Include the encrypted cipher. result.encrypted_cipher = cipher.encrypted_cipher serialized_message_list = packed_message_list.SerializeToBytes() # Encrypt the message symmetrically. # New scheme cipher is signed plus hmac over message list. result.packet_iv, result.encrypted = cipher.Encrypt(serialized_message_list) # This is to support older endpoints. result.hmac = cipher.HMAC(result.encrypted) # Newer endpoints only look at this HMAC. It is recalculated for each packet # in the session. Note that encrypted_cipher and encrypted_cipher_metadata # do not change between all packets in this session. result.full_hmac = cipher.HMAC( result.encrypted, result.encrypted_cipher, result.encrypted_cipher_metadata, result.packet_iv.SerializeToBytes(), struct.pack("<I", api_version), ) result.api_version = api_version if isinstance(result, rdfvalue.RDFValue): # Store the number of messages contained. result.num_messages = len(message_list) return timestamp
Accepts a list of messages and encodes for transmission. This function signs and then encrypts the payload. Args: message_list: A MessageList rdfvalue containing a list of GrrMessages. result: A ClientCommunication rdfvalue which will be filled in. destination: The CN of the remote system this should go to. timestamp: A timestamp to use for the signed messages. If None - use the current time. api_version: The api version which this should be encoded in. Returns: A nonce (based on time) which is inserted to the encrypted payload. The client can verify that the server is able to decrypt the message and return the nonce. Raises: RuntimeError: If we do not support this api version.
EncodeMessages
python
google/grr
grr/server/grr_response_server/communicator.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/communicator.py
Apache-2.0
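The full_hmac computed above chains the ciphertext and every per-session header field into a single MAC, so none of them can be swapped between sessions undetected. A hedged sketch of that idea using hmac/hashlib with an assumed key; GRR's actual Cipher.HMAC key handling and digest choice may differ.

import hashlib
import hmac
import struct


def full_hmac(key, encrypted, encrypted_cipher, cipher_metadata, packet_iv,
              api_version):
  """One MAC over the ciphertext plus every per-session header field."""
  mac = hmac.new(key, digestmod=hashlib.sha256)
  for part in (encrypted, encrypted_cipher, cipher_metadata, packet_iv,
               struct.pack("<I", api_version)):
    mac.update(part)
  return mac.digest()


tag = full_hmac(b"k" * 32, b"ciphertext", b"cipher", b"meta", b"iv..", 3)
# Verification recomputes the MAC and compares in constant time.
assert hmac.compare_digest(
    tag, full_hmac(b"k" * 32, b"ciphertext", b"cipher", b"meta", b"iv..", 3)
)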
def DecryptMessage(self, encrypted_response):
    """Decrypt the serialized, encrypted string.

    Args:
      encrypted_response: A serialized and encrypted string.

    Returns:
      A tuple of (messages, source, timestamp), as returned by
      DecodeMessages.

    Raises:
      DecodingError: If the message failed to parse or decrypt properly.
    """
    try:
      response_comms = rdf_flows.ClientCommunication.FromSerializedBytes(
          encrypted_response
      )
      return self.DecodeMessages(response_comms)
    except (
        rdfvalue.DecodeError,
        type_info.TypeValueError,
        ValueError,
        AttributeError,
    ) as e:
      raise DecodingError("Error while decrypting messages: %s" % e)
Decrypt the serialized, encrypted string. Args: encrypted_response: A serialized and encrypted string. Returns: A tuple of (messages, source, timestamp), as returned by DecodeMessages. Raises: DecodingError: If the message failed to parse or decrypt properly.
DecryptMessage
python
google/grr
grr/server/grr_response_server/communicator.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/communicator.py
Apache-2.0
def DecompressMessageList(cls, packed_message_list): """Decompress the message data from packed_message_list. Args: packed_message_list: A PackedMessageList rdfvalue with some data in it. Returns: a MessageList rdfvalue. Raises: DecodingError: If decompression fails. """ compression = packed_message_list.compression if compression == rdf_flows.PackedMessageList.CompressionType.UNCOMPRESSED: data = packed_message_list.message_list elif ( compression == rdf_flows.PackedMessageList.CompressionType.ZCOMPRESSION ): try: data = zlib.decompress(packed_message_list.message_list) except zlib.error as e: raise DecodingError("Failed to decompress: %s" % e) else: raise DecodingError("Compression scheme not supported") try: result = rdf_flows.MessageList.FromSerializedBytes(data) except rdfvalue.DecodeError: raise DecodingError("RDFValue parsing failed.") return result
Decompress the message data from packed_message_list. Args: packed_message_list: A PackedMessageList rdfvalue with some data in it. Returns: a MessageList rdfvalue. Raises: DecodingError: If decompression fails.
DecompressMessageList
python
google/grr
grr/server/grr_response_server/communicator.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/communicator.py
Apache-2.0
def DecodeMessages(self, response_comms): """Extract and verify server message. Args: response_comms: A ClientCommunication rdfvalue Returns: list of messages and the CN where they came from. Raises: DecryptionError: If the message failed to decrypt properly. """ # Have we seen this cipher before? cipher_verified = False try: cipher = self.encrypted_cipher_cache.Get(response_comms.encrypted_cipher) GRR_ENCRYPTED_CIPHER_CACHE.Increment(fields=["hits"]) # Even though we have seen this encrypted cipher already, we should still # make sure that all the other fields are sane and verify the HMAC. cipher.VerifyReceivedHMAC(response_comms) cipher_verified = True # If we have the cipher in the cache, we know the source and # should have a corresponding public key. source = cipher.GetSource() remote_public_key = self._GetRemotePublicKey(source) except KeyError: GRR_ENCRYPTED_CIPHER_CACHE.Increment(fields=["misses"]) cipher = communicator.ReceivedCipher(response_comms, self.private_key) source = cipher.GetSource() try: remote_public_key = self._GetRemotePublicKey(source) if cipher.VerifyCipherSignature(remote_public_key): # At this point we know this cipher is legit, we can cache it. self.encrypted_cipher_cache.Put( response_comms.encrypted_cipher, cipher ) cipher_verified = True except UnknownClientCertError: # We don't know who we are talking to. remote_public_key = None # Decrypt the message with the per packet IV. plain = cipher.Decrypt(response_comms.encrypted, response_comms.packet_iv) try: packed_message_list = rdf_flows.PackedMessageList.FromSerializedBytes( plain ) except rdfvalue.DecodeError as e: raise DecryptionError(e) message_list = self.DecompressMessageList(packed_message_list) # Are these messages authenticated? # fmt: off auth_state = self.VerifyMessageSignature( response_comms, packed_message_list, cipher, cipher_verified, response_comms.api_version, remote_public_key) # fmt: on # Mark messages as authenticated and where they came from. for msg in message_list.job: msg.auth_state = auth_state msg.source = cipher.cipher_metadata.source return ( message_list.job, cipher.cipher_metadata.source, packed_message_list.timestamp, )
Extract and verify server message. Args: response_comms: A ClientCommunication rdfvalue Returns: list of messages and the CN where they came from. Raises: DecryptionError: If the message failed to decrypt properly.
DecodeMessages
python
google/grr
grr/server/grr_response_server/communicator.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/communicator.py
Apache-2.0
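DecodeMessages verifies a cipher's signature at most once and afterwards serves it from encrypted_cipher_cache, keyed by the encrypted cipher bytes. A minimal sketch of that verify-once-then-cache pattern, with a plain dict instead of utils.FastStore and a stub verifier; all names are illustrative.

class CipherCache:
  """Caches ciphers keyed by their encrypted form, verifying each once."""

  def __init__(self, verify):
    self._verify = verify  # The expensive signature check.
    self._cache = {}  # encrypted_cipher bytes -> verified cipher object.

  def get(self, encrypted_cipher):
    try:
      return self._cache[encrypted_cipher]  # Hit: signature already checked.
    except KeyError:
      cipher = self._verify(encrypted_cipher)  # Miss: do the expensive work.
      self._cache[encrypted_cipher] = cipher  # Cache only verified ciphers.
      return cipher


calls = []
cache = CipherCache(lambda blob: calls.append(blob) or ("cipher", blob))
cache.get(b"abc")
cache.get(b"abc")
assert len(calls) == 1  # The second lookup skipped verification.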
def VerifyMessageSignature( self, unused_response_comms, packed_message_list, cipher, cipher_verified, api_version, remote_public_key, ): """Verify the message list signature. This is the way the messages are verified in the client. In the client we also check that the nonce returned by the server is correct (the timestamp doubles as a nonce). If the nonce fails we deem the response unauthenticated since it might have resulted from a replay attack. Args: packed_message_list: The PackedMessageList rdfvalue from the server. cipher: The cipher belonging to the remote end. cipher_verified: If True, the cipher's signature is not verified again. api_version: The api version we should use. remote_public_key: The public key of the source. Returns: An rdf_flows.GrrMessage.AuthorizationState. Raises: DecryptionError: if the message is corrupt. """ # This is not used atm since we only support a single api version (3). _ = api_version result = rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED if cipher_verified or cipher.VerifyCipherSignature(remote_public_key): GRR_AUTHENTICATED_MESSAGES.Increment() result = rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED # Check for replay attacks. We expect the server to return the same # timestamp nonce we sent. if packed_message_list.timestamp != self.timestamp: # pytype: disable=attribute-error result = rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED if not cipher.cipher_metadata: # Fake the metadata cipher.cipher_metadata = rdf_flows.CipherMetadata( source=packed_message_list.source ) return result
Verify the message list signature. This is the way the messages are verified in the client. In the client we also check that the nonce returned by the server is correct (the timestamp doubles as a nonce). If the nonce fails we deem the response unauthenticated since it might have resulted from a replay attack. Args: packed_message_list: The PackedMessageList rdfvalue from the server. cipher: The cipher belonging to the remote end. cipher_verified: If True, the cipher's signature is not verified again. api_version: The api version we should use. remote_public_key: The public key of the source. Returns: An rdf_flows.GrrMessage.AuthorizationState. Raises: DecryptionError: if the message is corrupt.
VerifyMessageSignature
python
google/grr
grr/server/grr_response_server/communicator.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/communicator.py
Apache-2.0
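The replay defense described in the docstring hinges on the timestamp doubling as a nonce: the client only trusts a response that echoes back the exact nonce it sent, even when the signature checks out. A tiny sketch of that check under assumed names:

def check_nonce(sent_nonce, echoed_nonce, signature_ok):
  """Mirrors the nonce comparison above; the timestamp doubles as the nonce."""
  if signature_ok and echoed_nonce == sent_nonce:
    return "AUTHENTICATED"
  return "UNAUTHENTICATED"  # Bad signature, or a possibly replayed response.


assert check_nonce(1234, 1234, True) == "AUTHENTICATED"
assert check_nonce(1234, 1111, True) == "UNAUTHENTICATED"  # Stale nonce.
assert check_nonce(1234, 1234, False) == "UNAUTHENTICATED"  # Bad signature.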
def ReceiveFetchedFileStat( self, stat_entry: rdf_client_fs.StatEntry, request_data: Optional[Mapping[str, Any]] = None, ): """This method will be called for each new file stat successfully fetched. Args: stat_entry: rdf_client_fs.StatEntry object describing the file. request_data: Arbitrary dictionary that was passed to the corresponding StartFileFetch call. """ del request_data # Unused. if self.state.stop_at_stat: status = rdf_file_finder.CollectFilesByKnownPathResult.Status.COLLECTED self.state.progress.num_in_progress -= 1 self.state.progress.num_collected += 1 else: status = rdf_file_finder.CollectFilesByKnownPathResult.Status.IN_PROGRESS result = rdf_file_finder.CollectFilesByKnownPathResult( stat=stat_entry, status=status ) self.SendReply(result)
This method will be called for each new file stat successfully fetched. Args: stat_entry: rdf_client_fs.StatEntry object describing the file. request_data: Arbitrary dictionary that was passed to the corresponding StartFileFetch call.
ReceiveFetchedFileStat
python
google/grr
grr/server/grr_response_server/flows/file.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/file.py
Apache-2.0
def ReceiveFetchedFileHash( self, stat_entry: rdf_client_fs.StatEntry, file_hash: rdf_crypto.Hash, request_data: Optional[Mapping[str, Any]] = None, ): """This method will be called for each new file hash successfully fetched. Args: stat_entry: rdf_client_fs.StatEntry object describing the file. file_hash: rdf_crypto.Hash object with file hashes. request_data: Arbitrary dictionary that was passed to the corresponding StartFileFetch call. """ del request_data # Unused. if self.state.stop_at_hash: status = rdf_file_finder.CollectFilesByKnownPathResult.Status.COLLECTED self.state.progress.num_in_progress -= 1 self.state.progress.num_collected += 1 else: status = rdf_file_finder.CollectFilesByKnownPathResult.Status.IN_PROGRESS result = rdf_file_finder.CollectFilesByKnownPathResult( stat=stat_entry, hash=file_hash, status=status ) self.SendReply(result)
This method will be called for each new file hash successfully fetched. Args: stat_entry: rdf_client_fs.StatEntry object describing the file. file_hash: rdf_crypto.Hash object with file hashes. request_data: Arbitrary dictionary that was passed to the corresponding StartFileFetch call.
ReceiveFetchedFileHash
python
google/grr
grr/server/grr_response_server/flows/file.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/file.py
Apache-2.0
def ReceiveFetchedFile( self, stat_entry: rdf_client_fs.StatEntry, file_hash: rdf_crypto.Hash, request_data: Optional[Mapping[str, Any]] = None, is_duplicate: bool = False, ): """This method will be called for each new file successfully fetched. Args: stat_entry: rdf_client_fs.StatEntry object describing the file. file_hash: rdf_crypto.Hash object with file hashes. request_data: Arbitrary dictionary that was passed to the corresponding StartFileFetch call. is_duplicate: If True, the file wasn't actually collected as its hash was found in the filestore. """ del request_data, is_duplicate # Unused. result = rdf_file_finder.CollectFilesByKnownPathResult( stat=stat_entry, hash=file_hash, status=rdf_file_finder.CollectFilesByKnownPathResult.Status.COLLECTED, ) self.SendReply(result) self.state.progress.num_in_progress -= 1 self.state.progress.num_collected += 1
This method will be called for each new file successfully fetched. Args: stat_entry: rdf_client_fs.StatEntry object describing the file. file_hash: rdf_crypto.Hash object with file hashes. request_data: Arbitrary dictionary that was passed to the corresponding StartFileFetch call. is_duplicate: If True, the file wasn't actually collected as its hash was found in the filestore.
ReceiveFetchedFile
python
google/grr
grr/server/grr_response_server/flows/file.py
https://github.com/google/grr/blob/master/grr/server/grr_response_server/flows/file.py
Apache-2.0
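All three callbacks above share the same progress accounting: a file leaves the in-progress pool exactly once, at whichever stage collection stops (stat, hash, or full contents). A minimal sketch of that invariant under hypothetical names:

class Progress:
  """Tracks the invariant: num_in_progress + num_collected stays constant."""

  def __init__(self, in_progress):
    self.num_in_progress = in_progress
    self.num_collected = 0

  def mark_collected(self):
    self.num_in_progress -= 1
    self.num_collected += 1


p = Progress(in_progress=2)
p.mark_collected()  # E.g. stop_at_stat: done after the stat stage.
assert (p.num_in_progress, p.num_collected) == (1, 1)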