id (int64, 0–190k) | prompt (string, lengths 21–13.4M) | docstring (string, lengths 1–12k, nullable ⌀)
---|---|---|
189,060 | from typing import TYPE_CHECKING
from django.conf import settings
def is_root_user(*, request: "HttpRequest", user: "User") -> bool:
root = (
hasattr(settings, "ROOT_USER")
and request.user != user
and user.username == settings.ROOT_USER
)
demo = (
getattr(settings, "DEMO", False) and request.user.username == settings.ROOT_USER
)
return root or demo | null |
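A minimal usage sketch for `is_root_user` follows; the view, URL wiring, and response shape are assumptions for illustration, not part of this row.

```python
# Hypothetical usage sketch: refuse to act on the configured root (or demo) user.
from django.contrib.auth import get_user_model
from django.http import HttpRequest, JsonResponse

def disable_user_view(request: HttpRequest, pk: int) -> JsonResponse:
    user = get_user_model().objects.get(pk=pk)
    if is_root_user(request=request, user=user):
        return JsonResponse({"detail": "The root user cannot be modified."}, status=400)
    user.is_active = False
    user.save(update_fields=["is_active"])
    return JsonResponse({"detail": "ok"})
```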
189,061 | from django.db import migrations
def link_agents_to_users(apps, schema_editor):
Agent = apps.get_model("agents", "Agent")
User = apps.get_model("accounts", "User")
for agent in Agent.objects.all():
user = User.objects.filter(username=agent.agent_id).first()
if user:
user.agent = agent
user.save() | null |
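A minimal sketch of how a data-migration function like `link_agents_to_users` is normally registered with `RunPython`; the dependency names below are placeholders, since the surrounding `Migration` class is not shown in this row.

```python
# Sketch only: dependency names are placeholders, not the project's real migration files.
class Migration(migrations.Migration):
    dependencies = [
        ("agents", "0001_initial"),
        ("accounts", "0001_initial"),
    ]

    operations = [
        # no-op reverse so the migration can be unapplied without unlinking users
        migrations.RunPython(link_agents_to_users, migrations.RunPython.noop),
    ]
```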
189,062 | import django.db.models.deletion
from django.db import migrations, models
def delete_alerts_without_agent(apps, schema):
Alert = apps.get_model("alerts", "Alert")
Alert.objects.filter(agent=None).delete() | null |
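The same cleanup can also be run ad hoc, e.g. from a Django shell, which is handy for checking what the migration above will delete; a hedged sketch assuming the model is importable as `alerts.models.Alert`.

```python
# Hedged sketch: ad hoc equivalent of delete_alerts_without_agent() for a shell session.
from alerts.models import Alert  # assumed import path

orphans = Alert.objects.filter(agent=None)
print(f"removing {orphans.count()} alerts with no agent")
orphans.delete()
```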
189,063 | from typing import TYPE_CHECKING
from django.shortcuts import get_object_or_404
from rest_framework import permissions
from tacticalrmm.permissions import _has_perm, _has_perm_on_agent
class Alert(models.Model):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="agent",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_check = models.ForeignKey(
"checks.Check",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
alert_type = models.CharField(
max_length=20, choices=AlertType.choices, default=AlertType.AVAILABILITY
)
message = models.TextField(null=True, blank=True)
alert_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
snoozed = models.BooleanField(default=False)
snooze_until = models.DateTimeField(null=True, blank=True)
resolved = models.BooleanField(default=False)
resolved_on = models.DateTimeField(null=True, blank=True)
severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_sent = models.DateTimeField(null=True, blank=True)
resolved_email_sent = models.DateTimeField(null=True, blank=True)
sms_sent = models.DateTimeField(null=True, blank=True)
resolved_sms_sent = models.DateTimeField(null=True, blank=True)
hidden = models.BooleanField(default=False)
action_run = models.DateTimeField(null=True, blank=True)
action_stdout = models.TextField(null=True, blank=True)
action_stderr = models.TextField(null=True, blank=True)
action_retcode = models.BigIntegerField(null=True, blank=True)
action_execution_time = models.CharField(max_length=100, null=True, blank=True)
resolved_action_run = models.DateTimeField(null=True, blank=True)
resolved_action_stdout = models.TextField(null=True, blank=True)
resolved_action_stderr = models.TextField(null=True, blank=True)
resolved_action_retcode = models.BigIntegerField(null=True, blank=True)
resolved_action_execution_time = models.CharField(
max_length=100, null=True, blank=True
)
def __str__(self) -> str:
return f"{self.alert_type} - {self.message}"
def assigned_agent(self) -> "Optional[Agent]":
return self.agent
def site(self) -> "Site":
return self.agent.site
def client(self) -> "Client":
return self.agent.client
def resolve(self) -> None:
self.resolved = True
self.resolved_on = djangotime.now()
self.snoozed = False
self.snooze_until = None
self.save(update_fields=["resolved", "resolved_on", "snoozed", "snooze_until"])
@classmethod
def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
if not cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
agent=agent,
alert_type=AlertType.AVAILABILITY,
severity=AlertSeverity.ERROR,
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
@classmethod
def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
# need to pass agent if the check is a policy
if not cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_check=check,
agent=agent,
alert_type=AlertType.CHECK,
severity=(
check.alert_severity
if check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else alert_severity
),
message=f"{agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_check=check,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
@classmethod
def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
if not cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_task=task,
agent=agent,
alert_type=AlertType.TASK,
severity=task.alert_severity,
message=f"{agent.hostname} has task: {task.name} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_task=task,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
@classmethod
def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
dashboard_severities = None
email_severities = None
text_severities = None
always_dashboard = None
always_email = None
always_text = None
alert_interval = None
email_task = None
text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_outage_email_task, agent_outage_sms_task
email_task = agent_outage_email_task
text_task = agent_outage_sms_task
email_alert = instance.overdue_email_alert
text_alert = instance.overdue_text_alert
dashboard_alert = instance.overdue_dashboard_alert
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
alert_severity = AlertSeverity.ERROR
agent = instance
dashboard_severities = [AlertSeverity.ERROR]
email_severities = [AlertSeverity.ERROR]
text_severities = [AlertSeverity.ERROR]
# set alert_template settings
if alert_template:
always_dashboard = alert_template.agent_always_alert
always_email = alert_template.agent_always_email
always_text = alert_template.agent_always_text
alert_interval = alert_template.agent_periodic_alert_days
run_script_action = alert_template.agent_script_actions
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_check_email_alert_task,
handle_check_sms_alert_task,
)
email_task = handle_check_email_alert_task
text_task = handle_check_sms_alert_task
email_alert = instance.assigned_check.email_alert
text_alert = instance.assigned_check.text_alert
dashboard_alert = instance.assigned_check.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = (
instance.assigned_check.alert_severity
if instance.assigned_check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else instance.alert_severity
)
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = (
alert_template.check_dashboard_alert_severity
or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
AlertSeverity.INFO,
]
)
email_severities = alert_template.check_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.check_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.check_always_alert
always_email = alert_template.check_always_email
always_text = alert_template.check_always_text
alert_interval = alert_template.check_periodic_alert_days
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import handle_task_email_alert, handle_task_sms_alert
email_task = handle_task_email_alert
text_task = handle_task_sms_alert
email_alert = instance.task.email_alert
text_alert = instance.task.text_alert
dashboard_alert = instance.task.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = instance.task.alert_severity
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = alert_template.task_dashboard_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
email_severities = alert_template.task_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.task_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.task_always_alert
always_email = alert_template.task_always_email
always_text = alert_template.task_always_text
alert_interval = alert_template.task_periodic_alert_days
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
# check if alert severity changed and update the alert
if alert_severity != alert.severity:
alert.severity = alert_severity
alert.save(update_fields=["severity"])
# create alert in dashboard if enabled
if dashboard_alert or always_dashboard:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and dashboard_severities
and alert.severity in dashboard_severities
):
alert.hidden = False
alert.save(update_fields=["hidden"])
# send email if enabled
if email_alert or always_email:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and email_severities
and alert.severity in email_severities
):
email_task.delay(
pk=alert.pk,
alert_interval=alert_interval,
)
# send text if enabled
if text_alert or always_text:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and text_severities
and alert.severity in text_severities
):
text_task.delay(pk=alert.pk, alert_interval=alert_interval)
# check if any scripts should be run
if (
alert_template
and alert_template.action
and run_script_action
and not alert.action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.action,
username="alert-action-failure",
)
r = agent.run_script(
scriptpk=alert_template.action.pk,
args=alert.parse_script_args(alert_template.action_args),
timeout=alert_template.action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.action_retcode = r["retcode"]
alert.action_stdout = r["stdout"]
alert.action_stderr = r["stderr"]
alert.action_execution_time = "{:.4f}".format(r["execution_time"])
alert.action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) failure alert",
)
@classmethod
def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
email_on_resolved = False
text_on_resolved = False
resolved_email_task = None
resolved_text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_recovery_email_task, agent_recovery_sms_task
resolved_email_task = agent_recovery_email_task
resolved_text_task = agent_recovery_sms_task
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
agent = instance
if alert_template:
email_on_resolved = alert_template.agent_email_on_resolved
text_on_resolved = alert_template.agent_text_on_resolved
run_script_action = alert_template.agent_script_actions
if agent.overdue_email_alert:
email_on_resolved = True
if agent.overdue_text_alert:
text_on_resolved = True
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_resolved_check_email_alert_task,
handle_resolved_check_sms_alert_task,
)
resolved_email_task = handle_resolved_check_email_alert_task
resolved_text_task = handle_resolved_check_sms_alert_task
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.check_email_on_resolved
text_on_resolved = alert_template.check_text_on_resolved
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import (
handle_resolved_task_email_alert,
handle_resolved_task_sms_alert,
)
resolved_email_task = handle_resolved_task_email_alert
resolved_text_task = handle_resolved_task_sms_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.task_email_on_resolved
text_on_resolved = alert_template.task_text_on_resolved
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
alert.resolve()
# check if a resolved email notification should be sent
if email_on_resolved and not alert.resolved_email_sent:
resolved_email_task.delay(pk=alert.pk)
# check if resolved text should be sent
if text_on_resolved and not alert.resolved_sms_sent:
resolved_text_task.delay(pk=alert.pk)
# check if resolved script should be run
if (
alert_template
and alert_template.resolved_action
and run_script_action
and not alert.resolved_action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.action,
username="alert-action-resolved",
)
r = agent.run_script(
scriptpk=alert_template.resolved_action.pk,
args=alert.parse_script_args(alert_template.resolved_action_args),
timeout=alert_template.resolved_action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.resolved_action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.resolved_action_retcode = r["retcode"]
alert.resolved_action_stdout = r["stdout"]
alert.resolved_action_stderr = r["stderr"]
alert.resolved_action_execution_time = "{:.4f}".format(
r["execution_time"]
)
alert.resolved_action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) resolved alert",
)
def parse_script_args(self, args: List[str]) -> List[str]:
if not args:
return []
temp_args = []
# pattern to match for injection
pattern = re.compile(".*\\{\\{alert\\.(.*)\\}\\}.*")
for arg in args:
if match := pattern.match(arg):
name = match.group(1)
# check if attr exists and isn't a function
if hasattr(self, name) and not callable(getattr(self, name)):
value = f"'{getattr(self, name)}'"
else:
continue
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
except re.error:
temp_args.append(re.sub("\\{\\{.*\\}\\}", re.escape(value), arg))
except Exception as e:
DebugLog.error(log_type=DebugLogType.SCRIPTING, message=str(e))
continue
else:
temp_args.append(arg)
return temp_args
def _has_perm_on_alert(user: "User", id: int) -> bool:
from alerts.models import Alert
role = user.role
if user.is_superuser or (role and getattr(role, "is_superuser")):
return True
# make sure non-superusers with empty roles aren't permitted
elif not role:
return False
alert = get_object_or_404(Alert, id=id)
if alert.agent:
agent_id = alert.agent.agent_id
else:
return True
return _has_perm_on_agent(user, agent_id) | null |
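A helper like `_has_perm_on_alert` is typically consumed by a DRF permission class; the sketch below is an assumption built only on the `permissions` and `_has_perm` imports shown at the top of this row, and the permission strings are invented.

```python
# Hedged sketch of a permission class built on _has_perm_on_alert.
# "can_list_alerts" / "can_manage_alerts" are assumed permission names.
class AlertPerms(permissions.BasePermission):
    def has_permission(self, r, view) -> bool:
        if r.method == "GET":
            if "pk" in view.kwargs:
                return _has_perm(r, "can_list_alerts") and _has_perm_on_alert(
                    r.user, view.kwargs["pk"]
                )
            return _has_perm(r, "can_list_alerts")
        return _has_perm(r, "can_manage_alerts")
```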
189,064 | from django.utils import timezone as djangotime
from agents.models import Agent
from tacticalrmm.celery import app
from .models import Alert
class Alert(models.Model):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="agent",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_check = models.ForeignKey(
"checks.Check",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
alert_type = models.CharField(
max_length=20, choices=AlertType.choices, default=AlertType.AVAILABILITY
)
message = models.TextField(null=True, blank=True)
alert_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
snoozed = models.BooleanField(default=False)
snooze_until = models.DateTimeField(null=True, blank=True)
resolved = models.BooleanField(default=False)
resolved_on = models.DateTimeField(null=True, blank=True)
severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_sent = models.DateTimeField(null=True, blank=True)
resolved_email_sent = models.DateTimeField(null=True, blank=True)
sms_sent = models.DateTimeField(null=True, blank=True)
resolved_sms_sent = models.DateTimeField(null=True, blank=True)
hidden = models.BooleanField(default=False)
action_run = models.DateTimeField(null=True, blank=True)
action_stdout = models.TextField(null=True, blank=True)
action_stderr = models.TextField(null=True, blank=True)
action_retcode = models.BigIntegerField(null=True, blank=True)
action_execution_time = models.CharField(max_length=100, null=True, blank=True)
resolved_action_run = models.DateTimeField(null=True, blank=True)
resolved_action_stdout = models.TextField(null=True, blank=True)
resolved_action_stderr = models.TextField(null=True, blank=True)
resolved_action_retcode = models.BigIntegerField(null=True, blank=True)
resolved_action_execution_time = models.CharField(
max_length=100, null=True, blank=True
)
def __str__(self) -> str:
return f"{self.alert_type} - {self.message}"
def assigned_agent(self) -> "Optional[Agent]":
return self.agent
def site(self) -> "Site":
return self.agent.site
def client(self) -> "Client":
return self.agent.client
def resolve(self) -> None:
self.resolved = True
self.resolved_on = djangotime.now()
self.snoozed = False
self.snooze_until = None
self.save(update_fields=["resolved", "resolved_on", "snoozed", "snooze_until"])
@classmethod
def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
if not cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
agent=agent,
alert_type=AlertType.AVAILABILITY,
severity=AlertSeverity.ERROR,
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
@classmethod
def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
# need to pass agent if the check is a policy
if not cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_check=check,
agent=agent,
alert_type=AlertType.CHECK,
severity=(
check.alert_severity
if check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else alert_severity
),
message=f"{agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_check=check,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
@classmethod
def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
if not cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_task=task,
agent=agent,
alert_type=AlertType.TASK,
severity=task.alert_severity,
message=f"{agent.hostname} has task: {task.name} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_task=task,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
@classmethod
def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
dashboard_severities = None
email_severities = None
text_severities = None
always_dashboard = None
always_email = None
always_text = None
alert_interval = None
email_task = None
text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_outage_email_task, agent_outage_sms_task
email_task = agent_outage_email_task
text_task = agent_outage_sms_task
email_alert = instance.overdue_email_alert
text_alert = instance.overdue_text_alert
dashboard_alert = instance.overdue_dashboard_alert
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
alert_severity = AlertSeverity.ERROR
agent = instance
dashboard_severities = [AlertSeverity.ERROR]
email_severities = [AlertSeverity.ERROR]
text_severities = [AlertSeverity.ERROR]
# set alert_template settings
if alert_template:
always_dashboard = alert_template.agent_always_alert
always_email = alert_template.agent_always_email
always_text = alert_template.agent_always_text
alert_interval = alert_template.agent_periodic_alert_days
run_script_action = alert_template.agent_script_actions
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_check_email_alert_task,
handle_check_sms_alert_task,
)
email_task = handle_check_email_alert_task
text_task = handle_check_sms_alert_task
email_alert = instance.assigned_check.email_alert
text_alert = instance.assigned_check.text_alert
dashboard_alert = instance.assigned_check.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = (
instance.assigned_check.alert_severity
if instance.assigned_check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else instance.alert_severity
)
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = (
alert_template.check_dashboard_alert_severity
or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
AlertSeverity.INFO,
]
)
email_severities = alert_template.check_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.check_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.check_always_alert
always_email = alert_template.check_always_email
always_text = alert_template.check_always_text
alert_interval = alert_template.check_periodic_alert_days
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import handle_task_email_alert, handle_task_sms_alert
email_task = handle_task_email_alert
text_task = handle_task_sms_alert
email_alert = instance.task.email_alert
text_alert = instance.task.text_alert
dashboard_alert = instance.task.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = instance.task.alert_severity
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = alert_template.task_dashboard_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
email_severities = alert_template.task_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.task_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.task_always_alert
always_email = alert_template.task_always_email
always_text = alert_template.task_always_text
alert_interval = alert_template.task_periodic_alert_days
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
# check if alert severity changed and update the alert
if alert_severity != alert.severity:
alert.severity = alert_severity
alert.save(update_fields=["severity"])
# create alert in dashboard if enabled
if dashboard_alert or always_dashboard:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and dashboard_severities
and alert.severity in dashboard_severities
):
alert.hidden = False
alert.save(update_fields=["hidden"])
# send email if enabled
if email_alert or always_email:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and email_severities
and alert.severity in email_severities
):
email_task.delay(
pk=alert.pk,
alert_interval=alert_interval,
)
# send text if enabled
if text_alert or always_text:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and text_severities
and alert.severity in text_severities
):
text_task.delay(pk=alert.pk, alert_interval=alert_interval)
# check if any scripts should be run
if (
alert_template
and alert_template.action
and run_script_action
and not alert.action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.action,
username="alert-action-failure",
)
r = agent.run_script(
scriptpk=alert_template.action.pk,
args=alert.parse_script_args(alert_template.action_args),
timeout=alert_template.action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.action_retcode = r["retcode"]
alert.action_stdout = r["stdout"]
alert.action_stderr = r["stderr"]
alert.action_execution_time = "{:.4f}".format(r["execution_time"])
alert.action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) failure alert",
)
@classmethod
def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
email_on_resolved = False
text_on_resolved = False
resolved_email_task = None
resolved_text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_recovery_email_task, agent_recovery_sms_task
resolved_email_task = agent_recovery_email_task
resolved_text_task = agent_recovery_sms_task
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
agent = instance
if alert_template:
email_on_resolved = alert_template.agent_email_on_resolved
text_on_resolved = alert_template.agent_text_on_resolved
run_script_action = alert_template.agent_script_actions
if agent.overdue_email_alert:
email_on_resolved = True
if agent.overdue_text_alert:
text_on_resolved = True
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_resolved_check_email_alert_task,
handle_resolved_check_sms_alert_task,
)
resolved_email_task = handle_resolved_check_email_alert_task
resolved_text_task = handle_resolved_check_sms_alert_task
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.check_email_on_resolved
text_on_resolved = alert_template.check_text_on_resolved
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import (
handle_resolved_task_email_alert,
handle_resolved_task_sms_alert,
)
resolved_email_task = handle_resolved_task_email_alert
resolved_text_task = handle_resolved_task_sms_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.task_email_on_resolved
text_on_resolved = alert_template.task_text_on_resolved
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
alert.resolve()
# check if a resolved email notification should be sent
if email_on_resolved and not alert.resolved_email_sent:
resolved_email_task.delay(pk=alert.pk)
# check if resolved text should be sent
if text_on_resolved and not alert.resolved_sms_sent:
resolved_text_task.delay(pk=alert.pk)
# check if resolved script should be run
if (
alert_template
and alert_template.resolved_action
and run_script_action
and not alert.resolved_action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.action,
username="alert-action-resolved",
)
r = agent.run_script(
scriptpk=alert_template.resolved_action.pk,
args=alert.parse_script_args(alert_template.resolved_action_args),
timeout=alert_template.resolved_action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.resolved_action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.resolved_action_retcode = r["retcode"]
alert.resolved_action_stdout = r["stdout"]
alert.resolved_action_stderr = r["stderr"]
alert.resolved_action_execution_time = "{:.4f}".format(
r["execution_time"]
)
alert.resolved_action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) resolved alert",
)
def parse_script_args(self, args: List[str]) -> List[str]:
if not args:
return []
temp_args = []
# pattern to match for injection
pattern = re.compile(".*\\{\\{alert\\.(.*)\\}\\}.*")
for arg in args:
if match := pattern.match(arg):
name = match.group(1)
# check if attr exists and isn't a function
if hasattr(self, name) and not callable(getattr(self, name)):
value = f"'{getattr(self, name)}'"
else:
continue
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
except re.error:
temp_args.append(re.sub("\\{\\{.*\\}\\}", re.escape(value), arg))
except Exception as e:
DebugLog.error(log_type=DebugLogType.SCRIPTING, message=str(e))
continue
else:
temp_args.append(arg)
return temp_args
# periodic housekeeping: clear snoozes whose snooze_until time has passed
@app.task
def unsnooze_alerts() -> str:
Alert.objects.filter(snoozed=True, snooze_until__lte=djangotime.now()).update(
snoozed=False, snooze_until=None
)
return "ok" | null |
189,065 | from django.utils import timezone as djangotime
from agents.models import Agent
from tacticalrmm.celery import app
from .models import Alert
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat: "AgentPlat" = models.CharField( # type: ignore
max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
goarch: "GoArch" = models.CharField( # type: ignore
max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(
max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
return self.hostname
def save(self, *args, **kwargs):
# prevent recursion since calling set_alert_template() also calls save()
if not hasattr(self, "_processing_set_alert_template"):
self._processing_set_alert_template = False
if self.pk and not self._processing_set_alert_template:
orig = Agent.objects.get(pk=self.pk)
mon_type_changed = self.monitoring_type != orig.monitoring_type
site_changed = self.site_id != orig.site_id
if mon_type_changed or site_changed:
self._processing_set_alert_template = True
self.set_alert_template()
self._processing_set_alert_template = False
super().save(*args, **kwargs)
def client(self) -> "Client":
return self.site.client
def timezone(self) -> str:
# return the default timezone unless the timezone is explicitly set per agent
if self.time_zone:
return self.time_zone
return get_core_settings().default_time_zone
@property
def is_posix(self) -> bool:
return self.plat in {AgentPlat.LINUX, AgentPlat.DARWIN}
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
elif "32 bit" in self.operating_system or "32bit" in self.operating_system:
return "32"
return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
ver = settings.LATEST_AGENT_VER
if not self.goarch:
DebugLog.warning(
agent=self,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
)
return "noarch"
if pyver.parse(self.version) <= pyver.parse("1.3.0"):
return "not supported"
url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"
if not force:
if self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).exists():
self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).delete()
PendingAction.objects.create(
agent=self,
action_type=PAAction.AGENT_UPDATE,
details={
"url": url,
"version": ver,
"inno": bin,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": ver,
"inno": bin,
},
}
asyncio.run(self.nats_cmd(nats_data, wait=False))
return "created"
@property
def status(self) -> str:
now = djangotime.now()
offline = now - djangotime.timedelta(minutes=self.offline_time)
overdue = now - djangotime.timedelta(minutes=self.overdue_time)
if self.last_seen is not None:
if (self.last_seen < offline) and (self.last_seen > overdue):
return AGENT_STATUS_OFFLINE
elif (self.last_seen < offline) and (self.last_seen < overdue):
return AGENT_STATUS_OVERDUE
else:
return AGENT_STATUS_ONLINE
else:
return AGENT_STATUS_OFFLINE
def checks(self) -> Dict[str, Any]:
total, passing, failing, warning, info = 0, 0, 0, 0, 0
for check in self.get_checks_with_policies(exclude_overridden=True):
total += 1
if (
not hasattr(check.check_result, "status")
or isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.PASSING
):
passing += 1
elif (
isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.FAILING
):
alert_severity = (
check.check_result.alert_severity
if check.check_type
in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
)
else check.alert_severity
)
if alert_severity == AlertSeverity.ERROR:
failing += 1
elif alert_severity == AlertSeverity.WARNING:
warning += 1
elif alert_severity == AlertSeverity.INFO:
info += 1
ret = {
"total": total,
"passing": passing,
"failing": failing,
"warning": warning,
"info": info,
"has_failing_checks": failing > 0 or warning > 0,
}
return ret
def pending_actions_count(self) -> int:
ret = cache.get(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}")
if ret is None:
ret = self.pendingactions.filter(status=PAStatus.PENDING).count()
cache.set(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}", ret, 600)
return ret
def cpu_model(self) -> List[str]:
if self.is_posix:
try:
return cast(List[str], self.wmi_detail["cpus"])
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
for cpu in cpus:
name = [x["Name"] for x in cpu if "Name" in x][0]
lp, nc = "", ""
with suppress(Exception):
lp = [
x["NumberOfLogicalProcessors"]
for x in cpu
if "NumberOfCores" in x
][0]
nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
if lp and nc:
cpu_string = f"{name}, {nc}C/{lp}T"
else:
cpu_string = name
ret.append(cpu_string)
return ret
except:
return ["unknown cpu model"]
def graphics(self) -> str:
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
for i in graphics:
caption = [x["Caption"] for x in i if "Caption" in x][0]
if "microsoft remote display adapter" in caption.lower():
mrda.append("yes")
continue
ret.append([x["Caption"] for x in i if "Caption" in x][0])
# only return this if there are no other graphics cards
if not ret and mrda:
return "Microsoft Remote Display Adapter"
return ", ".join(ret)
except:
return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
except:
return "error getting local ips"
for i in ips:
try:
addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
except:
continue
if addr is None:
continue
for ip in addr:
if validators.ipv4(ip):
ret.append(ip)
if len(ret) == 1:
return cast(str, ret[0])
return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
if self.is_posix:
try:
return cast(str, self.wmi_detail["make_model"])
except:
return "error getting make/model"
with suppress(Exception):
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
model = [x["Model"] for x in comp_sys if "Model" in x][0]
if "to be filled" in model.lower():
mobo = self.wmi_detail["base_board"][0]
make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
model = [x["Product"] for x in mobo if "Product" in x][0]
if make.lower() == "lenovo":
sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
if "to be filled" not in sysfam.lower():
model = sysfam
return f"{make} {model}"
with suppress(Exception):
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
return "unknown make/model"
def physical_disks(self) -> Sequence[Disk]:
if self.is_posix:
try:
return cast(List[Disk], self.wmi_detail["disks"])
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
for disk in disks:
interface_type = [
x["InterfaceType"] for x in disk if "InterfaceType" in x
][0]
if interface_type == "USB":
continue
model = [x["Caption"] for x in disk if "Caption" in x][0]
size = [x["Size"] for x in disk if "Size" in x][0]
size_in_gb = round(int(size) / 1_073_741_824)
ret.append(f"{model} {size_in_gb:,}GB {interface_type}")
return ret
except:
return ["unknown disk"]
def serial_number(self) -> str:
if self.is_posix:
try:
return self.wmi_detail["serialnumber"]
except:
return ""
try:
return self.wmi_detail["bios"][0][0]["SerialNumber"]
except:
return ""
def hex_mesh_node_id(self) -> str:
return _b64_to_hex(self.mesh_node_id)
def online_agents(cls, min_version: str = "") -> "List[Agent]":
if min_version:
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if pyver.parse(i.version) >= pyver.parse(min_version)
and i.status == AGENT_STATUS_ONLINE
]
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if i.status == AGENT_STATUS_ONLINE
]
def is_supported_script(self, platforms: List[str]) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
if exclude_overridden:
checks = (
list(
check
for check in self.agentchecks.all()
if not check.overridden_by_policy
)
+ self.get_checks_from_policies()
)
else:
checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
results = self.taskresults.all() # type: ignore
for task in tasks:
for result in results:
if result.task.id == task.pk:
task.task_result = result
break
return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
results = self.checkresults.all() # type: ignore
for check in checks:
for result in results:
if result.assigned_check.id == check.pk:
check.check_result = result
break
return checks
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
from checks.models import Check
site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
default_policy = getattr(
get_core_settings(), f"{self.monitoring_type}_policy", None
)
# prefetch excluded objects on policies only if the policy is not None
models.prefetch_related_objects(
[
policy
for policy in (self.policy, site_policy, client_policy, default_policy)
if policy
],
"excluded_agents",
"excluded_sites",
"excluded_clients",
models.Prefetch(
"policychecks", queryset=Check.objects.select_related("script")
),
"autotasks",
)
return {
"agent_policy": (
self.policy
if self.policy and not self.policy.is_agent_excluded(self)
else None
),
"site_policy": (
site_policy
if (site_policy and not site_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
else None
),
"client_policy": (
client_policy
if (client_policy and not client_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
else None
),
"default_policy": (
default_policy
if (default_policy and not default_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
and not self.client.block_policy_inheritance
else None
),
}
def check_run_interval(self) -> int:
interval = self.check_interval
# determine if any agent checks have a custom interval and set the lowest interval
for check in self.get_checks_with_policies():
if check.run_interval and check.run_interval < interval:
# don't allow check runs less than 15s
interval = 15 if check.run_interval < 15 else check.run_interval
return interval
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
from scripts.models import Script
script = Script.objects.get(pk=scriptpk)
# always override if set on script model
if script.run_as_user:
run_as_user = True
parsed_args = script.parse_script_args(self, script.shell, args)
parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)
data = {
"func": "runscriptfull" if full else "runscript",
"timeout": timeout,
"script_args": parsed_args,
"payload": {
"code": script.code,
"shell": script.shell,
},
"run_as_user": run_as_user,
"env_vars": parsed_env_vars,
"nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
"deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
}
if history_pk != 0:
data["id"] = history_pk
running_agent = self
if run_on_any:
nats_ping = {"func": "ping"}
# try on self first
r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = self
else:
for agent in Agent.online_agents():
r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = agent
break
if running_agent.pk == self.pk:
return "Unable to find an online agent"
if wait:
return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
else:
asyncio.run(running_agent.nats_cmd(data, wait=False))
return "ok"
# auto approves updates
def approve_updates(self) -> None:
patch_policy = self.get_patch_policy()
severity_list = []
if patch_policy.critical == "approve":
severity_list.append("Critical")
if patch_policy.important == "approve":
severity_list.append("Important")
if patch_policy.moderate == "approve":
severity_list.append("Moderate")
if patch_policy.low == "approve":
severity_list.append("Low")
if patch_policy.other == "approve":
severity_list.append("")
self.winupdates.filter(severity__in=severity_list, installed=False).exclude(
action="approve"
).update(action="approve")
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self) -> "WinUpdatePolicy":
from winupdate.models import WinUpdatePolicy
# check if site has a patch policy and if so use it
patch_policy = None
agent_policy = self.winupdatepolicy.first()
if not agent_policy:
agent_policy = WinUpdatePolicy.objects.create(agent=self)
# Get the list of policies applied to the agent and select the
# highest priority one.
policies = self.get_agent_policies()
for _, policy in policies.items():
if policy and policy.active and policy.winupdatepolicy.exists():
patch_policy = policy.winupdatepolicy.first()
break
# if policy still doesn't exist return the agent patch policy
if not patch_policy:
return agent_policy
# patch policy exists. check if any agent settings are set to override patch policy
if agent_policy.critical != "inherit":
patch_policy.critical = agent_policy.critical
if agent_policy.important != "inherit":
patch_policy.important = agent_policy.important
if agent_policy.moderate != "inherit":
patch_policy.moderate = agent_policy.moderate
if agent_policy.low != "inherit":
patch_policy.low = agent_policy.low
if agent_policy.other != "inherit":
patch_policy.other = agent_policy.other
if agent_policy.run_time_frequency != "inherit":
patch_policy.run_time_frequency = agent_policy.run_time_frequency
patch_policy.run_time_hour = agent_policy.run_time_hour
patch_policy.run_time_days = agent_policy.run_time_days
if agent_policy.reboot_after_install != "inherit":
patch_policy.reboot_after_install = agent_policy.reboot_after_install
if not agent_policy.reprocess_failed_inherit:
patch_policy.reprocess_failed = agent_policy.reprocess_failed
patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
patch_policy.email_if_fail = agent_policy.email_if_fail
return patch_policy
def get_approved_update_guids(self) -> list[str]:
return list(
self.winupdates.filter(action="approve", installed=False).values_list( # type: ignore
"guid", flat=True
)
)
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
core = get_core_settings()
policies = self.get_agent_policies()
# loop through all policies applied to agent and return an alert_template if found
processed_policies: List[int] = []
for key, policy in policies.items():
# default alert_template will override a default policy with alert template applied
if (
"default" in key
and core.alert_template
and core.alert_template.is_active
and not core.alert_template.is_agent_excluded(self)
):
self.alert_template = core.alert_template
self.save(update_fields=["alert_template"])
return core.alert_template
elif (
policy
and policy.active
and policy.pk not in processed_policies
and policy.alert_template
and policy.alert_template.is_active
and not policy.alert_template.is_agent_excluded(self)
):
self.alert_template = policy.alert_template
self.save(update_fields=["alert_template"])
return policy.alert_template
elif (
"site" in key
and self.site.alert_template
and self.site.alert_template.is_active
and not self.site.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.alert_template
self.save(update_fields=["alert_template"])
return self.site.alert_template
elif (
"client" in key
and self.site.client.alert_template
and self.site.client.alert_template.is_active
and not self.site.client.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.client.alert_template
self.save(update_fields=["alert_template"])
return self.site.client.alert_template
# no alert templates found or agent has been excluded
self.alert_template = None
self.save(update_fields=["alert_template"])
return None
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_availability_alert(
self, skip_create=not self.should_create_alert(alert_template)
)
def get_checks_from_policies(self) -> "List[Check]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance or self.agentchecks.exists():
cache_key = f"agent_{self.agent_id}_checks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"
cached_checks = cache.get(cache_key)
if isinstance(cached_checks, list):
return cached_checks
else:
# clear agent checks that have overridden_by_policy set
self.agentchecks.update(overridden_by_policy=False) # type: ignore
# get agent checks based on policies
checks = Policy.get_policy_checks(self)
cache.set(cache_key, checks, 600)
return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance:
cache_key = f"agent_{self.agent_id}_tasks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"
cached_tasks = cache.get(cache_key)
if isinstance(cached_tasks, list):
return cached_tasks
else:
# get agent tasks based on policies
tasks = Policy.get_policy_tasks(self)
cache.set(cache_key, tasks, 600)
return tasks
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
opts = setup_nats_options()
try:
nc = await nats.connect(**opts)
except:
return "natsdown"
if wait:
try:
msg = await nc.request(
self.agent_id, msgpack.dumps(data), timeout=timeout
)
except TimeoutError:
ret = "timeout"
else:
try:
ret = msgpack.loads(msg.data)
except Exception as e:
ret = str(e)
logger.error(e)
await nc.close()
return ret
else:
await nc.publish(self.agent_id, msgpack.dumps(data))
await nc.flush()
await nc.close()
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
"""
Return type: tuple(message: str, error: bool)
"""
if mode == "tacagent":
if self.plat == AgentPlat.LINUX:
cmd = "systemctl restart tacticalagent.service"
shell = 3
elif self.plat == AgentPlat.DARWIN:
cmd = "launchctl kickstart -k system/tacticalagent"
shell = 3
else:
cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
shell = 1
asyncio.run(
send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
)
return "ok", False
elif mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
if wait:
r = asyncio.run(self.nats_cmd(data, timeout=20))
if r == "ok":
return "ok", False
else:
return str(r), True
else:
asyncio.run(self.nats_cmd(data, timeout=20, wait=False))
return "ok", False
return "invalid", True
def serialize(agent: "Agent") -> Dict[str, Any]:
# serializes the agent and returns json
from .serializers import AgentAuditSerializer
return AgentAuditSerializer(agent).data
def delete_superseded_updates(self) -> None:
with suppress(Exception):
pks = [] # list of pks to delete
kbs = list(self.winupdates.values_list("kb", flat=True))
d = Counter(kbs)
dupes = [k for k, v in d.items() if v > 1]
for dupe in dupes:
titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
# extract the version from the title and sort from oldest to newest
# skip if no version info is available (nothing to parse)
try:
matches = r"(Version|Versão)"
pattern = r"\(" + matches + r"(.*?)\)"
vers = [
re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
for i in titles
]
sorted_vers = sorted(vers, key=LooseVersion)
except:
continue
# append all but the latest version to our list of pks to delete
for ver in sorted_vers[:-1]:
q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
pks.append(q.first().pk)
pks = list(set(pks))
self.winupdates.filter(pk__in=pks).delete()
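    # Editor's note (illustrative, not part of the original source): the pattern above,
    # r"\((Version|Versão)(.*?)\)", pulls the version string out of a Windows update
    # title so duplicates of the same KB can be ordered. For a hypothetical title such as
    #   "Security Intelligence Update for Microsoft Defender - KB2267602 (Version 1.381.2404.0)"
    # group(2).strip() yields "1.381.2404.0", and the LooseVersion sort above keeps only
    # the newest entry per KB while the older ones are queued for deletion.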
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
return bool(
self.overdue_dashboard_alert
or self.overdue_email_alert
or self.overdue_text_alert
or (
alert_template
and (
alert_template.agent_always_alert
or alert_template.agent_always_email
or alert_template.agent_always_text
)
)
)
def send_outage_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
(
f"Data has not been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"within the expected time."
),
alert_template=self.alert_template,
)
def send_recovery_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
(
f"Data has been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"after an interruption in data transmission."
),
alert_template=self.alert_template,
)
def send_outage_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
alert_template=self.alert_template,
)
def send_recovery_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
alert_template=self.alert_template,
)
def cache_agents_alert_template() -> str:
for agent in Agent.objects.only(
"pk", "site", "policy", "alert_template"
).select_related("site", "policy", "alert_template"):
agent.set_alert_template()
return "ok" | null |
189,066 | import random
from django.conf import settings
from tacticalrmm.structs import AgentCheckInConfig
def get_agent_config() -> AgentCheckInConfig:
return AgentCheckInConfig(
checkin_hello=random.randint(*getattr(settings, "CHECKIN_HELLO", (30, 60))),
checkin_agentinfo=random.randint(
*getattr(settings, "CHECKIN_AGENTINFO", (200, 400))
),
checkin_winsvc=random.randint(
*getattr(settings, "CHECKIN_WINSVC", (2400, 3000))
),
checkin_pubip=random.randint(*getattr(settings, "CHECKIN_PUBIP", (300, 500))),
checkin_disks=random.randint(*getattr(settings, "CHECKIN_DISKS", (1000, 2000))),
checkin_sw=random.randint(*getattr(settings, "CHECKIN_SW", (2800, 3500))),
checkin_wmi=random.randint(*getattr(settings, "CHECKIN_WMI", (3000, 4000))),
checkin_syncmesh=random.randint(
*getattr(settings, "CHECKIN_SYNCMESH", (800, 1200))
),
limit_data=getattr(settings, "LIMIT_DATA", False),
install_nushell=getattr(settings, "INSTALL_NUSHELL", False),
install_nushell_version=getattr(settings, "INSTALL_NUSHELL_VERSION", ""),
install_nushell_url=getattr(settings, "INSTALL_NUSHELL_URL", ""),
nushell_enable_config=getattr(settings, "NUSHELL_ENABLE_CONFIG", False),
install_deno=getattr(settings, "INSTALL_DENO", False),
install_deno_version=getattr(settings, "INSTALL_DENO_VERSION", ""),
install_deno_url=getattr(settings, "INSTALL_DENO_URL", ""),
deno_default_permissions=getattr(settings, "DENO_DEFAULT_PERMISSIONS", ""),
) | null |
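# Editor's illustrative sketch (not part of the original source): each CHECKIN_*
# setting above is an inclusive (low, high) tuple that is unpacked into
# random.randint, so every agent gets a jittered interval inside that window.
# A minimal standalone illustration of the same idiom:
import random

low, high = (30, 60)  # mirrors the default CHECKIN_HELLO window above
interval = random.randint(low, high)  # inclusive of both bounds
assert low <= interval <= high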
189,067 | import json
import subprocess
import tempfile
import urllib.parse
from base64 import b64encode
from typing import TYPE_CHECKING, Optional, cast
import requests
import websockets
from django.conf import settings
from django.core.cache import cache
from django.http import FileResponse
from meshctrl.utils import get_auth_token
from tacticalrmm.constants import (
AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX,
CORESETTINGS_CACHE_KEY,
ROLE_CACHE_PREFIX,
TRMM_WS_MAX_SIZE,
AgentPlat,
MeshAgentIdent,
)
def download_mesh_agent(dl_url: str) -> FileResponse:
with tempfile.NamedTemporaryFile(prefix="mesh-", dir=settings.EXE_DIR) as fp:
r = requests.get(dl_url, stream=True, timeout=15)
with open(fp.name, "wb") as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
del r
return FileResponse(open(fp.name, "rb"), as_attachment=True, filename=fp.name) | null |
189,068 | import json
import subprocess
import tempfile
import urllib.parse
from base64 import b64encode
from typing import TYPE_CHECKING, Optional, cast
import requests
import websockets
from django.conf import settings
from django.core.cache import cache
from django.http import FileResponse
from meshctrl.utils import get_auth_token
from tacticalrmm.constants import (
AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX,
CORESETTINGS_CACHE_KEY,
ROLE_CACHE_PREFIX,
TRMM_WS_MAX_SIZE,
AgentPlat,
MeshAgentIdent,
)
def _b64_to_hex(h: str) -> str:
return b64encode(bytes.fromhex(h)).decode().replace(r"/", "$").replace(r"+", "@")
async def send_command_with_mesh(
cmd: str, uri: str, mesh_node_id: str, shell: int, run_as_user: int
) -> None:
node_id = _b64_to_hex(mesh_node_id)
async with websockets.connect(uri) as ws:
await ws.send(
json.dumps(
{
"action": "runcommands",
"cmds": cmd,
"nodeids": [f"node//{node_id}"],
"runAsUser": run_as_user,
"type": shell,
"responseid": "trmm",
}
)
) | null |
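# Editor's note (illustrative, not part of the original source): despite its name,
# _b64_to_hex above converts MeshCentral's hex node id into a modified base64 form,
# substituting the URL-unsafe characters ("/" -> "$", "+" -> "@") so the id can be
# embedded in the "node//<id>" selector used in the websocket payloads. For example:
#   _b64_to_hex("deadbeef")  ->  "3q2@7w=="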
189,069 | import json
import subprocess
import tempfile
import urllib.parse
from base64 import b64encode
from typing import TYPE_CHECKING, Optional, cast
import requests
import websockets
from django.conf import settings
from django.core.cache import cache
from django.http import FileResponse
from meshctrl.utils import get_auth_token
from tacticalrmm.constants import (
AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX,
CORESETTINGS_CACHE_KEY,
ROLE_CACHE_PREFIX,
TRMM_WS_MAX_SIZE,
AgentPlat,
MeshAgentIdent,
)
def _b64_to_hex(h: str) -> str:
return b64encode(bytes.fromhex(h)).decode().replace(r"/", "$").replace(r"+", "@")
async def remove_mesh_agent(uri: str, mesh_node_id: str) -> None:
node_id = _b64_to_hex(mesh_node_id)
async with websockets.connect(uri) as ws:
await ws.send(
json.dumps(
{
"action": "removedevices",
"nodeids": [f"node//{node_id}"],
"responseid": "trmm",
}
)
) | null |
189,070 | import json
import subprocess
import tempfile
import urllib.parse
from base64 import b64encode
from typing import TYPE_CHECKING, Optional, cast
import requests
import websockets
from django.conf import settings
from django.core.cache import cache
from django.http import FileResponse
from meshctrl.utils import get_auth_token
from tacticalrmm.constants import (
AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX,
CORESETTINGS_CACHE_KEY,
ROLE_CACHE_PREFIX,
TRMM_WS_MAX_SIZE,
AgentPlat,
MeshAgentIdent,
)
def get_meshagent_url(
*, ident: "MeshAgentIdent", plat: str, mesh_site: str, mesh_device_id: str
) -> str:
if settings.DOCKER_BUILD:
base = settings.MESH_WS_URL.replace("ws://", "http://")
elif getattr(settings, "TRMM_INSECURE", False):
base = mesh_site.replace("https", "http") + ":4430"
else:
base = mesh_site
if plat == AgentPlat.WINDOWS:
params = {
"id": ident,
"meshid": mesh_device_id,
"installflags": 0,
}
else:
params = {
"id": mesh_device_id,
"installflags": 2,
"meshinstall": ident,
}
return base + "/meshagents?" + urllib.parse.urlencode(params) | null |
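# Editor's illustrative sketch (not part of the original source): the function above
# just joins a base URL with an urlencoded query string. A standalone illustration
# with made-up placeholder values (mesh.example.com, "abc123" and id=3 are
# assumptions, not values taken from this project):
import urllib.parse

params = {"id": 3, "meshid": "abc123", "installflags": 0}  # Windows-style params
print("https://mesh.example.com/meshagents?" + urllib.parse.urlencode(params))
# -> https://mesh.example.com/meshagents?id=3&meshid=abc123&installflags=0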
189,071 | from django.db import migrations
def update_hide_in_summary(apps, schema_editor):
CustomField = apps.get_model("core", "CustomField")
for field in CustomField.objects.filter(hide_in_ui=True):
field.hide_in_summary = True
field.save(update_fields=["hide_in_summary"]) | null |
189,072 | import json
from django.conf import settings
from django.http import HttpResponse
def monitoring_view(function):
def wrap(request, *args, **kwargs):
if request.method != "POST":
return HttpResponse("Invalid request type\n", status=400)
try:
data = json.loads(request.body)
        except Exception:
return HttpResponse("Invalid json\n", status=400)
if "auth" not in data.keys():
return HttpResponse("Invalid payload\n", status=400)
token = getattr(settings, "MON_TOKEN", "")
if not token:
return HttpResponse("Missing token\n", status=401)
if data.get("auth") != token:
return HttpResponse("Not authenticated\n", status=401)
return function(request, *args, **kwargs)
wrap.__doc__ = function.__doc__
wrap.__name__ = function.__name__
return wrap | null |
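# Editor's illustrative sketch (not part of the original source): monitoring_view
# wraps a plain Django view so it only answers authenticated POSTs. A hypothetical
# consumer (the view name is an assumption) would look like:
#
#   @monitoring_view
#   def status(request):
#       return HttpResponse(json.dumps({"agents": 42}), content_type="application/json")
#
# and the caller must POST a JSON body of the form {"auth": "<MON_TOKEN value>"}.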
189,073 | import asyncio
import traceback
from contextlib import suppress
from time import sleep
from typing import TYPE_CHECKING, Any
import nats
from django.conf import settings
from django.db import transaction
from django.db.models import Prefetch
from django.db.utils import DatabaseError
from django.utils import timezone as djangotime
from packaging import version as pyver
from accounts.models import User
from accounts.utils import is_superuser
from agents.models import Agent
from agents.tasks import clear_faults_task, prune_agent_history
from alerts.models import Alert
from alerts.tasks import prune_resolved_alerts
from autotasks.models import AutomatedTask, TaskResult
from checks.models import Check, CheckHistory, CheckResult
from checks.tasks import prune_check_history
from clients.models import Client, Site
from core.mesh_utils import (
MeshSync,
build_mesh_display_name,
has_mesh_perms,
transform_mesh,
transform_trmm,
)
from core.models import CoreSettings
from core.utils import get_core_settings, get_mesh_ws_url
from logs.models import PendingAction
from logs.tasks import prune_audit_log, prune_debug_log
from tacticalrmm.celery import app
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_ONLINE,
AGENT_STATUS_OVERDUE,
RESOLVE_ALERTS_LOCK,
SYNC_MESH_PERMS_TASK_LOCK,
SYNC_SCHED_TASK_LOCK,
AlertSeverity,
AlertType,
PAAction,
PAStatus,
TaskStatus,
TaskSyncStatus,
TaskType,
)
from tacticalrmm.helpers import make_random_password, setup_nats_options
from tacticalrmm.logger import logger
from tacticalrmm.nats_utils import a_nats_cmd
from tacticalrmm.permissions import _has_perm_on_agent
from tacticalrmm.utils import redis_lock
def remove_orphaned_history_results() -> int:
try:
with transaction.atomic():
check_hist_agentids = CheckHistory.objects.values_list(
"agent_id", flat=True
).distinct()
current_agentids = set(Agent.objects.values_list("agent_id", flat=True))
orphaned_agentids = [
i for i in check_hist_agentids if i not in current_agentids
]
count, _ = CheckHistory.objects.filter(
agent_id__in=orphaned_agentids
).delete()
return count
except Exception as e:
logger.error(str(e))
return 0
def clear_faults_task(older_than_days: int) -> None:
from alerts.models import Alert
# https://github.com/amidaware/tacticalrmm/issues/484
agents = Agent.objects.exclude(last_seen__isnull=True).filter(
last_seen__lt=djangotime.now() - djangotime.timedelta(days=older_than_days)
)
for agent in agents:
for check in agent.get_checks_with_policies():
# reset check status
if check.check_result:
check.check_result.status = CheckStatus.PASSING
check.check_result.save(update_fields=["status"])
if check.alert.filter(agent=agent, resolved=False).exists():
alert = Alert.create_or_return_check_alert(check, agent=agent)
if alert:
alert.resolve()
# reset overdue alerts
agent.overdue_email_alert = False
agent.overdue_text_alert = False
agent.overdue_dashboard_alert = False
agent.save(
update_fields=[
"overdue_email_alert",
"overdue_text_alert",
"overdue_dashboard_alert",
]
)
def prune_agent_history(older_than_days: int) -> str:
from .models import AgentHistory
AgentHistory.objects.filter(
time__lt=djangotime.now() - djangotime.timedelta(days=older_than_days)
).delete()
return "ok"
def prune_resolved_alerts(older_than_days: int) -> str:
Alert.objects.filter(resolved=True).filter(
alert_time__lt=djangotime.now() - djangotime.timedelta(days=older_than_days)
).delete()
return "ok"
class AutomatedTask(BaseAuditModel):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="autotasks",
on_delete=models.CASCADE,
null=True,
blank=True,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="autotasks",
null=True,
blank=True,
on_delete=models.CASCADE,
)
custom_field = models.ForeignKey(
"core.CustomField",
related_name="autotasks",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
# format -> [{"type": "script", "script": 1, "name": "Script Name", "timeout": 90, "script_args": [], "env_vars": []}, {"type": "cmd", "command": "whoami", "timeout": 90}]
actions = JSONField(default=list)
assigned_check = models.ForeignKey(
"checks.Check",
null=True,
blank=True,
related_name="assignedtasks",
on_delete=models.SET_NULL,
)
name = models.CharField(max_length=255)
collector_all_output = models.BooleanField(default=False)
enabled = models.BooleanField(default=True)
continue_on_error = models.BooleanField(default=True)
alert_severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_alert = models.BooleanField(default=False)
text_alert = models.BooleanField(default=False)
dashboard_alert = models.BooleanField(default=False)
# options sent to agent for task creation
# general task settings
task_type = models.CharField(
max_length=100, choices=TaskType.choices, default=TaskType.MANUAL
)
win_task_name = models.CharField(
max_length=255, unique=True, blank=True, default=generate_task_name
    )
run_time_date = DateTimeField(null=True, blank=True)
expire_date = DateTimeField(null=True, blank=True)
# daily
daily_interval = models.PositiveSmallIntegerField(
blank=True, null=True, validators=[MinValueValidator(1), MaxValueValidator(255)]
)
# weekly
run_time_bit_weekdays = models.IntegerField(null=True, blank=True)
weekly_interval = models.PositiveSmallIntegerField(
blank=True, null=True, validators=[MinValueValidator(1), MaxValueValidator(52)]
)
run_time_minute = models.CharField(
max_length=5, null=True, blank=True
) # deprecated
# monthly
monthly_days_of_month = models.PositiveBigIntegerField(blank=True, null=True)
monthly_months_of_year = models.PositiveIntegerField(blank=True, null=True)
# monthly days of week
monthly_weeks_of_month = models.PositiveSmallIntegerField(blank=True, null=True)
# additional task settings
task_repetition_duration = models.CharField(max_length=10, null=True, blank=True)
task_repetition_interval = models.CharField(max_length=10, null=True, blank=True)
stop_task_at_duration_end = models.BooleanField(blank=True, default=False)
random_task_delay = models.CharField(max_length=10, null=True, blank=True)
remove_if_not_scheduled = models.BooleanField(default=False)
run_asap_after_missed = models.BooleanField(default=False) # added in agent v1.4.7
task_instance_policy = models.PositiveSmallIntegerField(blank=True, default=1)
# deprecated
managed_by_policy = models.BooleanField(default=False)
# non-database property
task_result: "Union[TaskResult, Dict[None, None]]" = {}
def __str__(self) -> str:
return self.name
def save(self, *args, **kwargs) -> None:
# if task is a policy task clear cache on everything
if self.policy:
cache.delete_many_pattern("site_*_tasks")
cache.delete_many_pattern("agent_*_tasks")
# get old task if exists
old_task = AutomatedTask.objects.get(pk=self.pk) if self.pk else None
super().save(old_model=old_task, *args, **kwargs)
# check if fields were updated that require a sync to the agent and set status to notsynced
if old_task:
for field in self.fields_that_trigger_task_update_on_agent:
if getattr(self, field) != getattr(old_task, field):
if self.policy:
TaskResult.objects.exclude(
sync_status=TaskSyncStatus.INITIAL
).filter(task__policy_id=self.policy.id).update(
sync_status=TaskSyncStatus.NOT_SYNCED
)
else:
TaskResult.objects.filter(agent=self.agent, task=self).update(
sync_status=TaskSyncStatus.NOT_SYNCED
)
def delete(self, *args, **kwargs):
# if task is a policy task clear cache on everything
if self.policy:
cache.delete_many_pattern("site_*_tasks")
cache.delete_many_pattern("agent_*_tasks")
super().delete(*args, **kwargs)
def schedule(self) -> Optional[str]:
if self.task_type == TaskType.MANUAL:
return "Manual"
elif self.task_type == TaskType.CHECK_FAILURE:
return "Every time check fails"
elif self.task_type == TaskType.RUN_ONCE:
return f'Run once on {self.run_time_date.strftime("%m/%d/%Y %I:%M%p")}'
elif self.task_type == TaskType.DAILY:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
if self.daily_interval == 1:
return f"Daily at {run_time_nice}"
else:
return f"Every {self.daily_interval} days at {run_time_nice}"
elif self.task_type == TaskType.WEEKLY:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
days = bitdays_to_string(self.run_time_bit_weekdays)
            if self.weekly_interval != 1:
                return f"{days} at {run_time_nice} every {self.weekly_interval} weeks"
            else:
                return f"{days} at {run_time_nice}"
elif self.task_type == TaskType.MONTHLY:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
months = bitmonths_to_string(self.monthly_months_of_year)
days = bitmonthdays_to_string(self.monthly_days_of_month)
return f"Runs on {months} on days {days} at {run_time_nice}"
elif self.task_type == TaskType.MONTHLY_DOW:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
months = bitmonths_to_string(self.monthly_months_of_year)
weeks = bitweeks_to_string(self.monthly_weeks_of_month)
days = bitdays_to_string(self.run_time_bit_weekdays)
return f"Runs on {months} on {weeks} on {days} at {run_time_nice}"
elif self.task_type == TaskType.ONBOARDING:
return "Onboarding: Runs once on task creation."
return None
def fields_that_trigger_task_update_on_agent(self) -> List[str]:
return FIELDS_TRIGGER_TASK_UPDATE_AGENT
def serialize(task):
# serializes the task and returns json
from .serializers import TaskAuditSerializer
return TaskAuditSerializer(task).data
def create_policy_task(
self, policy: "Policy", assigned_check: "Optional[Check]" = None
) -> None:
# Copies certain properties on this task (self) to a new task and sets it to the supplied Policy
task = AutomatedTask.objects.create(
policy=policy,
assigned_check=assigned_check,
)
for field in POLICY_TASK_FIELDS_TO_COPY:
setattr(task, field, getattr(self, field))
task.save()
# agent version >= 1.8.0
def generate_nats_task_payload(self) -> Dict[str, Any]:
task = {
"pk": self.pk,
"type": "rmm",
"name": self.win_task_name,
"overwrite_task": True,
"enabled": self.enabled,
"trigger": (
self.task_type
if self.task_type != TaskType.CHECK_FAILURE
else TaskType.MANUAL
),
"multiple_instances": self.task_instance_policy or 0,
"delete_expired_task_after": (
self.remove_if_not_scheduled if self.expire_date else False
),
"start_when_available": (
self.run_asap_after_missed
if self.task_type != TaskType.RUN_ONCE
else True
),
}
if self.task_type in (
TaskType.DAILY,
TaskType.WEEKLY,
TaskType.MONTHLY,
TaskType.MONTHLY_DOW,
TaskType.RUN_ONCE,
):
if not self.run_time_date:
self.run_time_date = djangotime.now()
task["start_year"] = self.run_time_date.year
task["start_month"] = self.run_time_date.month
task["start_day"] = self.run_time_date.day
task["start_hour"] = self.run_time_date.hour
task["start_min"] = self.run_time_date.minute
if self.expire_date:
task["expire_year"] = self.expire_date.year
task["expire_month"] = self.expire_date.month
task["expire_day"] = self.expire_date.day
task["expire_hour"] = self.expire_date.hour
task["expire_min"] = self.expire_date.minute
if self.random_task_delay:
task["random_delay"] = convert_to_iso_duration(self.random_task_delay)
if self.task_repetition_interval and self.task_repetition_duration:
task["repetition_interval"] = convert_to_iso_duration(
self.task_repetition_interval
)
task["repetition_duration"] = convert_to_iso_duration(
self.task_repetition_duration
)
task["stop_at_duration_end"] = self.stop_task_at_duration_end
if self.task_type == TaskType.DAILY:
task["day_interval"] = self.daily_interval
elif self.task_type == TaskType.WEEKLY:
task["week_interval"] = self.weekly_interval
task["days_of_week"] = self.run_time_bit_weekdays
elif self.task_type == TaskType.MONTHLY:
                # check if "last day" is configured
if self.monthly_days_of_month >= 0x80000000:
task["days_of_month"] = self.monthly_days_of_month - 0x80000000
task["run_on_last_day_of_month"] = True
else:
task["days_of_month"] = self.monthly_days_of_month
task["run_on_last_day_of_month"] = False
task["months_of_year"] = self.monthly_months_of_year
elif self.task_type == TaskType.MONTHLY_DOW:
task["days_of_week"] = self.run_time_bit_weekdays
task["months_of_year"] = self.monthly_months_of_year
task["weeks_of_month"] = self.monthly_weeks_of_month
return task
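    # Editor's note (illustrative, not part of the original source): for a DAILY task
    # the payload built above would take roughly this shape (all values are made up):
    #   {
    #       "pk": 1, "type": "rmm", "name": "TacticalRMM_abc123", "overwrite_task": True,
    #       "enabled": True, "trigger": "daily", "multiple_instances": 1,
    #       "delete_expired_task_after": False, "start_when_available": False,
    #       "start_year": 2024, "start_month": 1, "start_day": 15,
    #       "start_hour": 8, "start_min": 30, "day_interval": 1,
    #   }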
def create_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
nats_data = {
"func": "schedtask",
"schedtaskpayload": self.generate_nats_task_payload(),
}
logger.debug(nats_data)
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=10))
if r != "ok":
task_result.sync_status = TaskSyncStatus.INITIAL
task_result.save(update_fields=["sync_status"])
logger.error(
f"Unable to create scheduled task {self.name} on {task_result.agent.hostname}: {r}"
)
return "timeout"
else:
task_result.sync_status = TaskSyncStatus.SYNCED
task_result.save(update_fields=["sync_status"])
logger.info(
f"{task_result.agent.hostname} task {self.name} was successfully created."
)
return "ok"
def modify_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
nats_data = {
"func": "schedtask",
"schedtaskpayload": self.generate_nats_task_payload(),
}
logger.debug(nats_data)
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=10))
if r != "ok":
task_result.sync_status = TaskSyncStatus.NOT_SYNCED
task_result.save(update_fields=["sync_status"])
logger.error(
f"Unable to modify scheduled task {self.name} on {task_result.agent.hostname}: {r}"
)
return "timeout"
else:
task_result.sync_status = TaskSyncStatus.SYNCED
task_result.save(update_fields=["sync_status"])
logger.info(
f"{task_result.agent.hostname} task {self.name} was successfully modified."
)
return "ok"
def delete_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
nats_data = {
"func": "delschedtask",
"schedtaskpayload": {"name": self.win_task_name},
}
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=10))
if r != "ok" and "The system cannot find the file specified" not in r:
task_result.sync_status = TaskSyncStatus.PENDING_DELETION
with suppress(DatabaseError):
task_result.save(update_fields=["sync_status"])
logger.error(
f"Unable to delete task {self.name} on {task_result.agent.hostname}: {r}"
)
return "timeout"
else:
self.delete()
logger.info(f"{task_result.agent.hostname} task {self.name} was deleted.")
return "ok"
def run_win_task(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
asyncio.run(
task_result.agent.nats_cmd(
{"func": "runtask", "taskpk": self.pk}, wait=False
)
)
return "ok"
def should_create_alert(self, alert_template=None):
return (
self.dashboard_alert
or self.email_alert
or self.text_alert
or (
alert_template
and (
alert_template.task_always_alert
or alert_template.task_always_email
or alert_template.task_always_text
)
)
)
def prune_check_history(older_than_days: int) -> str:
from .models import CheckHistory
CheckHistory.objects.filter(
x__lt=djangotime.make_aware(dt.datetime.today())
- djangotime.timedelta(days=older_than_days)
).delete()
return "ok"
def get_core_settings() -> "CoreSettings":
from core.models import CORESETTINGS_CACHE_KEY, CoreSettings
coresettings = cache.get(CORESETTINGS_CACHE_KEY)
if coresettings and isinstance(coresettings, CoreSettings):
return coresettings
else:
coresettings = CoreSettings.objects.first()
if not coresettings:
raise CoreSettingsNotFound("CoreSettings not found.")
cache.set(CORESETTINGS_CACHE_KEY, coresettings, 600)
return cast(CoreSettings, coresettings)
def prune_debug_log(older_than_days: int) -> str:
from .models import DebugLog
DebugLog.objects.filter(
entry_time__lt=djangotime.now() - djangotime.timedelta(days=older_than_days)
).delete()
return "ok"
def prune_audit_log(older_than_days: int) -> str:
from .models import AuditLog
AuditLog.objects.filter(
entry_time__lt=djangotime.now() - djangotime.timedelta(days=older_than_days)
).delete()
return "ok"
def core_maintenance_tasks() -> None:
AutomatedTask.objects.filter(
remove_if_not_scheduled=True, expire_date__lt=djangotime.now()
).delete()
remove_orphaned_history_results()
core = get_core_settings()
# remove old CheckHistory data
if core.check_history_prune_days > 0:
prune_check_history.delay(core.check_history_prune_days)
# remove old resolved alerts
if core.resolved_alerts_prune_days > 0:
prune_resolved_alerts.delay(core.resolved_alerts_prune_days)
# remove old agent history
if core.agent_history_prune_days > 0:
prune_agent_history.delay(core.agent_history_prune_days)
# remove old debug logs
if core.debug_log_prune_days > 0:
prune_debug_log.delay(core.debug_log_prune_days)
# remove old audit logs
if core.audit_log_prune_days > 0:
prune_audit_log.delay(core.audit_log_prune_days)
# clear faults
if core.clear_faults_days > 0:
clear_faults_task.delay(core.clear_faults_days) | null |
189,074 | import asyncio
import traceback
from contextlib import suppress
from time import sleep
from typing import TYPE_CHECKING, Any
import nats
from django.conf import settings
from django.db import transaction
from django.db.models import Prefetch
from django.db.utils import DatabaseError
from django.utils import timezone as djangotime
from packaging import version as pyver
from accounts.models import User
from accounts.utils import is_superuser
from agents.models import Agent
from agents.tasks import clear_faults_task, prune_agent_history
from alerts.models import Alert
from alerts.tasks import prune_resolved_alerts
from autotasks.models import AutomatedTask, TaskResult
from checks.models import Check, CheckHistory, CheckResult
from checks.tasks import prune_check_history
from clients.models import Client, Site
from core.mesh_utils import (
MeshSync,
build_mesh_display_name,
has_mesh_perms,
transform_mesh,
transform_trmm,
)
from core.models import CoreSettings
from core.utils import get_core_settings, get_mesh_ws_url
from logs.models import PendingAction
from logs.tasks import prune_audit_log, prune_debug_log
from tacticalrmm.celery import app
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_ONLINE,
AGENT_STATUS_OVERDUE,
RESOLVE_ALERTS_LOCK,
SYNC_MESH_PERMS_TASK_LOCK,
SYNC_SCHED_TASK_LOCK,
AlertSeverity,
AlertType,
PAAction,
PAStatus,
TaskStatus,
TaskSyncStatus,
TaskType,
)
from tacticalrmm.helpers import make_random_password, setup_nats_options
from tacticalrmm.logger import logger
from tacticalrmm.nats_utils import a_nats_cmd
from tacticalrmm.permissions import _has_perm_on_agent
from tacticalrmm.utils import redis_lock
class PendingAction(models.Model):
objects = PermissionQuerySet.as_manager()
id = models.BigAutoField(primary_key=True)
agent = models.ForeignKey(
"agents.Agent",
related_name="pendingactions",
on_delete=models.CASCADE,
)
entry_time = models.DateTimeField(auto_now_add=True)
action_type = models.CharField(
max_length=255, choices=PAAction.choices, null=True, blank=True
)
status = models.CharField(
max_length=255,
choices=PAStatus.choices,
default=PAStatus.PENDING,
)
details = models.JSONField(null=True, blank=True)
def __str__(self) -> str:
return f"{self.agent.hostname} - {self.action_type}"
def due(self) -> str:
if self.action_type == PAAction.SCHED_REBOOT:
return cast(str, self.details["time"])
elif self.action_type == PAAction.AGENT_UPDATE:
return "Next update cycle"
elif self.action_type == PAAction.CHOCO_INSTALL:
return "ASAP"
return "On next checkin"
def description(self) -> Optional[str]:
if self.action_type == PAAction.SCHED_REBOOT:
return "Device pending reboot"
elif self.action_type == PAAction.AGENT_UPDATE:
return f"Agent update to {self.details['version']}"
elif self.action_type == PAAction.CHOCO_INSTALL:
return f"{self.details['name']} software install"
elif self.action_type in (
PAAction.RUN_CMD,
PAAction.RUN_SCRIPT,
PAAction.RUN_PATCH_SCAN,
PAAction.RUN_PATCH_INSTALL,
):
return f"{self.action_type}"
return None
def resolve_pending_actions() -> None:
# change agent update pending status to completed if agent has just updated
actions: "QuerySet[PendingAction]" = (
PendingAction.objects.select_related("agent")
.defer("agent__services", "agent__wmi_detail")
.filter(action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING)
)
to_update: list[int] = [
action.id
for action in actions
if pyver.parse(action.agent.version) == pyver.parse(settings.LATEST_AGENT_VER)
and action.agent.status == AGENT_STATUS_ONLINE
]
PendingAction.objects.filter(pk__in=to_update).update(status=PAStatus.COMPLETED) | null |
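# Editor's illustrative sketch (not part of the original source): the comparison above
# relies on packaging's Version objects rather than plain string comparison, so
# "2.10.0" correctly sorts above "2.9.1". Standalone illustration:
from packaging import version as pyver

assert pyver.parse("2.10.0") > pyver.parse("2.9.1")  # numeric, not lexicographic
assert pyver.parse("1.4.7") == pyver.parse("1.4.7")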
189,075 | import asyncio
import traceback
from contextlib import suppress
from time import sleep
from typing import TYPE_CHECKING, Any
import nats
from django.conf import settings
from django.db import transaction
from django.db.models import Prefetch
from django.db.utils import DatabaseError
from django.utils import timezone as djangotime
from packaging import version as pyver
from accounts.models import User
from accounts.utils import is_superuser
from agents.models import Agent
from agents.tasks import clear_faults_task, prune_agent_history
from alerts.models import Alert
from alerts.tasks import prune_resolved_alerts
from autotasks.models import AutomatedTask, TaskResult
from checks.models import Check, CheckHistory, CheckResult
from checks.tasks import prune_check_history
from clients.models import Client, Site
from core.mesh_utils import (
MeshSync,
build_mesh_display_name,
has_mesh_perms,
transform_mesh,
transform_trmm,
)
from core.models import CoreSettings
from core.utils import get_core_settings, get_mesh_ws_url
from logs.models import PendingAction
from logs.tasks import prune_audit_log, prune_debug_log
from tacticalrmm.celery import app
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_ONLINE,
AGENT_STATUS_OVERDUE,
RESOLVE_ALERTS_LOCK,
SYNC_MESH_PERMS_TASK_LOCK,
SYNC_SCHED_TASK_LOCK,
AlertSeverity,
AlertType,
PAAction,
PAStatus,
TaskStatus,
TaskSyncStatus,
TaskType,
)
from tacticalrmm.helpers import make_random_password, setup_nats_options
from tacticalrmm.logger import logger
from tacticalrmm.nats_utils import a_nats_cmd
from tacticalrmm.permissions import _has_perm_on_agent
from tacticalrmm.utils import redis_lock
def _get_agent_qs() -> "QuerySet[Agent]":
qs: "QuerySet[Agent]" = (
Agent.objects.defer(*AGENT_DEFER)
.select_related(
"site__server_policy",
"site__workstation_policy",
"site__client__server_policy",
"site__client__workstation_policy",
"policy",
"policy__alert_template",
"alert_template",
)
.prefetch_related(
Prefetch(
"agentchecks",
queryset=Check.objects.select_related("script"),
),
Prefetch(
"checkresults",
queryset=CheckResult.objects.select_related("assigned_check"),
),
Prefetch(
"taskresults",
queryset=TaskResult.objects.select_related("task"),
),
"autotasks",
)
)
return qs
class Alert(models.Model):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="agent",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_check = models.ForeignKey(
"checks.Check",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
alert_type = models.CharField(
max_length=20, choices=AlertType.choices, default=AlertType.AVAILABILITY
)
message = models.TextField(null=True, blank=True)
alert_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
snoozed = models.BooleanField(default=False)
snooze_until = models.DateTimeField(null=True, blank=True)
resolved = models.BooleanField(default=False)
resolved_on = models.DateTimeField(null=True, blank=True)
severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_sent = models.DateTimeField(null=True, blank=True)
resolved_email_sent = models.DateTimeField(null=True, blank=True)
sms_sent = models.DateTimeField(null=True, blank=True)
resolved_sms_sent = models.DateTimeField(null=True, blank=True)
hidden = models.BooleanField(default=False)
action_run = models.DateTimeField(null=True, blank=True)
action_stdout = models.TextField(null=True, blank=True)
action_stderr = models.TextField(null=True, blank=True)
action_retcode = models.BigIntegerField(null=True, blank=True)
action_execution_time = models.CharField(max_length=100, null=True, blank=True)
resolved_action_run = models.DateTimeField(null=True, blank=True)
resolved_action_stdout = models.TextField(null=True, blank=True)
resolved_action_stderr = models.TextField(null=True, blank=True)
resolved_action_retcode = models.BigIntegerField(null=True, blank=True)
resolved_action_execution_time = models.CharField(
max_length=100, null=True, blank=True
)
def __str__(self) -> str:
return f"{self.alert_type} - {self.message}"
def assigned_agent(self) -> "Optional[Agent]":
return self.agent
def site(self) -> "Site":
return self.agent.site
def client(self) -> "Client":
return self.agent.client
def resolve(self) -> None:
self.resolved = True
self.resolved_on = djangotime.now()
self.snoozed = False
self.snooze_until = None
self.save(update_fields=["resolved", "resolved_on", "snoozed", "snooze_until"])
def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
if not cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
agent=agent,
alert_type=AlertType.AVAILABILITY,
severity=AlertSeverity.ERROR,
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
# need to pass agent if the check is a policy
if not cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_check=check,
agent=agent,
alert_type=AlertType.CHECK,
severity=(
check.alert_severity
if check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else alert_severity
),
message=f"{agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_check=check,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
if not cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_task=task,
agent=agent,
alert_type=AlertType.TASK,
severity=task.alert_severity,
message=f"{agent.hostname} has task: {task.name} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_task=task,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
dashboard_severities = None
email_severities = None
text_severities = None
always_dashboard = None
always_email = None
always_text = None
alert_interval = None
email_task = None
text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_outage_email_task, agent_outage_sms_task
email_task = agent_outage_email_task
text_task = agent_outage_sms_task
email_alert = instance.overdue_email_alert
text_alert = instance.overdue_text_alert
dashboard_alert = instance.overdue_dashboard_alert
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
alert_severity = AlertSeverity.ERROR
agent = instance
dashboard_severities = [AlertSeverity.ERROR]
email_severities = [AlertSeverity.ERROR]
text_severities = [AlertSeverity.ERROR]
# set alert_template settings
if alert_template:
always_dashboard = alert_template.agent_always_alert
always_email = alert_template.agent_always_email
always_text = alert_template.agent_always_text
alert_interval = alert_template.agent_periodic_alert_days
run_script_action = alert_template.agent_script_actions
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_check_email_alert_task,
handle_check_sms_alert_task,
)
email_task = handle_check_email_alert_task
text_task = handle_check_sms_alert_task
email_alert = instance.assigned_check.email_alert
text_alert = instance.assigned_check.text_alert
dashboard_alert = instance.assigned_check.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = (
instance.assigned_check.alert_severity
if instance.assigned_check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else instance.alert_severity
)
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = (
alert_template.check_dashboard_alert_severity
or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
AlertSeverity.INFO,
]
)
email_severities = alert_template.check_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.check_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.check_always_alert
always_email = alert_template.check_always_email
always_text = alert_template.check_always_text
alert_interval = alert_template.check_periodic_alert_days
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import handle_task_email_alert, handle_task_sms_alert
email_task = handle_task_email_alert
text_task = handle_task_sms_alert
email_alert = instance.task.email_alert
text_alert = instance.task.text_alert
dashboard_alert = instance.task.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = instance.task.alert_severity
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = alert_template.task_dashboard_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
email_severities = alert_template.task_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.task_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.task_always_alert
always_email = alert_template.task_always_email
always_text = alert_template.task_always_text
alert_interval = alert_template.task_periodic_alert_days
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
# check if alert severity changed and update the alert
if alert_severity != alert.severity:
alert.severity = alert_severity
alert.save(update_fields=["severity"])
# create alert in dashboard if enabled
if dashboard_alert or always_dashboard:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and dashboard_severities
and alert.severity in dashboard_severities
):
alert.hidden = False
alert.save(update_fields=["hidden"])
# send email if enabled
if email_alert or always_email:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and email_severities
and alert.severity in email_severities
):
email_task.delay(
pk=alert.pk,
alert_interval=alert_interval,
)
# send text if enabled
if text_alert or always_text:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and text_severities
and alert.severity in text_severities
):
text_task.delay(pk=alert.pk, alert_interval=alert_interval)
# check if any scripts should be run
if (
alert_template
and alert_template.action
and run_script_action
and not alert.action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.action,
username="alert-action-failure",
)
r = agent.run_script(
scriptpk=alert_template.action.pk,
args=alert.parse_script_args(alert_template.action_args),
timeout=alert_template.action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.action_retcode = r["retcode"]
alert.action_stdout = r["stdout"]
alert.action_stderr = r["stderr"]
alert.action_execution_time = "{:.4f}".format(r["execution_time"])
alert.action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) failure alert",
)
def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
email_on_resolved = False
text_on_resolved = False
resolved_email_task = None
resolved_text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_recovery_email_task, agent_recovery_sms_task
resolved_email_task = agent_recovery_email_task
resolved_text_task = agent_recovery_sms_task
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
agent = instance
if alert_template:
email_on_resolved = alert_template.agent_email_on_resolved
text_on_resolved = alert_template.agent_text_on_resolved
run_script_action = alert_template.agent_script_actions
if agent.overdue_email_alert:
email_on_resolved = True
if agent.overdue_text_alert:
text_on_resolved = True
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_resolved_check_email_alert_task,
handle_resolved_check_sms_alert_task,
)
resolved_email_task = handle_resolved_check_email_alert_task
resolved_text_task = handle_resolved_check_sms_alert_task
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.check_email_on_resolved
text_on_resolved = alert_template.check_text_on_resolved
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import (
handle_resolved_task_email_alert,
handle_resolved_task_sms_alert,
)
resolved_email_task = handle_resolved_task_email_alert
resolved_text_task = handle_resolved_task_sms_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.task_email_on_resolved
text_on_resolved = alert_template.task_text_on_resolved
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
alert.resolve()
        # check if a resolved email notification should be sent
if email_on_resolved and not alert.resolved_email_sent:
resolved_email_task.delay(pk=alert.pk)
# check if resolved text should be sent
if text_on_resolved and not alert.resolved_sms_sent:
resolved_text_task.delay(pk=alert.pk)
# check if resolved script should be run
if (
alert_template
and alert_template.resolved_action
and run_script_action
and not alert.resolved_action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
                script=alert_template.resolved_action,
username="alert-action-resolved",
)
r = agent.run_script(
scriptpk=alert_template.resolved_action.pk,
args=alert.parse_script_args(alert_template.resolved_action_args),
timeout=alert_template.resolved_action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.resolved_action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.resolved_action_retcode = r["retcode"]
alert.resolved_action_stdout = r["stdout"]
alert.resolved_action_stderr = r["stderr"]
alert.resolved_action_execution_time = "{:.4f}".format(
r["execution_time"]
)
alert.resolved_action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) resolved alert",
)
def parse_script_args(self, args: List[str]) -> List[str]:
if not args:
return []
temp_args = []
# pattern to match for injection
pattern = re.compile(".*\\{\\{alert\\.(.*)\\}\\}.*")
for arg in args:
if match := pattern.match(arg):
name = match.group(1)
# check if attr exists and isn't a function
if hasattr(self, name) and not callable(getattr(self, name)):
value = f"'{getattr(self, name)}'"
else:
continue
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
except re.error:
temp_args.append(re.sub("\\{\\{.*\\}\\}", re.escape(value), arg))
except Exception as e:
DebugLog.error(log_type=DebugLogType.SCRIPTING, message=str(e))
continue
else:
temp_args.append(arg)
return temp_args
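    # Editor's note (illustrative, not part of the original source): given script args
    # such as ["-Severity", "{{alert.severity}}"] and an Alert whose severity is
    # "error", the loop above rewrites the second argument to "'error'" (the value is
    # wrapped in single quotes) while plain arguments pass through untouched.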
def resolve_alerts_task(self) -> str:
with redis_lock(RESOLVE_ALERTS_LOCK, self.app.oid) as acquired:
if not acquired:
return f"{self.app.oid} still running"
# TODO rework this to not use an agent queryset, use Alerts
for agent in _get_agent_qs():
if (
pyver.parse(agent.version) >= pyver.parse("1.6.0")
and agent.status == AGENT_STATUS_ONLINE
):
# handles any alerting actions
if Alert.objects.filter(
alert_type=AlertType.AVAILABILITY, agent=agent, resolved=False
).exists():
Alert.handle_alert_resolve(agent)
return "completed" | null |
189,076 | import asyncio
import traceback
from contextlib import suppress
from time import sleep
from typing import TYPE_CHECKING, Any
import nats
from django.conf import settings
from django.db import transaction
from django.db.models import Prefetch
from django.db.utils import DatabaseError
from django.utils import timezone as djangotime
from packaging import version as pyver
from accounts.models import User
from accounts.utils import is_superuser
from agents.models import Agent
from agents.tasks import clear_faults_task, prune_agent_history
from alerts.models import Alert
from alerts.tasks import prune_resolved_alerts
from autotasks.models import AutomatedTask, TaskResult
from checks.models import Check, CheckHistory, CheckResult
from checks.tasks import prune_check_history
from clients.models import Client, Site
from core.mesh_utils import (
MeshSync,
build_mesh_display_name,
has_mesh_perms,
transform_mesh,
transform_trmm,
)
from core.models import CoreSettings
from core.utils import get_core_settings, get_mesh_ws_url
from logs.models import PendingAction
from logs.tasks import prune_audit_log, prune_debug_log
from tacticalrmm.celery import app
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_ONLINE,
AGENT_STATUS_OVERDUE,
RESOLVE_ALERTS_LOCK,
SYNC_MESH_PERMS_TASK_LOCK,
SYNC_SCHED_TASK_LOCK,
AlertSeverity,
AlertType,
PAAction,
PAStatus,
TaskStatus,
TaskSyncStatus,
TaskType,
)
from tacticalrmm.helpers import make_random_password, setup_nats_options
from tacticalrmm.logger import logger
from tacticalrmm.nats_utils import a_nats_cmd
from tacticalrmm.permissions import _has_perm_on_agent
from tacticalrmm.utils import redis_lock
def _get_agent_qs() -> "QuerySet[Agent]":
qs: "QuerySet[Agent]" = (
Agent.objects.defer(*AGENT_DEFER)
.select_related(
"site__server_policy",
"site__workstation_policy",
"site__client__server_policy",
"site__client__workstation_policy",
"policy",
"policy__alert_template",
"alert_template",
)
.prefetch_related(
Prefetch(
"agentchecks",
queryset=Check.objects.select_related("script"),
),
Prefetch(
"checkresults",
queryset=CheckResult.objects.select_related("assigned_check"),
),
Prefetch(
"taskresults",
queryset=TaskResult.objects.select_related("task"),
),
"autotasks",
)
)
return qs
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat: "AgentPlat" = models.CharField( # type: ignore
max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
goarch: "GoArch" = models.CharField( # type: ignore
max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(
max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
return self.hostname
def save(self, *args, **kwargs):
# prevent recursion since calling set_alert_template() also calls save()
if not hasattr(self, "_processing_set_alert_template"):
self._processing_set_alert_template = False
if self.pk and not self._processing_set_alert_template:
orig = Agent.objects.get(pk=self.pk)
mon_type_changed = self.monitoring_type != orig.monitoring_type
site_changed = self.site_id != orig.site_id
if mon_type_changed or site_changed:
self._processing_set_alert_template = True
self.set_alert_template()
self._processing_set_alert_template = False
super().save(*args, **kwargs)
def client(self) -> "Client":
return self.site.client
def timezone(self) -> str:
        # return the default timezone unless the timezone is explicitly set per agent
if self.time_zone:
return self.time_zone
return get_core_settings().default_time_zone
def is_posix(self) -> bool:
return self.plat in {AgentPlat.LINUX, AgentPlat.DARWIN}
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
elif "32 bit" in self.operating_system or "32bit" in self.operating_system:
return "32"
return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
ver = settings.LATEST_AGENT_VER
if not self.goarch:
DebugLog.warning(
agent=self,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
)
return "noarch"
if pyver.parse(self.version) <= pyver.parse("1.3.0"):
return "not supported"
url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"
if not force:
if self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).exists():
self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).delete()
PendingAction.objects.create(
agent=self,
action_type=PAAction.AGENT_UPDATE,
details={
"url": url,
"version": ver,
"inno": bin,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": ver,
"inno": bin,
},
}
asyncio.run(self.nats_cmd(nats_data, wait=False))
return "created"
def status(self) -> str:
now = djangotime.now()
offline = now - djangotime.timedelta(minutes=self.offline_time)
overdue = now - djangotime.timedelta(minutes=self.overdue_time)
if self.last_seen is not None:
if (self.last_seen < offline) and (self.last_seen > overdue):
return AGENT_STATUS_OFFLINE
elif (self.last_seen < offline) and (self.last_seen < overdue):
return AGENT_STATUS_OVERDUE
else:
return AGENT_STATUS_ONLINE
else:
return AGENT_STATUS_OFFLINE
def checks(self) -> Dict[str, Any]:
total, passing, failing, warning, info = 0, 0, 0, 0, 0
for check in self.get_checks_with_policies(exclude_overridden=True):
total += 1
if (
not hasattr(check.check_result, "status")
or isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.PASSING
):
passing += 1
elif (
isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.FAILING
):
alert_severity = (
check.check_result.alert_severity
if check.check_type
in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
)
else check.alert_severity
)
if alert_severity == AlertSeverity.ERROR:
failing += 1
elif alert_severity == AlertSeverity.WARNING:
warning += 1
elif alert_severity == AlertSeverity.INFO:
info += 1
ret = {
"total": total,
"passing": passing,
"failing": failing,
"warning": warning,
"info": info,
"has_failing_checks": failing > 0 or warning > 0,
}
return ret
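    # Editor's note (illustrative, not part of the original source): for an agent with,
    # say, 6 checks of which one fails at ERROR severity and one at WARNING, the
    # property above would return
    #   {"total": 6, "passing": 4, "failing": 1, "warning": 1, "info": 0,
    #    "has_failing_checks": True}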
def pending_actions_count(self) -> int:
ret = cache.get(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}")
if ret is None:
ret = self.pendingactions.filter(status=PAStatus.PENDING).count()
cache.set(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}", ret, 600)
return ret
def cpu_model(self) -> List[str]:
if self.is_posix:
try:
return cast(List[str], self.wmi_detail["cpus"])
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
for cpu in cpus:
name = [x["Name"] for x in cpu if "Name" in x][0]
lp, nc = "", ""
with suppress(Exception):
lp = [
x["NumberOfLogicalProcessors"]
for x in cpu
if "NumberOfCores" in x
][0]
nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
if lp and nc:
cpu_string = f"{name}, {nc}C/{lp}T"
else:
cpu_string = name
ret.append(cpu_string)
return ret
except:
return ["unknown cpu model"]
def graphics(self) -> str:
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
for i in graphics:
caption = [x["Caption"] for x in i if "Caption" in x][0]
if "microsoft remote display adapter" in caption.lower():
mrda.append("yes")
continue
ret.append([x["Caption"] for x in i if "Caption" in x][0])
# only return this if no other graphics cards
if not ret and mrda:
return "Microsoft Remote Display Adapter"
return ", ".join(ret)
except:
return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
except:
return "error getting local ips"
for i in ips:
try:
addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
except:
continue
if addr is None:
continue
for ip in addr:
if validators.ipv4(ip):
ret.append(ip)
if len(ret) == 1:
return cast(str, ret[0])
return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
if self.is_posix:
try:
return cast(str, self.wmi_detail["make_model"])
except:
return "error getting make/model"
with suppress(Exception):
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
model = [x["Model"] for x in comp_sys if "Model" in x][0]
if "to be filled" in model.lower():
mobo = self.wmi_detail["base_board"][0]
make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
model = [x["Product"] for x in mobo if "Product" in x][0]
if make.lower() == "lenovo":
sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
if "to be filled" not in sysfam.lower():
model = sysfam
return f"{make} {model}"
with suppress(Exception):
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
return "unknown make/model"
def physical_disks(self) -> Sequence[Disk]:
if self.is_posix:
try:
return cast(List[Disk], self.wmi_detail["disks"])
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
for disk in disks:
interface_type = [
x["InterfaceType"] for x in disk if "InterfaceType" in x
][0]
if interface_type == "USB":
continue
model = [x["Caption"] for x in disk if "Caption" in x][0]
size = [x["Size"] for x in disk if "Size" in x][0]
size_in_gb = round(int(size) / 1_073_741_824)
ret.append(f"{model} {size_in_gb:,}GB {interface_type}")
return ret
except:
return ["unknown disk"]
def serial_number(self) -> str:
if self.is_posix:
try:
return self.wmi_detail["serialnumber"]
except:
return ""
try:
return self.wmi_detail["bios"][0][0]["SerialNumber"]
except:
return ""
def hex_mesh_node_id(self) -> str:
return _b64_to_hex(self.mesh_node_id)
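    # note: status is computed from last_seen, so online agents are filtered in Python rather than in SQL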
def online_agents(cls, min_version: str = "") -> "List[Agent]":
if min_version:
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if pyver.parse(i.version) >= pyver.parse(min_version)
and i.status == AGENT_STATUS_ONLINE
]
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if i.status == AGENT_STATUS_ONLINE
]
def is_supported_script(self, platforms: List[str]) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
if exclude_overridden:
checks = (
list(
check
for check in self.agentchecks.all()
if not check.overridden_by_policy
)
+ self.get_checks_from_policies()
)
else:
checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
results = self.taskresults.all() # type: ignore
for task in tasks:
for result in results:
if result.task.id == task.pk:
task.task_result = result
break
return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
results = self.checkresults.all() # type: ignore
for check in checks:
for result in results:
if result.assigned_check.id == check.pk:
check.check_result = result
break
return checks
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
from checks.models import Check
site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
default_policy = getattr(
get_core_settings(), f"{self.monitoring_type}_policy", None
)
        # prefetch excluded objects on policies, but only if the policy is not None
models.prefetch_related_objects(
[
policy
for policy in (self.policy, site_policy, client_policy, default_policy)
if policy
],
"excluded_agents",
"excluded_sites",
"excluded_clients",
models.Prefetch(
"policychecks", queryset=Check.objects.select_related("script")
),
"autotasks",
)
return {
"agent_policy": (
self.policy
if self.policy and not self.policy.is_agent_excluded(self)
else None
),
"site_policy": (
site_policy
if (site_policy and not site_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
else None
),
"client_policy": (
client_policy
if (client_policy and not client_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
else None
),
"default_policy": (
default_policy
if (default_policy and not default_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
and not self.client.block_policy_inheritance
else None
),
}
def check_run_interval(self) -> int:
interval = self.check_interval
# determine if any agent checks have a custom interval and set the lowest interval
for check in self.get_checks_with_policies():
if check.run_interval and check.run_interval < interval:
# don't allow check runs less than 15s
interval = 15 if check.run_interval < 15 else check.run_interval
return interval
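    # build a runscript NATS payload for the given script; with run_on_any, ping self first and fall back to any online agent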
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
from scripts.models import Script
script = Script.objects.get(pk=scriptpk)
# always override if set on script model
if script.run_as_user:
run_as_user = True
parsed_args = script.parse_script_args(self, script.shell, args)
parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)
data = {
"func": "runscriptfull" if full else "runscript",
"timeout": timeout,
"script_args": parsed_args,
"payload": {
"code": script.code,
"shell": script.shell,
},
"run_as_user": run_as_user,
"env_vars": parsed_env_vars,
"nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
"deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
}
if history_pk != 0:
data["id"] = history_pk
running_agent = self
if run_on_any:
nats_ping = {"func": "ping"}
# try on self first
r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = self
else:
for agent in Agent.online_agents():
r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = agent
break
if running_agent.pk == self.pk:
return "Unable to find an online agent"
if wait:
return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
else:
asyncio.run(running_agent.nats_cmd(data, wait=False))
return "ok"
# auto approves updates
def approve_updates(self) -> None:
patch_policy = self.get_patch_policy()
severity_list = []
if patch_policy.critical == "approve":
severity_list.append("Critical")
if patch_policy.important == "approve":
severity_list.append("Important")
if patch_policy.moderate == "approve":
severity_list.append("Moderate")
if patch_policy.low == "approve":
severity_list.append("Low")
if patch_policy.other == "approve":
severity_list.append("")
self.winupdates.filter(severity__in=severity_list, installed=False).exclude(
action="approve"
).update(action="approve")
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self) -> "WinUpdatePolicy":
from winupdate.models import WinUpdatePolicy
# check if site has a patch policy and if so use it
patch_policy = None
agent_policy = self.winupdatepolicy.first()
if not agent_policy:
agent_policy = WinUpdatePolicy.objects.create(agent=self)
# Get the list of policies applied to the agent and select the
# highest priority one.
policies = self.get_agent_policies()
for _, policy in policies.items():
if policy and policy.active and policy.winupdatepolicy.exists():
patch_policy = policy.winupdatepolicy.first()
break
# if policy still doesn't exist return the agent patch policy
if not patch_policy:
return agent_policy
# patch policy exists. check if any agent settings are set to override patch policy
if agent_policy.critical != "inherit":
patch_policy.critical = agent_policy.critical
if agent_policy.important != "inherit":
patch_policy.important = agent_policy.important
if agent_policy.moderate != "inherit":
patch_policy.moderate = agent_policy.moderate
if agent_policy.low != "inherit":
patch_policy.low = agent_policy.low
if agent_policy.other != "inherit":
patch_policy.other = agent_policy.other
if agent_policy.run_time_frequency != "inherit":
patch_policy.run_time_frequency = agent_policy.run_time_frequency
patch_policy.run_time_hour = agent_policy.run_time_hour
patch_policy.run_time_days = agent_policy.run_time_days
if agent_policy.reboot_after_install != "inherit":
patch_policy.reboot_after_install = agent_policy.reboot_after_install
if not agent_policy.reprocess_failed_inherit:
patch_policy.reprocess_failed = agent_policy.reprocess_failed
patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
patch_policy.email_if_fail = agent_policy.email_if_fail
return patch_policy
def get_approved_update_guids(self) -> list[str]:
return list(
self.winupdates.filter(action="approve", installed=False).values_list( # type: ignore
"guid", flat=True
)
)
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
core = get_core_settings()
policies = self.get_agent_policies()
# loop through all policies applied to agent and return an alert_template if found
processed_policies: List[int] = []
for key, policy in policies.items():
# default alert_template will override a default policy with alert template applied
if (
"default" in key
and core.alert_template
and core.alert_template.is_active
and not core.alert_template.is_agent_excluded(self)
):
self.alert_template = core.alert_template
self.save(update_fields=["alert_template"])
return core.alert_template
elif (
policy
and policy.active
and policy.pk not in processed_policies
and policy.alert_template
and policy.alert_template.is_active
and not policy.alert_template.is_agent_excluded(self)
):
self.alert_template = policy.alert_template
self.save(update_fields=["alert_template"])
return policy.alert_template
elif (
"site" in key
and self.site.alert_template
and self.site.alert_template.is_active
and not self.site.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.alert_template
self.save(update_fields=["alert_template"])
return self.site.alert_template
elif (
"client" in key
and self.site.client.alert_template
and self.site.client.alert_template.is_active
and not self.site.client.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.client.alert_template
self.save(update_fields=["alert_template"])
return self.site.client.alert_template
# no alert templates found or agent has been excluded
self.alert_template = None
self.save(update_fields=["alert_template"])
return None
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_availability_alert(
self, skip_create=not self.should_create_alert(alert_template)
)
def get_checks_from_policies(self) -> "List[Check]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance or self.agentchecks.exists():
cache_key = f"agent_{self.agent_id}_checks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"
cached_checks = cache.get(cache_key)
if isinstance(cached_checks, list):
return cached_checks
else:
            # reset the overridden_by_policy flag on all agent checks before re-evaluating policies
self.agentchecks.update(overridden_by_policy=False) # type: ignore
# get agent checks based on policies
checks = Policy.get_policy_checks(self)
cache.set(cache_key, checks, 600)
return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance:
cache_key = f"agent_{self.agent_id}_tasks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"
cached_tasks = cache.get(cache_key)
if isinstance(cached_tasks, list):
return cached_tasks
else:
# get agent tasks based on policies
tasks = Policy.get_policy_tasks(self)
cache.set(cache_key, tasks, 600)
return tasks
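    # send a msgpack-encoded message to this agent over NATS; request/reply when wait=True, fire-and-forget otherwise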
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
opts = setup_nats_options()
try:
nc = await nats.connect(**opts)
except:
return "natsdown"
if wait:
try:
msg = await nc.request(
self.agent_id, msgpack.dumps(data), timeout=timeout
)
except TimeoutError:
ret = "timeout"
else:
try:
ret = msgpack.loads(msg.data)
except Exception as e:
ret = str(e)
logger.error(e)
await nc.close()
return ret
else:
await nc.publish(self.agent_id, msgpack.dumps(data))
await nc.flush()
await nc.close()
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
"""
Return type: tuple(message: str, error: bool)
"""
if mode == "tacagent":
if self.plat == AgentPlat.LINUX:
cmd = "systemctl restart tacticalagent.service"
shell = 3
elif self.plat == AgentPlat.DARWIN:
cmd = "launchctl kickstart -k system/tacticalagent"
shell = 3
else:
cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
shell = 1
asyncio.run(
send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
)
return "ok", False
elif mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
if wait:
r = asyncio.run(self.nats_cmd(data, timeout=20))
if r == "ok":
return "ok", False
else:
return str(r), True
else:
asyncio.run(self.nats_cmd(data, timeout=20, wait=False))
return "ok", False
return "invalid", True
def serialize(agent: "Agent") -> Dict[str, Any]:
# serializes the agent and returns json
from .serializers import AgentAuditSerializer
return AgentAuditSerializer(agent).data
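    # remove duplicate Windows update entries for the same KB, keeping only the newest version parsed from the title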
def delete_superseded_updates(self) -> None:
with suppress(Exception):
pks = [] # list of pks to delete
kbs = list(self.winupdates.values_list("kb", flat=True))
d = Counter(kbs)
dupes = [k for k, v in d.items() if v > 1]
for dupe in dupes:
titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
# extract the version from the title and sort from oldest to newest
                # skip if no version info is available, since there is nothing to parse
try:
matches = r"(Version|Versão)"
pattern = r"\(" + matches + r"(.*?)\)"
vers = [
re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
for i in titles
]
sorted_vers = sorted(vers, key=LooseVersion)
except:
continue
# append all but the latest version to our list of pks to delete
for ver in sorted_vers[:-1]:
q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
pks.append(q.first().pk)
pks = list(set(pks))
self.winupdates.filter(pk__in=pks).delete()
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
return bool(
self.overdue_dashboard_alert
or self.overdue_email_alert
or self.overdue_text_alert
or (
alert_template
and (
alert_template.agent_always_alert
or alert_template.agent_always_email
or alert_template.agent_always_text
)
)
)
def send_outage_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
(
f"Data has not been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"within the expected time."
),
alert_template=self.alert_template,
)
def send_recovery_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
(
f"Data has been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"after an interruption in data transmission."
),
alert_template=self.alert_template,
)
def send_outage_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
alert_template=self.alert_template,
)
def send_recovery_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
alert_template=self.alert_template,
)
class AutomatedTask(BaseAuditModel):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="autotasks",
on_delete=models.CASCADE,
null=True,
blank=True,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="autotasks",
null=True,
blank=True,
on_delete=models.CASCADE,
)
custom_field = models.ForeignKey(
"core.CustomField",
related_name="autotasks",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
# format -> [{"type": "script", "script": 1, "name": "Script Name", "timeout": 90, "script_args": [], "env_vars": []}, {"type": "cmd", "command": "whoami", "timeout": 90}]
actions = JSONField(default=list)
assigned_check = models.ForeignKey(
"checks.Check",
null=True,
blank=True,
related_name="assignedtasks",
on_delete=models.SET_NULL,
)
name = models.CharField(max_length=255)
collector_all_output = models.BooleanField(default=False)
enabled = models.BooleanField(default=True)
continue_on_error = models.BooleanField(default=True)
alert_severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_alert = models.BooleanField(default=False)
text_alert = models.BooleanField(default=False)
dashboard_alert = models.BooleanField(default=False)
# options sent to agent for task creation
# general task settings
task_type = models.CharField(
max_length=100, choices=TaskType.choices, default=TaskType.MANUAL
)
win_task_name = models.CharField(
max_length=255, unique=True, blank=True, default=generate_task_name
    )  # unique; also used as the scheduled task name on the agent
run_time_date = DateTimeField(null=True, blank=True)
expire_date = DateTimeField(null=True, blank=True)
# daily
daily_interval = models.PositiveSmallIntegerField(
blank=True, null=True, validators=[MinValueValidator(1), MaxValueValidator(255)]
)
# weekly
run_time_bit_weekdays = models.IntegerField(null=True, blank=True)
weekly_interval = models.PositiveSmallIntegerField(
blank=True, null=True, validators=[MinValueValidator(1), MaxValueValidator(52)]
)
run_time_minute = models.CharField(
max_length=5, null=True, blank=True
) # deprecated
# monthly
monthly_days_of_month = models.PositiveBigIntegerField(blank=True, null=True)
monthly_months_of_year = models.PositiveIntegerField(blank=True, null=True)
# monthly days of week
monthly_weeks_of_month = models.PositiveSmallIntegerField(blank=True, null=True)
# additional task settings
task_repetition_duration = models.CharField(max_length=10, null=True, blank=True)
task_repetition_interval = models.CharField(max_length=10, null=True, blank=True)
stop_task_at_duration_end = models.BooleanField(blank=True, default=False)
random_task_delay = models.CharField(max_length=10, null=True, blank=True)
remove_if_not_scheduled = models.BooleanField(default=False)
run_asap_after_missed = models.BooleanField(default=False) # added in agent v1.4.7
task_instance_policy = models.PositiveSmallIntegerField(blank=True, default=1)
# deprecated
managed_by_policy = models.BooleanField(default=False)
# non-database property
task_result: "Union[TaskResult, Dict[None, None]]" = {}
def __str__(self) -> str:
return self.name
def save(self, *args, **kwargs) -> None:
# if task is a policy task clear cache on everything
if self.policy:
cache.delete_many_pattern("site_*_tasks")
cache.delete_many_pattern("agent_*_tasks")
# get old task if exists
old_task = AutomatedTask.objects.get(pk=self.pk) if self.pk else None
super().save(old_model=old_task, *args, **kwargs)
# check if fields were updated that require a sync to the agent and set status to notsynced
if old_task:
for field in self.fields_that_trigger_task_update_on_agent:
if getattr(self, field) != getattr(old_task, field):
if self.policy:
TaskResult.objects.exclude(
sync_status=TaskSyncStatus.INITIAL
).filter(task__policy_id=self.policy.id).update(
sync_status=TaskSyncStatus.NOT_SYNCED
)
else:
TaskResult.objects.filter(agent=self.agent, task=self).update(
sync_status=TaskSyncStatus.NOT_SYNCED
)
def delete(self, *args, **kwargs):
# if task is a policy task clear cache on everything
if self.policy:
cache.delete_many_pattern("site_*_tasks")
cache.delete_many_pattern("agent_*_tasks")
super().delete(*args, **kwargs)
def schedule(self) -> Optional[str]:
if self.task_type == TaskType.MANUAL:
return "Manual"
elif self.task_type == TaskType.CHECK_FAILURE:
return "Every time check fails"
elif self.task_type == TaskType.RUN_ONCE:
return f'Run once on {self.run_time_date.strftime("%m/%d/%Y %I:%M%p")}'
elif self.task_type == TaskType.DAILY:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
if self.daily_interval == 1:
return f"Daily at {run_time_nice}"
else:
return f"Every {self.daily_interval} days at {run_time_nice}"
elif self.task_type == TaskType.WEEKLY:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
days = bitdays_to_string(self.run_time_bit_weekdays)
            if self.weekly_interval != 1:
                return f"{days} at {run_time_nice} every {self.weekly_interval} weeks"
            else:
                return f"{days} at {run_time_nice}"
elif self.task_type == TaskType.MONTHLY:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
months = bitmonths_to_string(self.monthly_months_of_year)
days = bitmonthdays_to_string(self.monthly_days_of_month)
return f"Runs on {months} on days {days} at {run_time_nice}"
elif self.task_type == TaskType.MONTHLY_DOW:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
months = bitmonths_to_string(self.monthly_months_of_year)
weeks = bitweeks_to_string(self.monthly_weeks_of_month)
days = bitdays_to_string(self.run_time_bit_weekdays)
return f"Runs on {months} on {weeks} on {days} at {run_time_nice}"
elif self.task_type == TaskType.ONBOARDING:
return "Onboarding: Runs once on task creation."
return None
def fields_that_trigger_task_update_on_agent(self) -> List[str]:
return FIELDS_TRIGGER_TASK_UPDATE_AGENT
def serialize(task):
# serializes the task and returns json
from .serializers import TaskAuditSerializer
return TaskAuditSerializer(task).data
def create_policy_task(
self, policy: "Policy", assigned_check: "Optional[Check]" = None
) -> None:
# Copies certain properties on this task (self) to a new task and sets it to the supplied Policy
task = AutomatedTask.objects.create(
policy=policy,
assigned_check=assigned_check,
)
for field in POLICY_TASK_FIELDS_TO_COPY:
setattr(task, field, getattr(self, field))
task.save()
# agent version >= 1.8.0
def generate_nats_task_payload(self) -> Dict[str, Any]:
task = {
"pk": self.pk,
"type": "rmm",
"name": self.win_task_name,
"overwrite_task": True,
"enabled": self.enabled,
"trigger": (
self.task_type
if self.task_type != TaskType.CHECK_FAILURE
else TaskType.MANUAL
),
"multiple_instances": self.task_instance_policy or 0,
"delete_expired_task_after": (
self.remove_if_not_scheduled if self.expire_date else False
),
"start_when_available": (
self.run_asap_after_missed
if self.task_type != TaskType.RUN_ONCE
else True
),
}
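        # calendar-based triggers require an explicit start datetime; fall back to now if none was set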
if self.task_type in (
TaskType.DAILY,
TaskType.WEEKLY,
TaskType.MONTHLY,
TaskType.MONTHLY_DOW,
TaskType.RUN_ONCE,
):
if not self.run_time_date:
self.run_time_date = djangotime.now()
task["start_year"] = self.run_time_date.year
task["start_month"] = self.run_time_date.month
task["start_day"] = self.run_time_date.day
task["start_hour"] = self.run_time_date.hour
task["start_min"] = self.run_time_date.minute
if self.expire_date:
task["expire_year"] = self.expire_date.year
task["expire_month"] = self.expire_date.month
task["expire_day"] = self.expire_date.day
task["expire_hour"] = self.expire_date.hour
task["expire_min"] = self.expire_date.minute
if self.random_task_delay:
task["random_delay"] = convert_to_iso_duration(self.random_task_delay)
if self.task_repetition_interval and self.task_repetition_duration:
task["repetition_interval"] = convert_to_iso_duration(
self.task_repetition_interval
)
task["repetition_duration"] = convert_to_iso_duration(
self.task_repetition_duration
)
task["stop_at_duration_end"] = self.stop_task_at_duration_end
if self.task_type == TaskType.DAILY:
task["day_interval"] = self.daily_interval
elif self.task_type == TaskType.WEEKLY:
task["week_interval"] = self.weekly_interval
task["days_of_week"] = self.run_time_bit_weekdays
elif self.task_type == TaskType.MONTHLY:
# check if "last day is configured"
if self.monthly_days_of_month >= 0x80000000:
task["days_of_month"] = self.monthly_days_of_month - 0x80000000
task["run_on_last_day_of_month"] = True
else:
task["days_of_month"] = self.monthly_days_of_month
task["run_on_last_day_of_month"] = False
task["months_of_year"] = self.monthly_months_of_year
elif self.task_type == TaskType.MONTHLY_DOW:
task["days_of_week"] = self.run_time_bit_weekdays
task["months_of_year"] = self.monthly_months_of_year
task["weeks_of_month"] = self.monthly_weeks_of_month
return task
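    # ensure a TaskResult row exists, then push a 'schedtask' command over NATS and record the sync status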
def create_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
nats_data = {
"func": "schedtask",
"schedtaskpayload": self.generate_nats_task_payload(),
}
logger.debug(nats_data)
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=10))
if r != "ok":
task_result.sync_status = TaskSyncStatus.INITIAL
task_result.save(update_fields=["sync_status"])
logger.error(
f"Unable to create scheduled task {self.name} on {task_result.agent.hostname}: {r}"
)
return "timeout"
else:
task_result.sync_status = TaskSyncStatus.SYNCED
task_result.save(update_fields=["sync_status"])
logger.info(
f"{task_result.agent.hostname} task {self.name} was successfully created."
)
return "ok"
def modify_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
nats_data = {
"func": "schedtask",
"schedtaskpayload": self.generate_nats_task_payload(),
}
logger.debug(nats_data)
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=10))
if r != "ok":
task_result.sync_status = TaskSyncStatus.NOT_SYNCED
task_result.save(update_fields=["sync_status"])
logger.error(
f"Unable to modify scheduled task {self.name} on {task_result.agent.hostname}: {r}"
)
return "timeout"
else:
task_result.sync_status = TaskSyncStatus.SYNCED
task_result.save(update_fields=["sync_status"])
logger.info(
f"{task_result.agent.hostname} task {self.name} was successfully modified."
)
return "ok"
def delete_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
nats_data = {
"func": "delschedtask",
"schedtaskpayload": {"name": self.win_task_name},
}
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=10))
if r != "ok" and "The system cannot find the file specified" not in r:
task_result.sync_status = TaskSyncStatus.PENDING_DELETION
with suppress(DatabaseError):
task_result.save(update_fields=["sync_status"])
logger.error(
f"Unable to delete task {self.name} on {task_result.agent.hostname}: {r}"
)
return "timeout"
else:
self.delete()
logger.info(f"{task_result.agent.hostname} task {self.name} was deleted.")
return "ok"
def run_win_task(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
asyncio.run(
task_result.agent.nats_cmd(
{"func": "runtask", "taskpk": self.pk}, wait=False
)
)
return "ok"
def should_create_alert(self, alert_template=None):
return (
self.dashboard_alert
or self.email_alert
or self.text_alert
or (
alert_template
and (
alert_template.task_always_alert
or alert_template.task_always_email
or alert_template.task_always_text
)
)
)
class TaskResult(models.Model):
class Meta:
unique_together = (("agent", "task"),)
objects = PermissionQuerySet.as_manager()
id = models.BigAutoField(primary_key=True)
agent = models.ForeignKey(
"agents.Agent",
related_name="taskresults",
on_delete=models.CASCADE,
)
task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="taskresults",
on_delete=models.CASCADE,
)
retcode = models.BigIntegerField(null=True, blank=True)
stdout = models.TextField(null=True, blank=True)
stderr = models.TextField(null=True, blank=True)
execution_time = models.CharField(max_length=100, default="0.0000")
last_run = models.DateTimeField(null=True, blank=True)
status = models.CharField(
max_length=30, choices=TaskStatus.choices, default=TaskStatus.PENDING
)
sync_status = models.CharField(
max_length=100, choices=TaskSyncStatus.choices, default=TaskSyncStatus.INITIAL
)
def __str__(self):
return f"{self.agent.hostname} - {self.task}"
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_task_alert(
self.task,
agent=self.agent,
skip_create=not self.task.should_create_alert(alert_template),
)
def save_collector_results(self) -> None:
agent_field = self.task.custom_field.get_or_create_field_value(self.agent)
value = (
self.stdout.strip()
if self.task.collector_all_output
else self.stdout.strip().split("\n")[-1].strip()
)
agent_field.save_to_field(value)
def send_email(self):
CORE = get_core_settings()
# Format of Email sent when Task has email alert
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
else:
subject = f"{self} Failed"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_mail(subject, body, self.agent.alert_template)
def send_sms(self):
CORE = get_core_settings()
# Format of SMS sent when Task has SMS alert
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
else:
subject = f"{self} Failed"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_sms(body, alert_template=self.agent.alert_template)
def send_resolved_email(self):
CORE = get_core_settings()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_mail(subject, body, alert_template=self.agent.alert_template)
def send_resolved_sms(self):
CORE = get_core_settings()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_sms(body, alert_template=self.agent.alert_template)
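# periodic job: under a redis lock, batch pending create/modify/delete of scheduled tasks for online agents and push them over a single NATS connection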
def sync_scheduled_tasks(self) -> str:
with redis_lock(SYNC_SCHED_TASK_LOCK, self.app.oid) as acquired:
if not acquired:
return f"{self.app.oid} still running"
actions: list[tuple[str, int, Agent, Any, str, str]] = [] # list of tuples
for agent in _get_agent_qs():
if (
not agent.is_posix
and pyver.parse(agent.version) >= pyver.parse("1.6.0")
and agent.status == AGENT_STATUS_ONLINE
):
# create a list of tasks to be synced so we can run them asynchronously
for task in agent.get_tasks_with_policies():
# TODO can we just use agent??
agent_obj: "Agent" = agent if task.policy else task.agent
# onboarding tasks require agent >= 2.6.0
if task.task_type == TaskType.ONBOARDING and pyver.parse(
agent.version
) < pyver.parse("2.6.0"):
continue
                    # policy tasks have an empty dict task_result until their first sync creates a TaskResult
if (not task.task_result) or (
isinstance(task.task_result, TaskResult)
and task.task_result.sync_status == TaskSyncStatus.INITIAL
):
actions.append(
(
"create",
task.id,
agent_obj,
task.generate_nats_task_payload(),
agent.agent_id,
agent.hostname,
)
)
elif (
isinstance(task.task_result, TaskResult)
and task.task_result.sync_status
== TaskSyncStatus.PENDING_DELETION
):
actions.append(
(
"delete",
task.id,
agent_obj,
{},
agent.agent_id,
agent.hostname,
)
)
elif (
isinstance(task.task_result, TaskResult)
and task.task_result.sync_status == TaskSyncStatus.NOT_SYNCED
):
actions.append(
(
"modify",
task.id,
agent_obj,
task.generate_nats_task_payload(),
agent.agent_id,
agent.hostname,
)
)
async def _handle_task_on_agent(
nc: "NATSClient", actions: tuple[str, int, Agent, Any, str, str]
) -> None:
# tuple: (0: action, 1: task.id, 2: agent object, 3: nats task payload, 4: agent_id, 5: agent hostname)
action = actions[0]
task_id = actions[1]
agent = actions[2]
payload = actions[3]
agent_id = actions[4]
hostname = actions[5]
task: "AutomatedTask" = await AutomatedTask.objects.aget(id=task_id)
try:
task_result = await TaskResult.objects.aget(agent=agent, task=task)
except TaskResult.DoesNotExist:
task_result = await TaskResult.objects.acreate(agent=agent, task=task)
if action in ("create", "modify"):
logger.debug(payload)
nats_data = {
"func": "schedtask",
"schedtaskpayload": payload,
}
r = await a_nats_cmd(nc=nc, sub=agent_id, data=nats_data, timeout=10)
if r != "ok":
if action == "create":
task_result.sync_status = TaskSyncStatus.INITIAL
else:
task_result.sync_status = TaskSyncStatus.NOT_SYNCED
logger.error(
f"Unable to {action} scheduled task {task.name} on {hostname}: {r}"
)
else:
task_result.sync_status = TaskSyncStatus.SYNCED
logger.info(
f"{hostname} task {task.name} was {'created' if action == 'create' else 'modified'}"
)
await task_result.asave(update_fields=["sync_status"])
# delete
else:
nats_data = {
"func": "delschedtask",
"schedtaskpayload": {"name": task.win_task_name},
}
r = await a_nats_cmd(nc=nc, sub=agent_id, data=nats_data, timeout=10)
if r != "ok" and "The system cannot find the file specified" not in r:
task_result.sync_status = TaskSyncStatus.PENDING_DELETION
with suppress(DatabaseError):
await task_result.asave(update_fields=["sync_status"])
logger.error(
f"Unable to {action} scheduled task {task.name} on {hostname}: {r}"
)
else:
task_name = task.name
await task.adelete()
logger.info(f"{hostname} task {task_name} was deleted.")
async def _run():
opts = setup_nats_options()
try:
nc = await nats.connect(**opts)
except Exception as e:
ret = str(e)
logger.error(ret)
return ret
if tasks := [_handle_task_on_agent(nc, task) for task in actions]:
await asyncio.gather(*tasks)
await nc.flush()
await nc.close()
asyncio.run(_run())
return "ok" | null |
189,077 | import asyncio
import traceback
from contextlib import suppress
from time import sleep
from typing import TYPE_CHECKING, Any
import nats
from django.conf import settings
from django.db import transaction
from django.db.models import Prefetch
from django.db.utils import DatabaseError
from django.utils import timezone as djangotime
from packaging import version as pyver
from accounts.models import User
from accounts.utils import is_superuser
from agents.models import Agent
from agents.tasks import clear_faults_task, prune_agent_history
from alerts.models import Alert
from alerts.tasks import prune_resolved_alerts
from autotasks.models import AutomatedTask, TaskResult
from checks.models import Check, CheckHistory, CheckResult
from checks.tasks import prune_check_history
from clients.models import Client, Site
from core.mesh_utils import (
MeshSync,
build_mesh_display_name,
has_mesh_perms,
transform_mesh,
transform_trmm,
)
from core.models import CoreSettings
from core.utils import get_core_settings, get_mesh_ws_url
from logs.models import PendingAction
from logs.tasks import prune_audit_log, prune_debug_log
from tacticalrmm.celery import app
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_ONLINE,
AGENT_STATUS_OVERDUE,
RESOLVE_ALERTS_LOCK,
SYNC_MESH_PERMS_TASK_LOCK,
SYNC_SCHED_TASK_LOCK,
AlertSeverity,
AlertType,
PAAction,
PAStatus,
TaskStatus,
TaskSyncStatus,
TaskType,
)
from tacticalrmm.helpers import make_random_password, setup_nats_options
from tacticalrmm.logger import logger
from tacticalrmm.nats_utils import a_nats_cmd
from tacticalrmm.permissions import _has_perm_on_agent
from tacticalrmm.utils import redis_lock
def _get_agent_qs() -> "QuerySet[Agent]":
qs: "QuerySet[Agent]" = (
Agent.objects.defer(*AGENT_DEFER)
.select_related(
"site__server_policy",
"site__workstation_policy",
"site__client__server_policy",
"site__client__workstation_policy",
"policy",
"policy__alert_template",
"alert_template",
)
.prefetch_related(
Prefetch(
"agentchecks",
queryset=Check.objects.select_related("script"),
),
Prefetch(
"checkresults",
queryset=CheckResult.objects.select_related("assigned_check"),
),
Prefetch(
"taskresults",
queryset=TaskResult.objects.select_related("task"),
),
"autotasks",
)
)
return qs
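# roll up overdue agents, failing checks and failing tasks into error/warning flags for a group of agents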
def _get_failing_data(agents: "QuerySet[Agent]") -> dict[str, bool]:
data = {"error": False, "warning": False}
for agent in agents:
if agent.maintenance_mode:
            continue  # skip agents in maintenance mode without aborting the loop
if (
agent.overdue_email_alert
or agent.overdue_text_alert
or agent.overdue_dashboard_alert
):
if agent.status == AGENT_STATUS_OVERDUE:
data["error"] = True
break
if agent.checks["has_failing_checks"]:
if agent.checks["warning"]:
data["warning"] = True
if agent.checks["failing"]:
data["error"] = True
break
if not data["error"] and not data["warning"]:
for task in agent.get_tasks_with_policies():
if data["error"] and data["warning"]:
break
elif not isinstance(task.task_result, TaskResult):
continue
elif (
not data["error"]
and task.task_result.status == TaskStatus.FAILING
and task.alert_severity == AlertSeverity.ERROR
):
data["error"] = True
elif (
not data["warning"]
and task.task_result.status == TaskStatus.FAILING
and task.alert_severity == AlertSeverity.WARNING
):
data["warning"] = True
return data
class Client(BaseAuditModel):
objects = PermissionQuerySet.as_manager()
name = models.CharField(max_length=255, unique=True)
block_policy_inheritance = models.BooleanField(default=False)
failing_checks = models.JSONField(default=_default_failing_checks_data)
workstation_policy = models.ForeignKey(
"automation.Policy",
related_name="workstation_clients",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
server_policy = models.ForeignKey(
"automation.Policy",
related_name="server_clients",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="clients",
on_delete=models.SET_NULL,
null=True,
blank=True,
)
def save(self, *args, **kwargs):
from alerts.tasks import cache_agents_alert_template
# get old client if exists
old_client = Client.objects.get(pk=self.pk) if self.pk else None
super().save(old_model=old_client, *args, **kwargs)
        # check if policies have changed and initiate a task to reapply policies if so
if old_client and (
old_client.alert_template != self.alert_template
or old_client.workstation_policy != self.workstation_policy
or old_client.server_policy != self.server_policy
):
cache_agents_alert_template.delay()
if old_client and (
old_client.workstation_policy != self.workstation_policy
or old_client.server_policy != self.server_policy
):
sites = self.sites.all()
if old_client.workstation_policy != self.workstation_policy:
for site in sites:
cache.delete_many_pattern(f"site_workstation_*{site.pk}_*")
if old_client.server_policy != self.server_policy:
for site in sites:
cache.delete_many_pattern(f"site_server_*{site.pk}_*")
class Meta:
ordering = ("name",)
def __str__(self):
return self.name
def live_agent_count(self) -> int:
return Agent.objects.defer(*AGENT_DEFER).filter(site__client=self).count()
def serialize(client):
from .serializers import ClientAuditSerializer
# serializes the client and returns json
return ClientAuditSerializer(client).data
class Site(BaseAuditModel):
objects = PermissionQuerySet.as_manager()
client = models.ForeignKey(Client, related_name="sites", on_delete=models.CASCADE)
name = models.CharField(max_length=255)
block_policy_inheritance = models.BooleanField(default=False)
failing_checks = models.JSONField(default=_default_failing_checks_data)
workstation_policy = models.ForeignKey(
"automation.Policy",
related_name="workstation_sites",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
server_policy = models.ForeignKey(
"automation.Policy",
related_name="server_sites",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="sites",
on_delete=models.SET_NULL,
null=True,
blank=True,
)
def save(self, *args, **kwargs):
from alerts.tasks import cache_agents_alert_template
        # get the old site if it exists
old_site = Site.objects.get(pk=self.pk) if self.pk else None
super().save(old_model=old_site, *args, **kwargs)
        # check if policies have changed and initiate a task to reapply policies if so
if old_site:
if (
old_site.alert_template != self.alert_template
or old_site.workstation_policy != self.workstation_policy
or old_site.server_policy != self.server_policy
):
cache_agents_alert_template.delay()
if old_site.workstation_policy != self.workstation_policy:
cache.delete_many_pattern(f"site_workstation_*{self.pk}_*")
if old_site.server_policy != self.server_policy:
cache.delete_many_pattern(f"site_server_*{self.pk}_*")
class Meta:
ordering = ("name",)
unique_together = (("client", "name"),)
def __str__(self):
return self.name
def live_agent_count(self) -> int:
return self.agents.defer(*AGENT_DEFER).count() # type: ignore
def serialize(site):
from .serializers import SiteAuditSerializer
# serializes the site and returns json
return SiteAuditSerializer(site).data
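# recompute and store the failing_checks summary for every site and client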
def cache_db_fields_task() -> None:
qs = _get_agent_qs()
# update client/site failing check fields and agent counts
for site in Site.objects.all():
agents = qs.filter(site=site)
site.failing_checks = _get_failing_data(agents)
site.save(update_fields=["failing_checks"])
for client in Client.objects.all():
agents = qs.filter(site__client=client)
client.failing_checks = _get_failing_data(agents)
client.save(update_fields=["failing_checks"]) | null |
189,078 | import asyncio
import traceback
from contextlib import suppress
from time import sleep
from typing import TYPE_CHECKING, Any
import nats
from django.conf import settings
from django.db import transaction
from django.db.models import Prefetch
from django.db.utils import DatabaseError
from django.utils import timezone as djangotime
from packaging import version as pyver
from accounts.models import User
from accounts.utils import is_superuser
from agents.models import Agent
from agents.tasks import clear_faults_task, prune_agent_history
from alerts.models import Alert
from alerts.tasks import prune_resolved_alerts
from autotasks.models import AutomatedTask, TaskResult
from checks.models import Check, CheckHistory, CheckResult
from checks.tasks import prune_check_history
from clients.models import Client, Site
from core.mesh_utils import (
MeshSync,
build_mesh_display_name,
has_mesh_perms,
transform_mesh,
transform_trmm,
)
from core.models import CoreSettings
from core.utils import get_core_settings, get_mesh_ws_url
from logs.models import PendingAction
from logs.tasks import prune_audit_log, prune_debug_log
from tacticalrmm.celery import app
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_ONLINE,
AGENT_STATUS_OVERDUE,
RESOLVE_ALERTS_LOCK,
SYNC_MESH_PERMS_TASK_LOCK,
SYNC_SCHED_TASK_LOCK,
AlertSeverity,
AlertType,
PAAction,
PAStatus,
TaskStatus,
TaskSyncStatus,
TaskType,
)
from tacticalrmm.helpers import make_random_password, setup_nats_options
from tacticalrmm.logger import logger
from tacticalrmm.nats_utils import a_nats_cmd
from tacticalrmm.permissions import _has_perm_on_agent
from tacticalrmm.utils import redis_lock
class User(AbstractUser, BaseAuditModel):
is_active = models.BooleanField(default=True)
block_dashboard_login = models.BooleanField(default=False)
totp_key = models.CharField(max_length=50, null=True, blank=True)
dark_mode = models.BooleanField(default=True)
show_community_scripts = models.BooleanField(default=True)
agent_dblclick_action: "AgentDblClick" = models.CharField(
max_length=50, choices=AgentDblClick.choices, default=AgentDblClick.EDIT_AGENT
)
url_action = models.ForeignKey(
"core.URLAction",
related_name="user",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
default_agent_tbl_tab = models.CharField(
max_length=50, choices=AgentTableTabs.choices, default=AgentTableTabs.MIXED
)
agents_per_page = models.PositiveIntegerField(default=50) # not currently used
client_tree_sort = models.CharField(
max_length=50, choices=ClientTreeSort.choices, default=ClientTreeSort.ALPHA_FAIL
)
client_tree_splitter = models.PositiveIntegerField(default=11)
loading_bar_color = models.CharField(max_length=255, default="red")
dash_info_color = models.CharField(max_length=255, default="info")
dash_positive_color = models.CharField(max_length=255, default="positive")
dash_negative_color = models.CharField(max_length=255, default="negative")
dash_warning_color = models.CharField(max_length=255, default="warning")
clear_search_when_switching = models.BooleanField(default=True)
date_format = models.CharField(max_length=30, blank=True, null=True)
is_installer_user = models.BooleanField(default=False)
last_login_ip = models.GenericIPAddressField(default=None, blank=True, null=True)
agent = models.OneToOneField(
"agents.Agent",
related_name="user",
null=True,
blank=True,
on_delete=models.CASCADE,
)
role = models.ForeignKey(
"accounts.Role",
null=True,
blank=True,
related_name="users",
on_delete=models.SET_NULL,
)
def mesh_user_id(self):
return f"user//{self.mesh_username}"
def mesh_username(self):
# lower() needed for mesh api
return f"{self.username.lower()}___{self.pk}"
def serialize(user):
        # serializes the user and returns json
from .serializers import UserSerializer
return UserSerializer(user).data
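    # return the user's role from cache when possible; otherwise prefetch its client/site visibility relations and cache it for 10 minutes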
def get_and_set_role_cache(self) -> "Optional[Role]":
role = cache.get(f"{ROLE_CACHE_PREFIX}{self.role}")
if role and isinstance(role, Role):
return role
elif not role and not self.role:
return None
else:
models.prefetch_related_objects(
[self.role],
"can_view_clients",
"can_view_sites",
)
cache.set(f"{ROLE_CACHE_PREFIX}{self.role}", self.role, 600)
return self.role
def is_superuser(user: "User") -> bool:
return user.role and getattr(user.role, "is_superuser")
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat: "AgentPlat" = models.CharField( # type: ignore
max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
goarch: "GoArch" = models.CharField( # type: ignore
max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(
max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
return self.hostname
def save(self, *args, **kwargs):
# prevent recursion since calling set_alert_template() also calls save()
if not hasattr(self, "_processing_set_alert_template"):
self._processing_set_alert_template = False
if self.pk and not self._processing_set_alert_template:
orig = Agent.objects.get(pk=self.pk)
mon_type_changed = self.monitoring_type != orig.monitoring_type
site_changed = self.site_id != orig.site_id
if mon_type_changed or site_changed:
self._processing_set_alert_template = True
self.set_alert_template()
self._processing_set_alert_template = False
super().save(*args, **kwargs)
def client(self) -> "Client":
return self.site.client
def timezone(self) -> str:
        # return the default timezone unless the timezone is explicitly set per agent
if self.time_zone:
return self.time_zone
return get_core_settings().default_time_zone
def is_posix(self) -> bool:
return self.plat in {AgentPlat.LINUX, AgentPlat.DARWIN}
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
elif "32 bit" in self.operating_system or "32bit" in self.operating_system:
return "32"
return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
ver = settings.LATEST_AGENT_VER
if not self.goarch:
DebugLog.warning(
agent=self,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
)
return "noarch"
if pyver.parse(self.version) <= pyver.parse("1.3.0"):
return "not supported"
url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"
if not force:
if self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).exists():
self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).delete()
PendingAction.objects.create(
agent=self,
action_type=PAAction.AGENT_UPDATE,
details={
"url": url,
"version": ver,
"inno": bin,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": ver,
"inno": bin,
},
}
asyncio.run(self.nats_cmd(nats_data, wait=False))
return "created"
def status(self) -> str:
now = djangotime.now()
offline = now - djangotime.timedelta(minutes=self.offline_time)
overdue = now - djangotime.timedelta(minutes=self.overdue_time)
if self.last_seen is not None:
if (self.last_seen < offline) and (self.last_seen > overdue):
return AGENT_STATUS_OFFLINE
elif (self.last_seen < offline) and (self.last_seen < overdue):
return AGENT_STATUS_OVERDUE
else:
return AGENT_STATUS_ONLINE
else:
return AGENT_STATUS_OFFLINE
def checks(self) -> Dict[str, Any]:
total, passing, failing, warning, info = 0, 0, 0, 0, 0
for check in self.get_checks_with_policies(exclude_overridden=True):
total += 1
if (
not hasattr(check.check_result, "status")
or isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.PASSING
):
passing += 1
elif (
isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.FAILING
):
alert_severity = (
check.check_result.alert_severity
if check.check_type
in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
)
else check.alert_severity
)
if alert_severity == AlertSeverity.ERROR:
failing += 1
elif alert_severity == AlertSeverity.WARNING:
warning += 1
elif alert_severity == AlertSeverity.INFO:
info += 1
ret = {
"total": total,
"passing": passing,
"failing": failing,
"warning": warning,
"info": info,
"has_failing_checks": failing > 0 or warning > 0,
}
return ret
def pending_actions_count(self) -> int:
ret = cache.get(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}")
if ret is None:
ret = self.pendingactions.filter(status=PAStatus.PENDING).count()
cache.set(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}", ret, 600)
return ret
def cpu_model(self) -> List[str]:
if self.is_posix:
try:
return cast(List[str], self.wmi_detail["cpus"])
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
for cpu in cpus:
name = [x["Name"] for x in cpu if "Name" in x][0]
lp, nc = "", ""
with suppress(Exception):
lp = [
x["NumberOfLogicalProcessors"]
for x in cpu
if "NumberOfCores" in x
][0]
nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
if lp and nc:
cpu_string = f"{name}, {nc}C/{lp}T"
else:
cpu_string = name
ret.append(cpu_string)
return ret
except:
return ["unknown cpu model"]
def graphics(self) -> str:
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
for i in graphics:
caption = [x["Caption"] for x in i if "Caption" in x][0]
if "microsoft remote display adapter" in caption.lower():
mrda.append("yes")
continue
ret.append([x["Caption"] for x in i if "Caption" in x][0])
# only return this if no other graphics cards
if not ret and mrda:
return "Microsoft Remote Display Adapter"
return ", ".join(ret)
except:
return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
except:
return "error getting local ips"
for i in ips:
try:
addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
except:
continue
if addr is None:
continue
for ip in addr:
if validators.ipv4(ip):
ret.append(ip)
if len(ret) == 1:
return cast(str, ret[0])
return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
if self.is_posix:
try:
return cast(str, self.wmi_detail["make_model"])
except:
return "error getting make/model"
with suppress(Exception):
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
model = [x["Model"] for x in comp_sys if "Model" in x][0]
if "to be filled" in model.lower():
mobo = self.wmi_detail["base_board"][0]
make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
model = [x["Product"] for x in mobo if "Product" in x][0]
if make.lower() == "lenovo":
sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
if "to be filled" not in sysfam.lower():
model = sysfam
return f"{make} {model}"
with suppress(Exception):
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
return "unknown make/model"
def physical_disks(self) -> Sequence[Disk]:
if self.is_posix:
try:
return cast(List[Disk], self.wmi_detail["disks"])
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
for disk in disks:
interface_type = [
x["InterfaceType"] for x in disk if "InterfaceType" in x
][0]
if interface_type == "USB":
continue
model = [x["Caption"] for x in disk if "Caption" in x][0]
size = [x["Size"] for x in disk if "Size" in x][0]
size_in_gb = round(int(size) / 1_073_741_824)
ret.append(f"{model} {size_in_gb:,}GB {interface_type}")
return ret
except:
return ["unknown disk"]
def serial_number(self) -> str:
if self.is_posix:
try:
return self.wmi_detail["serialnumber"]
except:
return ""
try:
return self.wmi_detail["bios"][0][0]["SerialNumber"]
except:
return ""
def hex_mesh_node_id(self) -> str:
return _b64_to_hex(self.mesh_node_id)
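    # _b64_to_hex is a helper defined elsewhere in this module; conceptually it converts the
    # stored base64 mesh_node_id into the hex form MeshCentral uses in its "node//<hex>" ids
    # (sync_mesh_perms_task below builds f"node//{agent.hex_mesh_node_id}" the same way).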
def online_agents(cls, min_version: str = "") -> "List[Agent]":
if min_version:
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if pyver.parse(i.version) >= pyver.parse(min_version)
and i.status == AGENT_STATUS_ONLINE
]
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if i.status == AGENT_STATUS_ONLINE
]
def is_supported_script(self, platforms: List[str]) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
if exclude_overridden:
checks = (
list(
check
for check in self.agentchecks.all()
if not check.overridden_by_policy
)
+ self.get_checks_from_policies()
)
else:
checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
results = self.taskresults.all() # type: ignore
for task in tasks:
for result in results:
if result.task.id == task.pk:
task.task_result = result
break
return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
results = self.checkresults.all() # type: ignore
for check in checks:
for result in results:
if result.assigned_check.id == check.pk:
check.check_result = result
break
return checks
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
from checks.models import Check
site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
default_policy = getattr(
get_core_settings(), f"{self.monitoring_type}_policy", None
)
        # prefetch excluded objects on policies only if the policy is not None
models.prefetch_related_objects(
[
policy
for policy in (self.policy, site_policy, client_policy, default_policy)
if policy
],
"excluded_agents",
"excluded_sites",
"excluded_clients",
models.Prefetch(
"policychecks", queryset=Check.objects.select_related("script")
),
"autotasks",
)
return {
"agent_policy": (
self.policy
if self.policy and not self.policy.is_agent_excluded(self)
else None
),
"site_policy": (
site_policy
if (site_policy and not site_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
else None
),
"client_policy": (
client_policy
if (client_policy and not client_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
else None
),
"default_policy": (
default_policy
if (default_policy and not default_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
and not self.client.block_policy_inheritance
else None
),
}
def check_run_interval(self) -> int:
interval = self.check_interval
# determine if any agent checks have a custom interval and set the lowest interval
for check in self.get_checks_with_policies():
if check.run_interval and check.run_interval < interval:
# don't allow check runs less than 15s
interval = 15 if check.run_interval < 15 else check.run_interval
return interval
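    # Example (values invented): with check_interval=120 and checks whose run_interval values
    # are 60 and 5, the loop first lowers the interval to 60 and then clamps the 5-second
    # interval to the 15-second floor, so check_run_interval returns 15.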
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
from scripts.models import Script
script = Script.objects.get(pk=scriptpk)
# always override if set on script model
if script.run_as_user:
run_as_user = True
parsed_args = script.parse_script_args(self, script.shell, args)
parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)
data = {
"func": "runscriptfull" if full else "runscript",
"timeout": timeout,
"script_args": parsed_args,
"payload": {
"code": script.code,
"shell": script.shell,
},
"run_as_user": run_as_user,
"env_vars": parsed_env_vars,
"nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
"deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
}
if history_pk != 0:
data["id"] = history_pk
running_agent = self
if run_on_any:
nats_ping = {"func": "ping"}
# try on self first
r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = self
else:
for agent in Agent.online_agents():
r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = agent
break
if running_agent.pk == self.pk:
return "Unable to find an online agent"
if wait:
return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
else:
asyncio.run(running_agent.nats_cmd(data, wait=False))
return "ok"
# auto approves updates
def approve_updates(self) -> None:
patch_policy = self.get_patch_policy()
severity_list = []
if patch_policy.critical == "approve":
severity_list.append("Critical")
if patch_policy.important == "approve":
severity_list.append("Important")
if patch_policy.moderate == "approve":
severity_list.append("Moderate")
if patch_policy.low == "approve":
severity_list.append("Low")
if patch_policy.other == "approve":
severity_list.append("")
self.winupdates.filter(severity__in=severity_list, installed=False).exclude(
action="approve"
).update(action="approve")
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self) -> "WinUpdatePolicy":
from winupdate.models import WinUpdatePolicy
# check if site has a patch policy and if so use it
patch_policy = None
agent_policy = self.winupdatepolicy.first()
if not agent_policy:
agent_policy = WinUpdatePolicy.objects.create(agent=self)
# Get the list of policies applied to the agent and select the
# highest priority one.
policies = self.get_agent_policies()
for _, policy in policies.items():
if policy and policy.active and policy.winupdatepolicy.exists():
patch_policy = policy.winupdatepolicy.first()
break
# if policy still doesn't exist return the agent patch policy
if not patch_policy:
return agent_policy
# patch policy exists. check if any agent settings are set to override patch policy
if agent_policy.critical != "inherit":
patch_policy.critical = agent_policy.critical
if agent_policy.important != "inherit":
patch_policy.important = agent_policy.important
if agent_policy.moderate != "inherit":
patch_policy.moderate = agent_policy.moderate
if agent_policy.low != "inherit":
patch_policy.low = agent_policy.low
if agent_policy.other != "inherit":
patch_policy.other = agent_policy.other
if agent_policy.run_time_frequency != "inherit":
patch_policy.run_time_frequency = agent_policy.run_time_frequency
patch_policy.run_time_hour = agent_policy.run_time_hour
patch_policy.run_time_days = agent_policy.run_time_days
if agent_policy.reboot_after_install != "inherit":
patch_policy.reboot_after_install = agent_policy.reboot_after_install
if not agent_policy.reprocess_failed_inherit:
patch_policy.reprocess_failed = agent_policy.reprocess_failed
patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
patch_policy.email_if_fail = agent_policy.email_if_fail
return patch_policy
def get_approved_update_guids(self) -> list[str]:
return list(
self.winupdates.filter(action="approve", installed=False).values_list( # type: ignore
"guid", flat=True
)
)
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
core = get_core_settings()
policies = self.get_agent_policies()
# loop through all policies applied to agent and return an alert_template if found
processed_policies: List[int] = []
for key, policy in policies.items():
# default alert_template will override a default policy with alert template applied
if (
"default" in key
and core.alert_template
and core.alert_template.is_active
and not core.alert_template.is_agent_excluded(self)
):
self.alert_template = core.alert_template
self.save(update_fields=["alert_template"])
return core.alert_template
elif (
policy
and policy.active
and policy.pk not in processed_policies
and policy.alert_template
and policy.alert_template.is_active
and not policy.alert_template.is_agent_excluded(self)
):
self.alert_template = policy.alert_template
self.save(update_fields=["alert_template"])
return policy.alert_template
elif (
"site" in key
and self.site.alert_template
and self.site.alert_template.is_active
and not self.site.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.alert_template
self.save(update_fields=["alert_template"])
return self.site.alert_template
elif (
"client" in key
and self.site.client.alert_template
and self.site.client.alert_template.is_active
and not self.site.client.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.client.alert_template
self.save(update_fields=["alert_template"])
return self.site.client.alert_template
# no alert templates found or agent has been excluded
self.alert_template = None
self.save(update_fields=["alert_template"])
return None
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_availability_alert(
self, skip_create=not self.should_create_alert(alert_template)
)
def get_checks_from_policies(self) -> "List[Check]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance or self.agentchecks.exists():
cache_key = f"agent_{self.agent_id}_checks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"
cached_checks = cache.get(cache_key)
if isinstance(cached_checks, list):
return cached_checks
else:
# clear agent checks that have overridden_by_policy set
self.agentchecks.update(overridden_by_policy=False) # type: ignore
# get agent checks based on policies
checks = Policy.get_policy_checks(self)
cache.set(cache_key, checks, 600)
return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance:
cache_key = f"agent_{self.agent_id}_tasks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"
cached_tasks = cache.get(cache_key)
if isinstance(cached_tasks, list):
return cached_tasks
else:
# get agent tasks based on policies
tasks = Policy.get_policy_tasks(self)
cache.set(cache_key, tasks, 600)
return tasks
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
opts = setup_nats_options()
try:
nc = await nats.connect(**opts)
except:
return "natsdown"
if wait:
try:
msg = await nc.request(
self.agent_id, msgpack.dumps(data), timeout=timeout
)
except TimeoutError:
ret = "timeout"
else:
try:
ret = msgpack.loads(msg.data)
except Exception as e:
ret = str(e)
logger.error(e)
await nc.close()
return ret
else:
await nc.publish(self.agent_id, msgpack.dumps(data))
await nc.flush()
await nc.close()
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
"""
Return type: tuple(message: str, error: bool)
"""
if mode == "tacagent":
if self.plat == AgentPlat.LINUX:
cmd = "systemctl restart tacticalagent.service"
shell = 3
elif self.plat == AgentPlat.DARWIN:
cmd = "launchctl kickstart -k system/tacticalagent"
shell = 3
else:
cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
shell = 1
asyncio.run(
send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
)
return "ok", False
elif mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
if wait:
r = asyncio.run(self.nats_cmd(data, timeout=20))
if r == "ok":
return "ok", False
else:
return str(r), True
else:
asyncio.run(self.nats_cmd(data, timeout=20, wait=False))
return "ok", False
return "invalid", True
def serialize(agent: "Agent") -> Dict[str, Any]:
# serializes the agent and returns json
from .serializers import AgentAuditSerializer
return AgentAuditSerializer(agent).data
def delete_superseded_updates(self) -> None:
with suppress(Exception):
pks = [] # list of pks to delete
kbs = list(self.winupdates.values_list("kb", flat=True))
d = Counter(kbs)
dupes = [k for k, v in d.items() if v > 1]
for dupe in dupes:
titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
# extract the version from the title and sort from oldest to newest
                # skip if no version info is available, since there is nothing to parse
try:
matches = r"(Version|Versão)"
pattern = r"\(" + matches + r"(.*?)\)"
vers = [
re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
for i in titles
]
sorted_vers = sorted(vers, key=LooseVersion)
except:
continue
# append all but the latest version to our list of pks to delete
for ver in sorted_vers[:-1]:
q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
pks.append(q.first().pk)
pks = list(set(pks))
self.winupdates.filter(pk__in=pks).delete()
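    # Worked example of the version parsing above (title invented): for a title like
    # "2024-01 Cumulative Update for Windows 11 (Version 23H2) for x64-based Systems",
    # the pattern r"\((Version|Versão)(.*?)\)" captures " 23H2" in group(2); after strip()
    # and LooseVersion sorting, every duplicate KB entry except the newest version is deleted.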
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
return bool(
self.overdue_dashboard_alert
or self.overdue_email_alert
or self.overdue_text_alert
or (
alert_template
and (
alert_template.agent_always_alert
or alert_template.agent_always_email
or alert_template.agent_always_text
)
)
)
def send_outage_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
(
f"Data has not been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"within the expected time."
),
alert_template=self.alert_template,
)
def send_recovery_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
(
f"Data has been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"after an interruption in data transmission."
),
alert_template=self.alert_template,
)
def send_outage_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
alert_template=self.alert_template,
)
def send_recovery_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
alert_template=self.alert_template,
)
def build_mesh_display_name(
*, first_name: str | None, last_name: str | None, company_name: str | None
) -> str:
ret = ""
if first_name:
ret += first_name
if last_name:
ret += f" {last_name}"
if ret and company_name:
ret += f" - {company_name}"
elif company_name:
ret += company_name
return ret
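# Editor-added usage sketch; the function name and sample values below are invented for
# illustration only and are not part of the original module.
def _example_build_mesh_display_name() -> None:
    assert (
        build_mesh_display_name(
            first_name="Jane", last_name="Doe", company_name="Acme Corp"
        )
        == "Jane Doe - Acme Corp"
    )
    assert (
        build_mesh_display_name(
            first_name=None, last_name=None, company_name="Acme Corp"
        )
        == "Acme Corp"
    )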
def has_mesh_perms(*, user: "User") -> bool:
if user.is_superuser or is_superuser(user):
return True
return user.role and getattr(user.role, "can_use_mesh")
def transform_trmm(obj):
ret = []
try:
for node in obj:
node_id = node["node_id"]
user_ids = [link["_id"] for link in node["links"]]
ret.append({"node_id": node_id, "user_ids": user_ids})
except Exception:
        logger.debug(traceback.format_exc())
return ret
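# Editor-added illustration; the function name and sample ids below are invented. It shows the
# input shape transform_trmm expects (TRMM-side node dicts with their user links) and the
# flattened node_id -> user_ids mapping it returns.
def _example_transform_trmm() -> None:
    sample = [{"node_id": "node//ABC123", "links": [{"_id": "user//admin___1"}]}]
    assert transform_trmm(sample) == [
        {"node_id": "node//ABC123", "user_ids": ["user//admin___1"]}
    ]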
def transform_mesh(obj):
pattern = re.compile(r".*___\d+")
ret = []
try:
for _, nodes in obj.items():
for node in nodes:
node_id = node["_id"]
try:
user_ids = [
user_id
for user_id in node["links"].keys()
if pattern.match(user_id)
]
except KeyError:
                    # will trigger on initial sync because there are no mesh users yet
# also triggers for invalid agents after sync
pass
else:
ret.append({"node_id": node_id, "user_ids": user_ids})
except Exception:
        logger.debug(traceback.format_exc())
return ret
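# Editor-added illustration; the function name and sample ids below are invented. Mesh user ids
# that lack the "___<digits>" suffix (i.e. users not managed by TRMM) are filtered out.
def _example_transform_mesh() -> None:
    sample = {
        "mesh//group1": [
            {
                "_id": "node//ABC123",
                "links": {"user//admin___1": {"rights": 72}, "user//other": {}},
            }
        ]
    }
    assert transform_mesh(sample) == [
        {"node_id": "node//ABC123", "user_ids": ["user//admin___1"]}
    ]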
class MeshSync:
def __init__(self, uri: str):
self.uri = uri
self.mesh_users = self.get_trmm_mesh_users() # full list
def mesh_action(
self, *, payload: dict[str, Any], wait=True
) -> dict[str, Any] | None:
async def _do(payload):
async with websockets.connect(self.uri, max_size=TRMM_WS_MAX_SIZE) as ws:
await ws.send(json.dumps(payload))
if wait:
while 1:
try:
message = await asyncio.wait_for(ws.recv(), 120)
r = json.loads(message)
if r["action"] == payload["action"]:
return r
except asyncio.TimeoutError:
logger.error("Timeout reached.")
return None
else:
return None
payload["responseid"] = "meshctrl"
logger.debug(payload)
return asyncio.run(_do(payload))
def get_unique_mesh_users(
self, trmm_agents_list: list[dict[str, Any]]
) -> list[str]:
userids = [i["links"] for i in trmm_agents_list]
all_ids = [item["_id"] for sublist in userids for item in sublist]
return list(set(all_ids))
def get_trmm_mesh_users(self):
payload = {"action": "users"}
ret = {
i["_id"]: i
for i in self.mesh_action(payload=payload, wait=True)["users"]
if re.search(r".*___\d+", i["_id"])
}
return ret
def add_users_to_node(self, *, node_id: str, user_ids: list[str]):
payload = {
"action": "adddeviceuser",
"nodeid": node_id,
"usernames": [s.replace("user//", "") for s in user_ids],
"rights": 72,
"remove": False,
}
self.mesh_action(payload=payload, wait=False)
def delete_users_from_node(self, *, node_id: str, user_ids: list[str]):
payload = {
"action": "adddeviceuser",
"nodeid": node_id,
"userids": user_ids,
"rights": 0,
"remove": True,
}
self.mesh_action(payload=payload, wait=False)
def update_mesh_displayname(self, *, user_info: dict[str, Any]) -> None:
payload = {
"action": "edituser",
"id": user_info["_id"],
"realname": user_info["full_name"],
}
self.mesh_action(payload=payload, wait=False)
def add_user_to_mesh(self, *, user_info: dict[str, Any]) -> None:
payload = {
"action": "adduser",
"username": user_info["username"],
"email": user_info["email"],
"pass": make_random_password(len=30),
"resetNextLogin": False,
"randomPassword": False,
"removeEvents": False,
"emailVerified": True,
}
self.mesh_action(payload=payload, wait=False)
if user_info["full_name"]:
self.update_mesh_displayname(user_info=user_info)
def delete_user_from_mesh(self, *, mesh_user_id: str) -> None:
payload = {
"action": "deleteuser",
"userid": mesh_user_id,
}
self.mesh_action(payload=payload, wait=False)
def add_agent_to_user(self, *, user_id: str, node_id: str) -> None:
payload = {
"action": "adddeviceuser",
"nodeid": node_id,
"userids": [user_id],
"rights": 72,
"remove": False,
}
self.mesh_action(payload=payload, wait=False)
def remove_agent_from_user(self, *, user_id: str, node_id: str) -> None:
payload = {
"action": "adddeviceuser",
"nodeid": node_id,
"userids": [user_id],
"rights": 0,
"remove": True,
}
self.mesh_action(payload=payload, wait=False)
class CoreSettings(BaseAuditModel):
email_alert_recipients = ArrayField(
models.EmailField(null=True, blank=True),
blank=True,
default=list,
)
sms_alert_recipients = ArrayField(
models.CharField(max_length=255, null=True, blank=True),
blank=True,
default=list,
)
twilio_number = models.CharField(max_length=255, null=True, blank=True)
twilio_account_sid = models.CharField(max_length=255, null=True, blank=True)
twilio_auth_token = models.CharField(max_length=255, null=True, blank=True)
smtp_from_email = models.CharField(
max_length=255, blank=True, default="[email protected]"
)
smtp_from_name = models.CharField(max_length=255, null=True, blank=True)
smtp_host = models.CharField(max_length=255, blank=True, default="smtp.gmail.com")
smtp_host_user = models.CharField(
max_length=255, blank=True, default="[email protected]"
)
smtp_host_password = models.CharField(
max_length=255, blank=True, default="changeme"
)
smtp_port = models.PositiveIntegerField(default=587, blank=True)
smtp_requires_auth = models.BooleanField(default=True)
default_time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, default="America/Los_Angeles"
)
# removes check history older than days
check_history_prune_days = models.PositiveIntegerField(default=30)
resolved_alerts_prune_days = models.PositiveIntegerField(default=0)
agent_history_prune_days = models.PositiveIntegerField(default=60)
debug_log_prune_days = models.PositiveIntegerField(default=30)
audit_log_prune_days = models.PositiveIntegerField(default=0)
agent_debug_level = models.CharField(
max_length=20, choices=DebugLogLevel.choices, default=DebugLogLevel.INFO
)
clear_faults_days = models.IntegerField(default=0)
mesh_token = models.CharField(max_length=255, null=True, blank=True, default="")
mesh_username = models.CharField(max_length=255, null=True, blank=True, default="")
mesh_site = models.CharField(max_length=255, null=True, blank=True, default="")
mesh_device_group = models.CharField(
max_length=255, null=True, blank=True, default="TacticalRMM"
)
mesh_company_name = models.CharField(max_length=255, null=True, blank=True)
sync_mesh_with_trmm = models.BooleanField(default=True)
agent_auto_update = models.BooleanField(default=True)
workstation_policy = models.ForeignKey(
"automation.Policy",
related_name="default_workstation_policy",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
server_policy = models.ForeignKey(
"automation.Policy",
related_name="default_server_policy",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="default_alert_template",
on_delete=models.SET_NULL,
null=True,
blank=True,
)
date_format = models.CharField(
max_length=30, blank=True, default="MMM-DD-YYYY - HH:mm"
)
open_ai_token = models.CharField(max_length=255, null=True, blank=True)
open_ai_model = models.CharField(
max_length=255, blank=True, default="gpt-3.5-turbo"
)
def save(self, *args, **kwargs) -> None:
from alerts.tasks import cache_agents_alert_template
cache.delete(CORESETTINGS_CACHE_KEY)
if not self.pk and CoreSettings.objects.exists():
raise ValidationError("There can only be one CoreSettings instance")
# for install script
if not self.pk:
with suppress(Exception):
self.mesh_site = settings.MESH_SITE
self.mesh_username = settings.MESH_USERNAME.lower()
self.mesh_token = settings.MESH_TOKEN_KEY
old_settings = type(self).objects.get(pk=self.pk) if self.pk else None
super().save(*args, **kwargs)
if old_settings:
if (
old_settings.alert_template != self.alert_template
or old_settings.server_policy != self.server_policy
or old_settings.workstation_policy != self.workstation_policy
):
cache_agents_alert_template.delay()
if old_settings.workstation_policy != self.workstation_policy:
cache.delete_many_pattern("site_workstation_*")
if old_settings.server_policy != self.server_policy:
cache.delete_many_pattern("site_server_*")
if (
old_settings.server_policy != self.server_policy
or old_settings.workstation_policy != self.workstation_policy
):
cache.delete_many_pattern("agent_*")
def __str__(self) -> str:
return "Global Site Settings"
def mesh_api_superuser(self) -> str:
# must be lowercase otherwise mesh api breaks
return self.mesh_username.lower()
def sms_is_configured(self) -> bool:
return all(
[
self.twilio_auth_token,
self.twilio_account_sid,
self.twilio_number,
]
)
def email_is_configured(self) -> bool:
# smtp with username/password authentication
if (
self.smtp_requires_auth
and self.smtp_from_email
and self.smtp_host
and self.smtp_host_user
and self.smtp_host_password
and self.smtp_port
):
return True
# smtp relay
elif (
not self.smtp_requires_auth
and self.smtp_from_email
and self.smtp_host
and self.smtp_port
):
return True
return False
def send_mail(
self,
subject: str,
body: str,
alert_template: "Optional[AlertTemplate]" = None,
test: bool = False,
) -> tuple[str, bool]:
if test and not self.email_is_configured:
return "There needs to be at least one email recipient configured", False
# return since email must be configured to continue
elif not self.email_is_configured:
return "SMTP messaging not configured.", False
# override email from if alert_template is passed and is set
if alert_template and alert_template.email_from:
from_address = alert_template.email_from
else:
from_address = self.smtp_from_email
# override email recipients if alert_template is passed and is set
if alert_template and alert_template.email_recipients:
email_recipients = ", ".join(alert_template.email_recipients)
elif self.email_alert_recipients:
email_recipients = ", ".join(cast(List[str], self.email_alert_recipients))
else:
return "There needs to be at least one email recipient configured", False
try:
msg = EmailMessage()
msg["Subject"] = subject
if self.smtp_from_name:
msg["From"] = Address(
display_name=self.smtp_from_name, addr_spec=from_address
)
else:
msg["From"] = from_address
msg["To"] = email_recipients
msg.set_content(body)
with smtplib.SMTP(self.smtp_host, self.smtp_port, timeout=20) as server:
if self.smtp_requires_auth:
server.ehlo()
server.starttls()
server.login(
self.smtp_host_user,
self.smtp_host_password,
)
server.send_message(msg)
server.quit()
else:
# gmail smtp relay specific handling.
if self.smtp_host == "smtp-relay.gmail.com":
server.ehlo()
server.starttls()
server.send_message(msg)
server.quit()
else:
# smtp relay. no auth required
server.send_message(msg)
server.quit()
except Exception as e:
DebugLog.error(message=f"Sending email failed with error: {e}")
if test:
return str(e), False
if test:
return "Email test ok!", True
return "ok", True
def send_sms(
self,
body: str,
alert_template: "Optional[AlertTemplate]" = None,
test: bool = False,
) -> tuple[str, bool]:
if not self.sms_is_configured:
return "Sms alerting is not setup correctly.", False
# override email recipients if alert_template is passed and is set
if alert_template and alert_template.text_recipients:
text_recipients = alert_template.text_recipients
elif self.sms_alert_recipients:
text_recipients = cast(List[str], self.sms_alert_recipients)
else:
return "No sms recipients found", False
tw_client = TwClient(self.twilio_account_sid, self.twilio_auth_token)
for num in text_recipients:
try:
tw_client.messages.create(body=body, to=num, from_=self.twilio_number)
except TwilioRestException as e:
DebugLog.error(message=f"SMS failed to send: {e}")
if test:
return str(e), False
if test:
return "SMS Test sent successfully!", True
return "ok", True
def serialize(core):
# serializes the core and returns json
from .serializers import CoreSerializer
return CoreSerializer(core).data
def get_mesh_ws_url() -> str:
core = get_core_settings()
token = get_auth_token(core.mesh_api_superuser, core.mesh_token)
if settings.DOCKER_BUILD:
uri = f"{settings.MESH_WS_URL}/control.ashx?auth={token}"
else:
if getattr(settings, "USE_EXTERNAL_MESH", False):
site = core.mesh_site.replace("https", "wss")
uri = f"{site}/control.ashx?auth={token}"
else:
mesh_port = getattr(settings, "MESH_PORT", 4430)
uri = f"ws://127.0.0.1:{mesh_port}/control.ashx?auth={token}"
return uri
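# Editor-added note (token value elided): the resulting control URLs take one of three shapes,
#   docker build:  f"{settings.MESH_WS_URL}/control.ashx?auth=<token>"
#   external mesh: the mesh_site with "https" swapped for "wss", e.g. "wss://mesh.example.com/control.ashx?auth=<token>"
#   default:       "ws://127.0.0.1:<MESH_PORT, default 4430>/control.ashx?auth=<token>"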
def sync_mesh_perms_task(self):
with redis_lock(SYNC_MESH_PERMS_TASK_LOCK, self.app.oid) as acquired:
if not acquired:
return f"{self.app.oid} still running"
try:
core = CoreSettings.objects.first()
do_not_sync = not core.sync_mesh_with_trmm
uri = get_mesh_ws_url()
ms = MeshSync(uri)
if do_not_sync:
for user in ms.mesh_users:
ms.delete_user_from_mesh(mesh_user_id=user)
return
company_name = core.mesh_company_name
mnp = {"action": "nodes"}
mesh_nodes_raw = ms.mesh_action(payload=mnp, wait=True)["nodes"]
users = User.objects.select_related("role").filter(
agent=None,
is_installer_user=False,
is_active=True,
block_dashboard_login=False,
)
trmm_agents_meshnodeids = [
f"node//{i.hex_mesh_node_id}"
for i in Agent.objects.only("mesh_node_id")
if i.mesh_node_id
]
mesh_users_dict = {}
for user in users:
full_name = build_mesh_display_name(
first_name=user.first_name,
last_name=user.last_name,
company_name=company_name,
)
                # mesh user creation will fail if the same email already exists for another user
                # avoid that by generating a random placeholder email
rand_str1 = make_random_password(len=6)
rand_str2 = make_random_password(len=5)
email = f"{user.username}.{rand_str1}@tacticalrmm-do-not-change-{rand_str2}.local"
mesh_users_dict[user.mesh_user_id] = {
"_id": user.mesh_user_id,
"username": user.mesh_username,
"full_name": full_name,
"email": email,
}
new_trmm_agents = []
for agent in Agent.objects.defer(*AGENT_DEFER):
agent_dict = {
"node_id": f"node//{agent.hex_mesh_node_id}",
"hostname": agent.hostname,
}
tmp: list[dict[str, str]] = []
for user in users:
if not has_mesh_perms(user=user):
logger.debug(f"No mesh perms for {user} on {agent.hostname}")
continue
if (user.is_superuser or is_superuser(user)) or _has_perm_on_agent(
user, agent.agent_id
):
tmp.append({"_id": user.mesh_user_id})
agent_dict["links"] = tmp
new_trmm_agents.append(agent_dict)
final_trmm = transform_trmm(new_trmm_agents)
final_mesh = transform_mesh(mesh_nodes_raw)
# delete users first
source_users_global = set()
for item in final_trmm:
source_users_global.update(item["user_ids"])
target_users_global = set()
for item in final_mesh:
target_users_global.update(item["user_ids"])
# identify and create new users
new_users = list(source_users_global - target_users_global)
for user_id in new_users:
user_info = mesh_users_dict[user_id]
logger.info(f"Adding new user {user_info['username']} to mesh")
ms.add_user_to_mesh(user_info=user_info)
users_to_delete_globally = list(target_users_global - source_users_global)
for user_id in users_to_delete_globally:
logger.info(f"Deleting {user_id} from mesh")
ms.delete_user_from_mesh(mesh_user_id=user_id)
source_map = {item["node_id"]: set(item["user_ids"]) for item in final_trmm}
target_map = {item["node_id"]: set(item["user_ids"]) for item in final_mesh}
def _get_sleep_after_n_inter(n):
                # maps an agent-count threshold to how many changed nodes to process before sleeping
thresholds = {250: 150, 500: 275, 800: 300, 1000: 340}
for threshold, value in sorted(thresholds.items()):
if n <= threshold:
return value
return 375
iter_count = 0
sleep_after = _get_sleep_after_n_inter(len(source_map))
for node_id, source_users in source_map.items():
# skip agents without valid node id
if node_id not in trmm_agents_meshnodeids:
continue
target_users = target_map.get(node_id, set()) - set(
users_to_delete_globally
)
source_users_adjusted = source_users - set(users_to_delete_globally)
# find users that need to be added or deleted
users_to_add = list(source_users_adjusted - target_users)
users_to_delete = list(target_users - source_users_adjusted)
if users_to_add or users_to_delete:
iter_count += 1
if users_to_add:
logger.info(f"Adding {users_to_add} to {node_id}")
ms.add_users_to_node(node_id=node_id, user_ids=users_to_add)
if users_to_delete:
logger.info(f"Deleting {users_to_delete} from {node_id}")
ms.delete_users_from_node(node_id=node_id, user_ids=users_to_delete)
if iter_count % sleep_after == 0 and iter_count != 0:
# mesh is very inefficient with sql, give it time to catch up so we don't crash the system
logger.info(
f"Sleeping for 7 seconds after {iter_count} iterations."
)
sleep(7)
# after all done, see if need to update display name
ms2 = MeshSync(uri)
unique_ids = ms2.get_unique_mesh_users(new_trmm_agents)
for user in unique_ids:
try:
mesh_realname = ms2.mesh_users[user]["realname"]
except KeyError:
mesh_realname = ""
trmm_realname = mesh_users_dict[user]["full_name"]
if mesh_realname != trmm_realname:
logger.info(
f"Display names don't match. Updating {user} name from {mesh_realname} to {trmm_realname}"
)
ms2.update_mesh_displayname(user_info=mesh_users_dict[user])
except Exception:
logger.debug(traceback.format_exc()) | null |
189,079 | import json
import re
from contextlib import suppress
from pathlib import Path
from zoneinfo import ZoneInfo
import psutil
import requests
from cryptography import x509
from django.conf import settings
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from django.views.decorators.csrf import csrf_exempt
from redis import from_url
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.views import APIView
from core.decorators import monitoring_view
from core.tasks import sync_mesh_perms_task
from core.utils import get_core_settings, sysd_svc_is_running, token_is_valid
from logs.models import AuditLog
from tacticalrmm.constants import AuditActionType, PAStatus
from tacticalrmm.helpers import get_certs, notify_error
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from .models import CodeSignToken, CoreSettings, CustomField, GlobalKVStore, URLAction
from .permissions import (
CodeSignPerms,
CoreSettingsPerms,
CustomFieldPerms,
ServerMaintPerms,
URLActionPerms,
)
from .serializers import (
CodeSignTokenSerializer,
CoreSettingsSerializer,
CustomFieldSerializer,
KeyStoreSerializer,
URLActionSerializer,
)
def version(request):
return Response(settings.APP_VER) | null |
189,080 | import json
import re
from contextlib import suppress
from pathlib import Path
from zoneinfo import ZoneInfo
import psutil
import requests
from cryptography import x509
from django.conf import settings
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from django.views.decorators.csrf import csrf_exempt
from redis import from_url
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.views import APIView
from core.decorators import monitoring_view
from core.tasks import sync_mesh_perms_task
from core.utils import get_core_settings, sysd_svc_is_running, token_is_valid
from logs.models import AuditLog
from tacticalrmm.constants import AuditActionType, PAStatus
from tacticalrmm.helpers import get_certs, notify_error
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from .models import CodeSignToken, CoreSettings, CustomField, GlobalKVStore, URLAction
from .permissions import (
CodeSignPerms,
CoreSettingsPerms,
CustomFieldPerms,
ServerMaintPerms,
URLActionPerms,
)
from .serializers import (
CodeSignTokenSerializer,
CoreSettingsSerializer,
CustomFieldSerializer,
KeyStoreSerializer,
URLActionSerializer,
)
def clear_entire_cache() -> None:
cache.delete_many_pattern(f"{ROLE_CACHE_PREFIX}*")
cache.delete_many_pattern(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}*")
cache.delete(CORESETTINGS_CACHE_KEY)
cache.delete_many_pattern("site_*")
cache.delete_many_pattern("agent_*")
def clear_cache(request):
from core.utils import clear_entire_cache
clear_entire_cache()
return Response("Cache was cleared!") | null |
189,081 | import json
import re
from contextlib import suppress
from pathlib import Path
from zoneinfo import ZoneInfo
import psutil
import requests
from cryptography import x509
from django.conf import settings
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from django.views.decorators.csrf import csrf_exempt
from redis import from_url
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.views import APIView
from core.decorators import monitoring_view
from core.tasks import sync_mesh_perms_task
from core.utils import get_core_settings, sysd_svc_is_running, token_is_valid
from logs.models import AuditLog
from tacticalrmm.constants import AuditActionType, PAStatus
from tacticalrmm.helpers import get_certs, notify_error
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from .models import CodeSignToken, CoreSettings, CustomField, GlobalKVStore, URLAction
from .permissions import (
CodeSignPerms,
CoreSettingsPerms,
CustomFieldPerms,
ServerMaintPerms,
URLActionPerms,
)
from .serializers import (
CodeSignTokenSerializer,
CoreSettingsSerializer,
CustomFieldSerializer,
KeyStoreSerializer,
URLActionSerializer,
)
def token_is_expired() -> bool:
from core.models import CodeSignToken
t: "CodeSignToken" = CodeSignToken.objects.first()
if not t or not t.token:
return False
return t.is_expired
def get_core_settings() -> "CoreSettings":
from core.models import CORESETTINGS_CACHE_KEY, CoreSettings
coresettings = cache.get(CORESETTINGS_CACHE_KEY)
if coresettings and isinstance(coresettings, CoreSettings):
return coresettings
else:
coresettings = CoreSettings.objects.first()
if not coresettings:
raise CoreSettingsNotFound("CoreSettings not found.")
cache.set(CORESETTINGS_CACHE_KEY, coresettings, 600)
return cast(CoreSettings, coresettings)
def dashboard_info(request):
from core.utils import token_is_expired
from tacticalrmm.utils import get_latest_trmm_ver, runcmd_placeholder_text
core_settings = get_core_settings()
return Response(
{
"trmm_version": settings.TRMM_VERSION,
"latest_trmm_ver": get_latest_trmm_ver(),
"dark_mode": request.user.dark_mode,
"show_community_scripts": request.user.show_community_scripts,
"dbl_click_action": request.user.agent_dblclick_action,
"default_agent_tbl_tab": request.user.default_agent_tbl_tab,
"url_action": (
request.user.url_action.id if request.user.url_action else None
),
"client_tree_sort": request.user.client_tree_sort,
"client_tree_splitter": request.user.client_tree_splitter,
"loading_bar_color": request.user.loading_bar_color,
"clear_search_when_switching": request.user.clear_search_when_switching,
"hosted": getattr(settings, "HOSTED", False),
"date_format": request.user.date_format,
"default_date_format": core_settings.date_format,
"token_is_expired": token_is_expired(),
"open_ai_integration_enabled": bool(core_settings.open_ai_token),
"dash_info_color": request.user.dash_info_color,
"dash_positive_color": request.user.dash_positive_color,
"dash_negative_color": request.user.dash_negative_color,
"dash_warning_color": request.user.dash_warning_color,
"run_cmd_placeholder_text": runcmd_placeholder_text(),
}
) | null |
189,082 | import json
import re
from contextlib import suppress
from pathlib import Path
from zoneinfo import ZoneInfo
import psutil
import requests
from cryptography import x509
from django.conf import settings
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from django.views.decorators.csrf import csrf_exempt
from redis import from_url
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.views import APIView
from core.decorators import monitoring_view
from core.tasks import sync_mesh_perms_task
from core.utils import get_core_settings, sysd_svc_is_running, token_is_valid
from logs.models import AuditLog
from tacticalrmm.constants import AuditActionType, PAStatus
from tacticalrmm.helpers import get_certs, notify_error
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from .models import CodeSignToken, CoreSettings, CustomField, GlobalKVStore, URLAction
from .permissions import (
CodeSignPerms,
CoreSettingsPerms,
CustomFieldPerms,
ServerMaintPerms,
URLActionPerms,
)
from .serializers import (
CodeSignTokenSerializer,
CoreSettingsSerializer,
CustomFieldSerializer,
KeyStoreSerializer,
URLActionSerializer,
)
def get_core_settings() -> "CoreSettings":
def email_test(request):
core = get_core_settings()
msg, ok = core.send_mail(
subject="Test from Tactical RMM", body="This is a test message", test=True
)
if not ok:
return notify_error(msg)
return Response(msg) | null |
189,083 | import json
import re
from contextlib import suppress
from pathlib import Path
from zoneinfo import ZoneInfo
import psutil
import requests
from cryptography import x509
from django.conf import settings
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from django.views.decorators.csrf import csrf_exempt
from redis import from_url
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.views import APIView
from core.decorators import monitoring_view
from core.tasks import sync_mesh_perms_task
from core.utils import get_core_settings, sysd_svc_is_running, token_is_valid
from logs.models import AuditLog
from tacticalrmm.constants import AuditActionType, PAStatus
from tacticalrmm.helpers import get_certs, notify_error
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from .models import CodeSignToken, CoreSettings, CustomField, GlobalKVStore, URLAction
from .permissions import (
CodeSignPerms,
CoreSettingsPerms,
CustomFieldPerms,
ServerMaintPerms,
URLActionPerms,
)
from .serializers import (
CodeSignTokenSerializer,
CoreSettingsSerializer,
CustomFieldSerializer,
KeyStoreSerializer,
URLActionSerializer,
)
def status(request):
class AuditLog(models.Model):
def __str__(self) -> str:
def save(self, *args: Any, **kwargs: Any) -> None:
def audit_mesh_session(
username: str, agent: "Agent", debug_info: Dict[Any, Any] = {}
) -> None:
def audit_raw_command(
username: str,
agent: "Agent",
cmd: str,
shell: str,
debug_info: Dict[Any, Any] = {},
) -> None:
def audit_object_changed(
username: str,
object_type: str,
before: Dict[Any, Any],
after: Dict[Any, Any],
name: str = "",
debug_info: Dict[Any, Any] = {},
) -> None:
def audit_object_add(
username: str,
object_type: str,
after: Dict[Any, Any],
name: str = "",
debug_info: Dict[Any, Any] = {},
) -> None:
def audit_object_delete(
username: str,
object_type: str,
before: Dict[Any, Any],
name: str = "",
debug_info: Dict[Any, Any] = {},
) -> None:
def audit_script_run(
username: str, agent: "Agent", script: str, debug_info: Dict[Any, Any] = {}
) -> None:
def audit_user_failed_login(username: str, debug_info: Dict[Any, Any] = {}) -> None:
def audit_user_failed_twofactor(
username: str, debug_info: Dict[Any, Any] = {}
) -> None:
def audit_user_login_successful(
username: str, debug_info: Dict[Any, Any] = {}
) -> None:
def audit_url_action(
username: str,
urlaction: "URLAction",
instance: "Union[Agent, Client, Site]",
debug_info: Dict[Any, Any] = {},
) -> None:
def audit_bulk_action(
username: str,
action: str,
affected: Dict[str, Any],
debug_info: Dict[Any, Any] = {},
) -> None:
class PendingAction(models.Model):
def __str__(self) -> str:
def due(self) -> str:
def description(self) -> Optional[str]:
def remove_orphaned_win_tasks(self) -> str:
class Alert(models.Model):
def __str__(self) -> str:
def assigned_agent(self) -> "Optional[Agent]":
def site(self) -> "Site":
def client(self) -> "Client":
def resolve(self) -> None:
def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
def parse_script_args(self, args: List[str]) -> List[str]:
def server_maintenance(request):
from tacticalrmm.utils import reload_nats
if "action" not in request.data:
return notify_error("The data is incorrect")
if request.data["action"] == "reload_nats":
reload_nats()
return Response("Nats configuration was reloaded successfully.")
if request.data["action"] == "rm_orphaned_tasks":
from autotasks.tasks import remove_orphaned_win_tasks
remove_orphaned_win_tasks.delay()
return Response("The task has been initiated.")
if request.data["action"] == "prune_db":
from logs.models import AuditLog, PendingAction
if "prune_tables" not in request.data:
return notify_error("The data is incorrect.")
tables = request.data["prune_tables"]
records_count = 0
if "audit_logs" in tables:
auditlogs = AuditLog.objects.filter(action=AuditActionType.CHECK_RUN)
records_count += auditlogs.count()
auditlogs.delete()
if "pending_actions" in tables:
pendingactions = PendingAction.objects.filter(status=PAStatus.COMPLETED)
records_count += pendingactions.count()
pendingactions.delete()
if "alerts" in tables:
from alerts.models import Alert
alerts = Alert.objects.all()
records_count += alerts.count()
alerts.delete()
return Response(f"{records_count} records were pruned from the database")
return notify_error("The data is incorrect") | null |
189,084 | from django.db import migrations
def link_sites_to_agents(apps, schema_editor):
Agent = apps.get_model("agents", "Agent")
Site = apps.get_model("clients", "Site")
for agent in Agent.objects.all():
site = Site.objects.get(client__client=agent.client, site=agent.site)
agent.site_link = site
agent.save() | null |
189,085 | from django.db import migrations
def reverse(apps, schema_editor):
Agent = apps.get_model("agents", "Agent")
for agent in Agent.objects.all():
agent.site = agent.site_link.site
agent.client = agent.site_link.client.client
agent.save() | null |
189,086 | import datetime as dt
from time import sleep
from typing import TYPE_CHECKING, Optional
from django.core.management import call_command
from django.utils import timezone as djangotime
from agents.models import Agent
from core.utils import get_core_settings
from logs.models import DebugLog
from scripts.models import Script
from tacticalrmm.celery import app
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_OUTAGES_LOCK,
AGENT_STATUS_OVERDUE,
CheckStatus,
DebugLogType,
)
from tacticalrmm.helpers import rand_range
from tacticalrmm.utils import redis_lock
def auto_self_agent_update_task() -> None:
call_command("update_agents") | null |
189,087 | import datetime as dt
from time import sleep
from typing import TYPE_CHECKING, Optional
from django.core.management import call_command
from django.utils import timezone as djangotime
from agents.models import Agent
from core.utils import get_core_settings
from logs.models import DebugLog
from scripts.models import Script
from tacticalrmm.celery import app
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_OUTAGES_LOCK,
AGENT_STATUS_OVERDUE,
CheckStatus,
DebugLogType,
)
from tacticalrmm.helpers import rand_range
from tacticalrmm.utils import redis_lock
class Alert(models.Model):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="agent",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_check = models.ForeignKey(
"checks.Check",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
alert_type = models.CharField(
max_length=20, choices=AlertType.choices, default=AlertType.AVAILABILITY
)
message = models.TextField(null=True, blank=True)
alert_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
snoozed = models.BooleanField(default=False)
snooze_until = models.DateTimeField(null=True, blank=True)
resolved = models.BooleanField(default=False)
resolved_on = models.DateTimeField(null=True, blank=True)
severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_sent = models.DateTimeField(null=True, blank=True)
resolved_email_sent = models.DateTimeField(null=True, blank=True)
sms_sent = models.DateTimeField(null=True, blank=True)
resolved_sms_sent = models.DateTimeField(null=True, blank=True)
hidden = models.BooleanField(default=False)
action_run = models.DateTimeField(null=True, blank=True)
action_stdout = models.TextField(null=True, blank=True)
action_stderr = models.TextField(null=True, blank=True)
action_retcode = models.BigIntegerField(null=True, blank=True)
action_execution_time = models.CharField(max_length=100, null=True, blank=True)
resolved_action_run = models.DateTimeField(null=True, blank=True)
resolved_action_stdout = models.TextField(null=True, blank=True)
resolved_action_stderr = models.TextField(null=True, blank=True)
resolved_action_retcode = models.BigIntegerField(null=True, blank=True)
resolved_action_execution_time = models.CharField(
max_length=100, null=True, blank=True
)
def __str__(self) -> str:
return f"{self.alert_type} - {self.message}"
def assigned_agent(self) -> "Optional[Agent]":
return self.agent
def site(self) -> "Site":
return self.agent.site
def client(self) -> "Client":
return self.agent.client
def resolve(self) -> None:
self.resolved = True
self.resolved_on = djangotime.now()
self.snoozed = False
self.snooze_until = None
self.save(update_fields=["resolved", "resolved_on", "snoozed", "snooze_until"])
def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
if not cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
agent=agent,
alert_type=AlertType.AVAILABILITY,
severity=AlertSeverity.ERROR,
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
# need to pass agent if the check is a policy
if not cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_check=check,
agent=agent,
alert_type=AlertType.CHECK,
severity=(
check.alert_severity
if check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else alert_severity
),
message=f"{agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_check=check,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
if not cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_task=task,
agent=agent,
alert_type=AlertType.TASK,
severity=task.alert_severity,
message=f"{agent.hostname} has task: {task.name} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_task=task,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
dashboard_severities = None
email_severities = None
text_severities = None
always_dashboard = None
always_email = None
always_text = None
alert_interval = None
email_task = None
text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_outage_email_task, agent_outage_sms_task
email_task = agent_outage_email_task
text_task = agent_outage_sms_task
email_alert = instance.overdue_email_alert
text_alert = instance.overdue_text_alert
dashboard_alert = instance.overdue_dashboard_alert
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
alert_severity = AlertSeverity.ERROR
agent = instance
dashboard_severities = [AlertSeverity.ERROR]
email_severities = [AlertSeverity.ERROR]
text_severities = [AlertSeverity.ERROR]
# set alert_template settings
if alert_template:
always_dashboard = alert_template.agent_always_alert
always_email = alert_template.agent_always_email
always_text = alert_template.agent_always_text
alert_interval = alert_template.agent_periodic_alert_days
run_script_action = alert_template.agent_script_actions
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_check_email_alert_task,
handle_check_sms_alert_task,
)
email_task = handle_check_email_alert_task
text_task = handle_check_sms_alert_task
email_alert = instance.assigned_check.email_alert
text_alert = instance.assigned_check.text_alert
dashboard_alert = instance.assigned_check.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = (
instance.assigned_check.alert_severity
if instance.assigned_check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else instance.alert_severity
)
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = (
alert_template.check_dashboard_alert_severity
or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
AlertSeverity.INFO,
]
)
email_severities = alert_template.check_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.check_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.check_always_alert
always_email = alert_template.check_always_email
always_text = alert_template.check_always_text
alert_interval = alert_template.check_periodic_alert_days
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import handle_task_email_alert, handle_task_sms_alert
email_task = handle_task_email_alert
text_task = handle_task_sms_alert
email_alert = instance.task.email_alert
text_alert = instance.task.text_alert
dashboard_alert = instance.task.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = instance.task.alert_severity
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = alert_template.task_dashboard_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
email_severities = alert_template.task_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.task_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.task_always_alert
always_email = alert_template.task_always_email
always_text = alert_template.task_always_text
alert_interval = alert_template.task_periodic_alert_days
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
# check if alert severity changed and update the alert
if alert_severity != alert.severity:
alert.severity = alert_severity
alert.save(update_fields=["severity"])
# create alert in dashboard if enabled
if dashboard_alert or always_dashboard:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and dashboard_severities
and alert.severity in dashboard_severities
):
alert.hidden = False
alert.save(update_fields=["hidden"])
# send email if enabled
if email_alert or always_email:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and email_severities
and alert.severity in email_severities
):
email_task.delay(
pk=alert.pk,
alert_interval=alert_interval,
)
# send text if enabled
if text_alert or always_text:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and text_severities
and alert.severity in text_severities
):
text_task.delay(pk=alert.pk, alert_interval=alert_interval)
# check if any scripts should be run
if (
alert_template
and alert_template.action
and run_script_action
and not alert.action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.action,
username="alert-action-failure",
)
r = agent.run_script(
scriptpk=alert_template.action.pk,
args=alert.parse_script_args(alert_template.action_args),
timeout=alert_template.action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.action_retcode = r["retcode"]
alert.action_stdout = r["stdout"]
alert.action_stderr = r["stderr"]
alert.action_execution_time = "{:.4f}".format(r["execution_time"])
alert.action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) failure alert",
)
def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
email_on_resolved = False
text_on_resolved = False
resolved_email_task = None
resolved_text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_recovery_email_task, agent_recovery_sms_task
resolved_email_task = agent_recovery_email_task
resolved_text_task = agent_recovery_sms_task
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
agent = instance
if alert_template:
email_on_resolved = alert_template.agent_email_on_resolved
text_on_resolved = alert_template.agent_text_on_resolved
run_script_action = alert_template.agent_script_actions
if agent.overdue_email_alert:
email_on_resolved = True
if agent.overdue_text_alert:
text_on_resolved = True
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_resolved_check_email_alert_task,
handle_resolved_check_sms_alert_task,
)
resolved_email_task = handle_resolved_check_email_alert_task
resolved_text_task = handle_resolved_check_sms_alert_task
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.check_email_on_resolved
text_on_resolved = alert_template.check_text_on_resolved
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import (
handle_resolved_task_email_alert,
handle_resolved_task_sms_alert,
)
resolved_email_task = handle_resolved_task_email_alert
resolved_text_task = handle_resolved_task_sms_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.task_email_on_resolved
text_on_resolved = alert_template.task_text_on_resolved
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
alert.resolve()
# check if a resolved email notification should be sent
if email_on_resolved and not alert.resolved_email_sent:
resolved_email_task.delay(pk=alert.pk)
# check if resolved text should be sent
if text_on_resolved and not alert.resolved_sms_sent:
resolved_text_task.delay(pk=alert.pk)
# check if resolved script should be run
if (
alert_template
and alert_template.resolved_action
and run_script_action
and not alert.resolved_action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.resolved_action,
username="alert-action-resolved",
)
r = agent.run_script(
scriptpk=alert_template.resolved_action.pk,
args=alert.parse_script_args(alert_template.resolved_action_args),
timeout=alert_template.resolved_action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.resolved_action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.resolved_action_retcode = r["retcode"]
alert.resolved_action_stdout = r["stdout"]
alert.resolved_action_stderr = r["stderr"]
alert.resolved_action_execution_time = "{:.4f}".format(
r["execution_time"]
)
alert.resolved_action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) resolved alert",
)
def parse_script_args(self, args: List[str]) -> List[str]:
if not args:
return []
temp_args = []
# pattern to match for injection
pattern = re.compile(".*\\{\\{alert\\.(.*)\\}\\}.*")
for arg in args:
if match := pattern.match(arg):
name = match.group(1)
# check if attr exists and isn't a function
if hasattr(self, name) and not callable(getattr(self, name)):
value = f"'{getattr(self, name)}'"
else:
continue
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
except re.error:
temp_args.append(re.sub("\\{\\{.*\\}\\}", re.escape(value), arg))
except Exception as e:
DebugLog.error(log_type=DebugLogType.SCRIPTING, message=str(e))
continue
else:
temp_args.append(arg)
return temp_args
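# --- illustrative sketch, not part of the original source ---
# A minimal, standalone demonstration of the placeholder substitution that
# parse_script_args above performs: a "{{alert.<attr>}}" token in a script
# argument is replaced with the quoted value of that attribute on the alert.
# The Django model is omitted; the attrs dict and attribute names are
# hypothetical stand-ins.
import re as _re

def _demo_parse_script_args(attrs: dict, args: list) -> list:
    pattern = _re.compile(".*\\{\\{alert\\.(.*)\\}\\}.*")
    out = []
    for arg in args:
        if match := pattern.match(arg):
            name = match.group(1)
            if name in attrs:
                # quote the value, mirroring the model method
                out.append(_re.sub("\\{\\{.*\\}\\}", f"'{attrs[name]}'", arg))
            # unknown attribute names are dropped, as in the model method
        else:
            out.append(arg)
    return out

# _demo_parse_script_args({"severity": "error"}, ["-Severity {{alert.severity}}"])
# -> ["-Severity 'error'"]
# --- end sketch ---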
def agent_outage_email_task(pk: int, alert_interval: Optional[float] = None) -> str:
from alerts.models import Alert
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
if not alert.email_sent:
sleep(rand_range(100, 1500))
alert.agent.send_outage_email()
alert.email_sent = djangotime.now()
alert.save(update_fields=["email_sent"])
else:
if alert_interval:
# send an email only if the last email sent is older than alert interval
delta = djangotime.now() - dt.timedelta(days=alert_interval)
if alert.email_sent < delta:
sleep(rand_range(100, 1500))
alert.agent.send_outage_email()
alert.email_sent = djangotime.now()
alert.save(update_fields=["email_sent"])
return "ok" | null |
189,088 | import datetime as dt
from time import sleep
from typing import TYPE_CHECKING, Optional
from django.core.management import call_command
from django.utils import timezone as djangotime
from agents.models import Agent
from core.utils import get_core_settings
from logs.models import DebugLog
from scripts.models import Script
from tacticalrmm.celery import app
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_OUTAGES_LOCK,
AGENT_STATUS_OVERDUE,
CheckStatus,
DebugLogType,
)
from tacticalrmm.helpers import rand_range
from tacticalrmm.utils import redis_lock
class Alert(models.Model):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="agent",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_check = models.ForeignKey(
"checks.Check",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
alert_type = models.CharField(
max_length=20, choices=AlertType.choices, default=AlertType.AVAILABILITY
)
message = models.TextField(null=True, blank=True)
alert_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
snoozed = models.BooleanField(default=False)
snooze_until = models.DateTimeField(null=True, blank=True)
resolved = models.BooleanField(default=False)
resolved_on = models.DateTimeField(null=True, blank=True)
severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_sent = models.DateTimeField(null=True, blank=True)
resolved_email_sent = models.DateTimeField(null=True, blank=True)
sms_sent = models.DateTimeField(null=True, blank=True)
resolved_sms_sent = models.DateTimeField(null=True, blank=True)
hidden = models.BooleanField(default=False)
action_run = models.DateTimeField(null=True, blank=True)
action_stdout = models.TextField(null=True, blank=True)
action_stderr = models.TextField(null=True, blank=True)
action_retcode = models.BigIntegerField(null=True, blank=True)
action_execution_time = models.CharField(max_length=100, null=True, blank=True)
resolved_action_run = models.DateTimeField(null=True, blank=True)
resolved_action_stdout = models.TextField(null=True, blank=True)
resolved_action_stderr = models.TextField(null=True, blank=True)
resolved_action_retcode = models.BigIntegerField(null=True, blank=True)
resolved_action_execution_time = models.CharField(
max_length=100, null=True, blank=True
)
def __str__(self) -> str:
return f"{self.alert_type} - {self.message}"
def assigned_agent(self) -> "Optional[Agent]":
return self.agent
def site(self) -> "Site":
return self.agent.site
def client(self) -> "Client":
return self.agent.client
def resolve(self) -> None:
self.resolved = True
self.resolved_on = djangotime.now()
self.snoozed = False
self.snooze_until = None
self.save(update_fields=["resolved", "resolved_on", "snoozed", "snooze_until"])
def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
if not cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
agent=agent,
alert_type=AlertType.AVAILABILITY,
severity=AlertSeverity.ERROR,
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
# need to pass agent if the check is a policy
if not cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_check=check,
agent=agent,
alert_type=AlertType.CHECK,
severity=(
check.alert_severity
if check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else alert_severity
),
message=f"{agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_check=check,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
if not cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_task=task,
agent=agent,
alert_type=AlertType.TASK,
severity=task.alert_severity,
message=f"{agent.hostname} has task: {task.name} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_task=task,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
dashboard_severities = None
email_severities = None
text_severities = None
always_dashboard = None
always_email = None
always_text = None
alert_interval = None
email_task = None
text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_outage_email_task, agent_outage_sms_task
email_task = agent_outage_email_task
text_task = agent_outage_sms_task
email_alert = instance.overdue_email_alert
text_alert = instance.overdue_text_alert
dashboard_alert = instance.overdue_dashboard_alert
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
alert_severity = AlertSeverity.ERROR
agent = instance
dashboard_severities = [AlertSeverity.ERROR]
email_severities = [AlertSeverity.ERROR]
text_severities = [AlertSeverity.ERROR]
# set alert_template settings
if alert_template:
always_dashboard = alert_template.agent_always_alert
always_email = alert_template.agent_always_email
always_text = alert_template.agent_always_text
alert_interval = alert_template.agent_periodic_alert_days
run_script_action = alert_template.agent_script_actions
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_check_email_alert_task,
handle_check_sms_alert_task,
)
email_task = handle_check_email_alert_task
text_task = handle_check_sms_alert_task
email_alert = instance.assigned_check.email_alert
text_alert = instance.assigned_check.text_alert
dashboard_alert = instance.assigned_check.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = (
instance.assigned_check.alert_severity
if instance.assigned_check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else instance.alert_severity
)
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = (
alert_template.check_dashboard_alert_severity
or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
AlertSeverity.INFO,
]
)
email_severities = alert_template.check_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.check_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.check_always_alert
always_email = alert_template.check_always_email
always_text = alert_template.check_always_text
alert_interval = alert_template.check_periodic_alert_days
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import handle_task_email_alert, handle_task_sms_alert
email_task = handle_task_email_alert
text_task = handle_task_sms_alert
email_alert = instance.task.email_alert
text_alert = instance.task.text_alert
dashboard_alert = instance.task.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = instance.task.alert_severity
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = alert_template.task_dashboard_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
email_severities = alert_template.task_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.task_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.task_always_alert
always_email = alert_template.task_always_email
always_text = alert_template.task_always_text
alert_interval = alert_template.task_periodic_alert_days
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
# check if alert severity changed and update the alert
if alert_severity != alert.severity:
alert.severity = alert_severity
alert.save(update_fields=["severity"])
# create alert in dashboard if enabled
if dashboard_alert or always_dashboard:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and dashboard_severities
and alert.severity in dashboard_severities
):
alert.hidden = False
alert.save(update_fields=["hidden"])
# send email if enabled
if email_alert or always_email:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and email_severities
and alert.severity in email_severities
):
email_task.delay(
pk=alert.pk,
alert_interval=alert_interval,
)
# send text if enabled
if text_alert or always_text:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and text_severities
and alert.severity in text_severities
):
text_task.delay(pk=alert.pk, alert_interval=alert_interval)
# check if any scripts should be run
if (
alert_template
and alert_template.action
and run_script_action
and not alert.action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.action,
username="alert-action-failure",
)
r = agent.run_script(
scriptpk=alert_template.action.pk,
args=alert.parse_script_args(alert_template.action_args),
timeout=alert_template.action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.action_retcode = r["retcode"]
alert.action_stdout = r["stdout"]
alert.action_stderr = r["stderr"]
alert.action_execution_time = "{:.4f}".format(r["execution_time"])
alert.action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) failure alert",
)
def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
email_on_resolved = False
text_on_resolved = False
resolved_email_task = None
resolved_text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_recovery_email_task, agent_recovery_sms_task
resolved_email_task = agent_recovery_email_task
resolved_text_task = agent_recovery_sms_task
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
agent = instance
if alert_template:
email_on_resolved = alert_template.agent_email_on_resolved
text_on_resolved = alert_template.agent_text_on_resolved
run_script_action = alert_template.agent_script_actions
if agent.overdue_email_alert:
email_on_resolved = True
if agent.overdue_text_alert:
text_on_resolved = True
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_resolved_check_email_alert_task,
handle_resolved_check_sms_alert_task,
)
resolved_email_task = handle_resolved_check_email_alert_task
resolved_text_task = handle_resolved_check_sms_alert_task
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.check_email_on_resolved
text_on_resolved = alert_template.check_text_on_resolved
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import (
handle_resolved_task_email_alert,
handle_resolved_task_sms_alert,
)
resolved_email_task = handle_resolved_task_email_alert
resolved_text_task = handle_resolved_task_sms_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.task_email_on_resolved
text_on_resolved = alert_template.task_text_on_resolved
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
alert.resolve()
# check if a resolved email notification should be sent
if email_on_resolved and not alert.resolved_email_sent:
resolved_email_task.delay(pk=alert.pk)
# check if resolved text should be sent
if text_on_resolved and not alert.resolved_sms_sent:
resolved_text_task.delay(pk=alert.pk)
# check if resolved script should be run
if (
alert_template
and alert_template.resolved_action
and run_script_action
and not alert.resolved_action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.resolved_action,
username="alert-action-resolved",
)
r = agent.run_script(
scriptpk=alert_template.resolved_action.pk,
args=alert.parse_script_args(alert_template.resolved_action_args),
timeout=alert_template.resolved_action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.resolved_action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.resolved_action_retcode = r["retcode"]
alert.resolved_action_stdout = r["stdout"]
alert.resolved_action_stderr = r["stderr"]
alert.resolved_action_execution_time = "{:.4f}".format(
r["execution_time"]
)
alert.resolved_action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) resolved alert",
)
def parse_script_args(self, args: List[str]) -> List[str]:
if not args:
return []
temp_args = []
# pattern to match for injection
pattern = re.compile(".*\\{\\{alert\\.(.*)\\}\\}.*")
for arg in args:
if match := pattern.match(arg):
name = match.group(1)
# check if attr exists and isn't a function
if hasattr(self, name) and not callable(getattr(self, name)):
value = f"'{getattr(self, name)}'"
else:
continue
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
except re.error:
temp_args.append(re.sub("\\{\\{.*\\}\\}", re.escape(value), arg))
except Exception as e:
DebugLog.error(log_type=DebugLogType.SCRIPTING, message=str(e))
continue
else:
temp_args.append(arg)
return temp_args
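# --- illustrative sketch, not part of the original source ---
# The notification gates in handle_alert_failure above read
#   not alert_template or alert_template and severities and alert.severity in severities
# Because `and` binds tighter than `or`, this notifies when no template is set,
# or when a template is set, severities are configured, and the alert severity
# is one of them. A standalone boolean restatement with hypothetical inputs:

def _should_notify(has_template: bool, severities, severity) -> bool:
    return (not has_template) or (
        has_template and bool(severities) and severity in severities
    )

# _should_notify(False, None, "warning")                 -> True  (no template)
# _should_notify(True, ["error"], "warning")             -> False (severity not enabled)
# _should_notify(True, ["error", "warning"], "warning")  -> True
# --- end sketch ---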
def agent_recovery_email_task(pk: int) -> str:
from alerts.models import Alert
sleep(rand_range(100, 1500))
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
alert.agent.send_recovery_email()
alert.resolved_email_sent = djangotime.now()
alert.save(update_fields=["resolved_email_sent"])
return "ok" | null |
189,089 | import datetime as dt
from time import sleep
from typing import TYPE_CHECKING, Optional
from django.core.management import call_command
from django.utils import timezone as djangotime
from agents.models import Agent
from core.utils import get_core_settings
from logs.models import DebugLog
from scripts.models import Script
from tacticalrmm.celery import app
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_OUTAGES_LOCK,
AGENT_STATUS_OVERDUE,
CheckStatus,
DebugLogType,
)
from tacticalrmm.helpers import rand_range
from tacticalrmm.utils import redis_lock
class Alert(models.Model):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="agent",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_check = models.ForeignKey(
"checks.Check",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
alert_type = models.CharField(
max_length=20, choices=AlertType.choices, default=AlertType.AVAILABILITY
)
message = models.TextField(null=True, blank=True)
alert_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
snoozed = models.BooleanField(default=False)
snooze_until = models.DateTimeField(null=True, blank=True)
resolved = models.BooleanField(default=False)
resolved_on = models.DateTimeField(null=True, blank=True)
severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_sent = models.DateTimeField(null=True, blank=True)
resolved_email_sent = models.DateTimeField(null=True, blank=True)
sms_sent = models.DateTimeField(null=True, blank=True)
resolved_sms_sent = models.DateTimeField(null=True, blank=True)
hidden = models.BooleanField(default=False)
action_run = models.DateTimeField(null=True, blank=True)
action_stdout = models.TextField(null=True, blank=True)
action_stderr = models.TextField(null=True, blank=True)
action_retcode = models.BigIntegerField(null=True, blank=True)
action_execution_time = models.CharField(max_length=100, null=True, blank=True)
resolved_action_run = models.DateTimeField(null=True, blank=True)
resolved_action_stdout = models.TextField(null=True, blank=True)
resolved_action_stderr = models.TextField(null=True, blank=True)
resolved_action_retcode = models.BigIntegerField(null=True, blank=True)
resolved_action_execution_time = models.CharField(
max_length=100, null=True, blank=True
)
def __str__(self) -> str:
return f"{self.alert_type} - {self.message}"
def assigned_agent(self) -> "Optional[Agent]":
return self.agent
def site(self) -> "Site":
return self.agent.site
def client(self) -> "Client":
return self.agent.client
def resolve(self) -> None:
self.resolved = True
self.resolved_on = djangotime.now()
self.snoozed = False
self.snooze_until = None
self.save(update_fields=["resolved", "resolved_on", "snoozed", "snooze_until"])
def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
if not cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
agent=agent,
alert_type=AlertType.AVAILABILITY,
severity=AlertSeverity.ERROR,
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
# need to pass agent if the check is a policy
if not cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_check=check,
agent=agent,
alert_type=AlertType.CHECK,
severity=(
check.alert_severity
if check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else alert_severity
),
message=f"{agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_check=check,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
if not cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_task=task,
agent=agent,
alert_type=AlertType.TASK,
severity=task.alert_severity,
message=f"{agent.hostname} has task: {task.name} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_task=task,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
dashboard_severities = None
email_severities = None
text_severities = None
always_dashboard = None
always_email = None
always_text = None
alert_interval = None
email_task = None
text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_outage_email_task, agent_outage_sms_task
email_task = agent_outage_email_task
text_task = agent_outage_sms_task
email_alert = instance.overdue_email_alert
text_alert = instance.overdue_text_alert
dashboard_alert = instance.overdue_dashboard_alert
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
alert_severity = AlertSeverity.ERROR
agent = instance
dashboard_severities = [AlertSeverity.ERROR]
email_severities = [AlertSeverity.ERROR]
text_severities = [AlertSeverity.ERROR]
# set alert_template settings
if alert_template:
always_dashboard = alert_template.agent_always_alert
always_email = alert_template.agent_always_email
always_text = alert_template.agent_always_text
alert_interval = alert_template.agent_periodic_alert_days
run_script_action = alert_template.agent_script_actions
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_check_email_alert_task,
handle_check_sms_alert_task,
)
email_task = handle_check_email_alert_task
text_task = handle_check_sms_alert_task
email_alert = instance.assigned_check.email_alert
text_alert = instance.assigned_check.text_alert
dashboard_alert = instance.assigned_check.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = (
instance.assigned_check.alert_severity
if instance.assigned_check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else instance.alert_severity
)
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = (
alert_template.check_dashboard_alert_severity
or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
AlertSeverity.INFO,
]
)
email_severities = alert_template.check_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.check_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.check_always_alert
always_email = alert_template.check_always_email
always_text = alert_template.check_always_text
alert_interval = alert_template.check_periodic_alert_days
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import handle_task_email_alert, handle_task_sms_alert
email_task = handle_task_email_alert
text_task = handle_task_sms_alert
email_alert = instance.task.email_alert
text_alert = instance.task.text_alert
dashboard_alert = instance.task.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = instance.task.alert_severity
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = alert_template.task_dashboard_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
email_severities = alert_template.task_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.task_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.task_always_alert
always_email = alert_template.task_always_email
always_text = alert_template.task_always_text
alert_interval = alert_template.task_periodic_alert_days
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
# check if alert severity changed and update the alert
if alert_severity != alert.severity:
alert.severity = alert_severity
alert.save(update_fields=["severity"])
# create alert in dashboard if enabled
if dashboard_alert or always_dashboard:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and dashboard_severities
and alert.severity in dashboard_severities
):
alert.hidden = False
alert.save(update_fields=["hidden"])
# send email if enabled
if email_alert or always_email:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and email_severities
and alert.severity in email_severities
):
email_task.delay(
pk=alert.pk,
alert_interval=alert_interval,
)
# send text if enabled
if text_alert or always_text:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and text_severities
and alert.severity in text_severities
):
text_task.delay(pk=alert.pk, alert_interval=alert_interval)
# check if any scripts should be run
if (
alert_template
and alert_template.action
and run_script_action
and not alert.action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.action,
username="alert-action-failure",
)
r = agent.run_script(
scriptpk=alert_template.action.pk,
args=alert.parse_script_args(alert_template.action_args),
timeout=alert_template.action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.action_retcode = r["retcode"]
alert.action_stdout = r["stdout"]
alert.action_stderr = r["stderr"]
alert.action_execution_time = "{:.4f}".format(r["execution_time"])
alert.action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) failure alert",
)
def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
email_on_resolved = False
text_on_resolved = False
resolved_email_task = None
resolved_text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_recovery_email_task, agent_recovery_sms_task
resolved_email_task = agent_recovery_email_task
resolved_text_task = agent_recovery_sms_task
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
agent = instance
if alert_template:
email_on_resolved = alert_template.agent_email_on_resolved
text_on_resolved = alert_template.agent_text_on_resolved
run_script_action = alert_template.agent_script_actions
if agent.overdue_email_alert:
email_on_resolved = True
if agent.overdue_text_alert:
text_on_resolved = True
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_resolved_check_email_alert_task,
handle_resolved_check_sms_alert_task,
)
resolved_email_task = handle_resolved_check_email_alert_task
resolved_text_task = handle_resolved_check_sms_alert_task
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.check_email_on_resolved
text_on_resolved = alert_template.check_text_on_resolved
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import (
handle_resolved_task_email_alert,
handle_resolved_task_sms_alert,
)
resolved_email_task = handle_resolved_task_email_alert
resolved_text_task = handle_resolved_task_sms_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.task_email_on_resolved
text_on_resolved = alert_template.task_text_on_resolved
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
alert.resolve()
# check if a resolved email notification should be sent
if email_on_resolved and not alert.resolved_email_sent:
resolved_email_task.delay(pk=alert.pk)
# check if resolved text should be sent
if text_on_resolved and not alert.resolved_sms_sent:
resolved_text_task.delay(pk=alert.pk)
# check if resolved script should be run
if (
alert_template
and alert_template.resolved_action
and run_script_action
and not alert.resolved_action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.resolved_action,
username="alert-action-resolved",
)
r = agent.run_script(
scriptpk=alert_template.resolved_action.pk,
args=alert.parse_script_args(alert_template.resolved_action_args),
timeout=alert_template.resolved_action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.resolved_action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.resolved_action_retcode = r["retcode"]
alert.resolved_action_stdout = r["stdout"]
alert.resolved_action_stderr = r["stderr"]
alert.resolved_action_execution_time = "{:.4f}".format(
r["execution_time"]
)
alert.resolved_action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) resolved alert",
)
def parse_script_args(self, args: List[str]) -> List[str]:
if not args:
return []
temp_args = []
# pattern to match for injection
pattern = re.compile(".*\\{\\{alert\\.(.*)\\}\\}.*")
for arg in args:
if match := pattern.match(arg):
name = match.group(1)
# check if attr exists and isn't a function
if hasattr(self, name) and not callable(getattr(self, name)):
value = f"'{getattr(self, name)}'"
else:
continue
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
except re.error:
temp_args.append(re.sub("\\{\\{.*\\}\\}", re.escape(value), arg))
except Exception as e:
DebugLog.error(log_type=DebugLogType.SCRIPTING, message=str(e))
continue
else:
temp_args.append(arg)
return temp_args
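# --- illustrative sketch, not part of the original source ---
# The outage task below re-sends a notification only when the previous one is
# older than `alert_interval` days. The same check restated with plain
# datetimes instead of the Alert model and Django's timezone helper:
import datetime as _dt

def _should_resend(last_sent: _dt.datetime, interval_days: float, now: _dt.datetime) -> bool:
    cutoff = now - _dt.timedelta(days=interval_days)
    return last_sent < cutoff

# _now = _dt.datetime(2024, 1, 10)
# _should_resend(_dt.datetime(2024, 1, 7), 1, _now) -> True  (older than 1 day)
# _should_resend(_dt.datetime(2024, 1, 7), 7, _now) -> False (within the 7-day window)
# --- end sketch ---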
def agent_outage_sms_task(pk: int, alert_interval: Optional[float] = None) -> str:
from alerts.models import Alert
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
if not alert.sms_sent:
sleep(rand_range(100, 1500))
alert.agent.send_outage_sms()
alert.sms_sent = djangotime.now()
alert.save(update_fields=["sms_sent"])
else:
if alert_interval:
# send an sms only if the last sms sent is older than alert interval
delta = djangotime.now() - dt.timedelta(days=alert_interval)
if alert.sms_sent < delta:
sleep(rand_range(100, 1500))
alert.agent.send_outage_sms()
alert.sms_sent = djangotime.now()
alert.save(update_fields=["sms_sent"])
return "ok" | null |
189,090 | import datetime as dt
from time import sleep
from typing import TYPE_CHECKING, Optional
from django.core.management import call_command
from django.utils import timezone as djangotime
from agents.models import Agent
from core.utils import get_core_settings
from logs.models import DebugLog
from scripts.models import Script
from tacticalrmm.celery import app
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_OUTAGES_LOCK,
AGENT_STATUS_OVERDUE,
CheckStatus,
DebugLogType,
)
from tacticalrmm.helpers import rand_range
from tacticalrmm.utils import redis_lock
class Alert(models.Model):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="agent",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_check = models.ForeignKey(
"checks.Check",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
alert_type = models.CharField(
max_length=20, choices=AlertType.choices, default=AlertType.AVAILABILITY
)
message = models.TextField(null=True, blank=True)
alert_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
snoozed = models.BooleanField(default=False)
snooze_until = models.DateTimeField(null=True, blank=True)
resolved = models.BooleanField(default=False)
resolved_on = models.DateTimeField(null=True, blank=True)
severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_sent = models.DateTimeField(null=True, blank=True)
resolved_email_sent = models.DateTimeField(null=True, blank=True)
sms_sent = models.DateTimeField(null=True, blank=True)
resolved_sms_sent = models.DateTimeField(null=True, blank=True)
hidden = models.BooleanField(default=False)
action_run = models.DateTimeField(null=True, blank=True)
action_stdout = models.TextField(null=True, blank=True)
action_stderr = models.TextField(null=True, blank=True)
action_retcode = models.BigIntegerField(null=True, blank=True)
action_execution_time = models.CharField(max_length=100, null=True, blank=True)
resolved_action_run = models.DateTimeField(null=True, blank=True)
resolved_action_stdout = models.TextField(null=True, blank=True)
resolved_action_stderr = models.TextField(null=True, blank=True)
resolved_action_retcode = models.BigIntegerField(null=True, blank=True)
resolved_action_execution_time = models.CharField(
max_length=100, null=True, blank=True
)
def __str__(self) -> str:
return f"{self.alert_type} - {self.message}"
def assigned_agent(self) -> "Optional[Agent]":
return self.agent
def site(self) -> "Site":
return self.agent.site
def client(self) -> "Client":
return self.agent.client
def resolve(self) -> None:
self.resolved = True
self.resolved_on = djangotime.now()
self.snoozed = False
self.snooze_until = None
self.save(update_fields=["resolved", "resolved_on", "snoozed", "snooze_until"])
def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
if not cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
agent=agent,
alert_type=AlertType.AVAILABILITY,
severity=AlertSeverity.ERROR,
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
# need to pass agent if the check is a policy
if not cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_check=check,
agent=agent,
alert_type=AlertType.CHECK,
severity=(
check.alert_severity
if check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else alert_severity
),
message=f"{agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_check=check,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
if not cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_task=task,
agent=agent,
alert_type=AlertType.TASK,
severity=task.alert_severity,
message=f"{agent.hostname} has task: {task.name} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_task=task,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
dashboard_severities = None
email_severities = None
text_severities = None
always_dashboard = None
always_email = None
always_text = None
alert_interval = None
email_task = None
text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_outage_email_task, agent_outage_sms_task
email_task = agent_outage_email_task
text_task = agent_outage_sms_task
email_alert = instance.overdue_email_alert
text_alert = instance.overdue_text_alert
dashboard_alert = instance.overdue_dashboard_alert
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
alert_severity = AlertSeverity.ERROR
agent = instance
dashboard_severities = [AlertSeverity.ERROR]
email_severities = [AlertSeverity.ERROR]
text_severities = [AlertSeverity.ERROR]
# set alert_template settings
if alert_template:
always_dashboard = alert_template.agent_always_alert
always_email = alert_template.agent_always_email
always_text = alert_template.agent_always_text
alert_interval = alert_template.agent_periodic_alert_days
run_script_action = alert_template.agent_script_actions
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_check_email_alert_task,
handle_check_sms_alert_task,
)
email_task = handle_check_email_alert_task
text_task = handle_check_sms_alert_task
email_alert = instance.assigned_check.email_alert
text_alert = instance.assigned_check.text_alert
dashboard_alert = instance.assigned_check.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = (
instance.assigned_check.alert_severity
if instance.assigned_check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else instance.alert_severity
)
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = (
alert_template.check_dashboard_alert_severity
or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
AlertSeverity.INFO,
]
)
email_severities = alert_template.check_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.check_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.check_always_alert
always_email = alert_template.check_always_email
always_text = alert_template.check_always_text
alert_interval = alert_template.check_periodic_alert_days
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import handle_task_email_alert, handle_task_sms_alert
email_task = handle_task_email_alert
text_task = handle_task_sms_alert
email_alert = instance.task.email_alert
text_alert = instance.task.text_alert
dashboard_alert = instance.task.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = instance.task.alert_severity
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = alert_template.task_dashboard_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
email_severities = alert_template.task_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.task_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.task_always_alert
always_email = alert_template.task_always_email
always_text = alert_template.task_always_text
alert_interval = alert_template.task_periodic_alert_days
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
# check if alert severity changed and update the alert
if alert_severity != alert.severity:
alert.severity = alert_severity
alert.save(update_fields=["severity"])
# create alert in dashboard if enabled
if dashboard_alert or always_dashboard:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and dashboard_severities
and alert.severity in dashboard_severities
):
alert.hidden = False
alert.save(update_fields=["hidden"])
# send email if enabled
if email_alert or always_email:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and email_severities
and alert.severity in email_severities
):
email_task.delay(
pk=alert.pk,
alert_interval=alert_interval,
)
# send text if enabled
if text_alert or always_text:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and text_severities
and alert.severity in text_severities
):
text_task.delay(pk=alert.pk, alert_interval=alert_interval)
# check if any scripts should be run
if (
alert_template
and alert_template.action
and run_script_action
and not alert.action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.action,
username="alert-action-failure",
)
r = agent.run_script(
scriptpk=alert_template.action.pk,
args=alert.parse_script_args(alert_template.action_args),
timeout=alert_template.action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.action_retcode = r["retcode"]
alert.action_stdout = r["stdout"]
alert.action_stderr = r["stderr"]
alert.action_execution_time = "{:.4f}".format(r["execution_time"])
alert.action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) failure alert",
)
def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
email_on_resolved = False
text_on_resolved = False
resolved_email_task = None
resolved_text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_recovery_email_task, agent_recovery_sms_task
resolved_email_task = agent_recovery_email_task
resolved_text_task = agent_recovery_sms_task
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
agent = instance
if alert_template:
email_on_resolved = alert_template.agent_email_on_resolved
text_on_resolved = alert_template.agent_text_on_resolved
run_script_action = alert_template.agent_script_actions
if agent.overdue_email_alert:
email_on_resolved = True
if agent.overdue_text_alert:
text_on_resolved = True
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_resolved_check_email_alert_task,
handle_resolved_check_sms_alert_task,
)
resolved_email_task = handle_resolved_check_email_alert_task
resolved_text_task = handle_resolved_check_sms_alert_task
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.check_email_on_resolved
text_on_resolved = alert_template.check_text_on_resolved
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import (
handle_resolved_task_email_alert,
handle_resolved_task_sms_alert,
)
resolved_email_task = handle_resolved_task_email_alert
resolved_text_task = handle_resolved_task_sms_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.task_email_on_resolved
text_on_resolved = alert_template.task_text_on_resolved
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
alert.resolve()
# check if a resolved email notification should be sent
if email_on_resolved and not alert.resolved_email_sent:
resolved_email_task.delay(pk=alert.pk)
# check if resolved text should be sent
if text_on_resolved and not alert.resolved_sms_sent:
resolved_text_task.delay(pk=alert.pk)
# check if resolved script should be run
if (
alert_template
and alert_template.resolved_action
and run_script_action
and not alert.resolved_action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.resolved_action,
username="alert-action-resolved",
)
r = agent.run_script(
scriptpk=alert_template.resolved_action.pk,
args=alert.parse_script_args(alert_template.resolved_action_args),
timeout=alert_template.resolved_action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.resolved_action_env_vars,
)
# the script ran and returned a structured result
if isinstance(r, dict):
alert.resolved_action_retcode = r["retcode"]
alert.resolved_action_stdout = r["stdout"]
alert.resolved_action_stderr = r["stderr"]
alert.resolved_action_execution_time = "{:.4f}".format(
r["execution_time"]
)
alert.resolved_action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) resolved alert",
)
def parse_script_args(self, args: List[str]) -> List[str]:
if not args:
return []
temp_args = []
# pattern to match for injection
pattern = re.compile(".*\\{\\{alert\\.(.*)\\}\\}.*")
for arg in args:
if match := pattern.match(arg):
name = match.group(1)
# check if attr exists and isn't a function
if hasattr(self, name) and not callable(getattr(self, name)):
value = f"'{getattr(self, name)}'"
else:
continue
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
except re.error:
temp_args.append(re.sub("\\{\\{.*\\}\\}", re.escape(value), arg))
except Exception as e:
DebugLog.error(log_type=DebugLogType.SCRIPTING, message=str(e))
continue
else:
temp_args.append(arg)
return temp_args
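# --- Illustrative sketch (not part of the model above) ---
# Shows how parse_script_args substitutes "{{alert.<attr>}}" placeholders
# with quoted attribute values. SimpleNamespace stands in for an Alert
# instance; the argument strings below are hypothetical.
import re
from types import SimpleNamespace
fake_alert = SimpleNamespace(message="Disk space low", severity="warning")
pattern = re.compile(".*\\{\\{alert\\.(.*)\\}\\}.*")
args = ["-Message {{alert.message}}", "-Level {{alert.severity}}", "-Plain"]
parsed = []
for arg in args:
    if m := pattern.match(arg):
        value = f"'{getattr(fake_alert, m.group(1))}'"
        parsed.append(re.sub("\\{\\{.*\\}\\}", value, arg))
    else:
        parsed.append(arg)
# parsed -> ["-Message 'Disk space low'", "-Level 'warning'", "-Plain"]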
def agent_recovery_sms_task(pk: int) -> str:
from alerts.models import Alert
sleep(rand_range(100, 1500))
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
alert.agent.send_recovery_sms()
alert.resolved_sms_sent = djangotime.now()
alert.save(update_fields=["resolved_sms_sent"])
return "ok" | null |
189,091 | import datetime as dt
from time import sleep
from typing import TYPE_CHECKING, Optional
from django.core.management import call_command
from django.utils import timezone as djangotime
from agents.models import Agent
from core.utils import get_core_settings
from logs.models import DebugLog
from scripts.models import Script
from tacticalrmm.celery import app
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_OUTAGES_LOCK,
AGENT_STATUS_OVERDUE,
CheckStatus,
DebugLogType,
)
from tacticalrmm.helpers import rand_range
from tacticalrmm.utils import redis_lock
class Alert(models.Model):
def __str__(self) -> str:
def assigned_agent(self) -> "Optional[Agent]":
def site(self) -> "Site":
def client(self) -> "Client":
def resolve(self) -> None:
def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
def parse_script_args(self, args: List[str]) -> List[str]:
def _get_agent_qs() -> "QuerySet[Agent]":
def agent_outages_task(self) -> str:
with redis_lock(AGENT_OUTAGES_LOCK, self.app.oid) as acquired:
if not acquired:
return f"{self.app.oid} still running"
from alerts.models import Alert
from core.tasks import _get_agent_qs
for agent in _get_agent_qs():
if agent.status == AGENT_STATUS_OVERDUE:
Alert.handle_alert_failure(agent)
return "completed" | null |
189,092 | import asyncio
import datetime as dt
import random
import string
import time
from io import StringIO
from pathlib import Path
from django.conf import settings
from django.db.models import Exists, OuterRef, Prefetch, Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from django.utils.dateparse import parse_datetime
from meshctrl.utils import get_login_token
from packaging import version as pyver
from rest_framework import serializers
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from core.tasks import sync_mesh_perms_task
from core.utils import (
get_core_settings,
get_mesh_ws_url,
remove_mesh_agent,
token_is_valid,
wake_on_lan,
)
from logs.models import AuditLog, DebugLog, PendingAction
from scripts.models import Script
from scripts.tasks import bulk_command_task, bulk_script_task
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_OFFLINE,
AGENT_STATUS_ONLINE,
AGENT_TABLE_DEFER,
AgentHistoryType,
AgentMonType,
AgentPlat,
CustomFieldModel,
DebugLogType,
EvtLogNames,
PAAction,
)
from tacticalrmm.helpers import date_is_in_past, notify_error
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from tacticalrmm.utils import get_default_timezone, reload_nats
from winupdate.models import WinUpdate, WinUpdatePolicy
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
from .models import Agent, AgentCustomField, AgentHistory, Note
from .permissions import (
AgentHistoryPerms,
AgentNotesPerms,
AgentPerms,
AgentWOLPerms,
EvtLogPerms,
InstallAgentPerms,
ManageProcPerms,
MeshPerms,
RebootAgentPerms,
RecoverAgentPerms,
RunBulkPerms,
RunScriptPerms,
SendCMDPerms,
UpdateAgentPerms,
)
from .serializers import (
AgentCustomFieldSerializer,
AgentHistorySerializer,
AgentHostnameSerializer,
AgentNoteSerializer,
AgentSerializer,
AgentTableSerializer,
)
from .tasks import (
bulk_recover_agents_task,
run_script_email_results_task,
send_agent_update_task,
)
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat: "AgentPlat" = models.CharField( # type: ignore
max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
goarch: "GoArch" = models.CharField( # type: ignore
max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(
max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
return self.hostname
def save(self, *args, **kwargs):
# prevent recursion since calling set_alert_template() also calls save()
if not hasattr(self, "_processing_set_alert_template"):
self._processing_set_alert_template = False
if self.pk and not self._processing_set_alert_template:
orig = Agent.objects.get(pk=self.pk)
mon_type_changed = self.monitoring_type != orig.monitoring_type
site_changed = self.site_id != orig.site_id
if mon_type_changed or site_changed:
self._processing_set_alert_template = True
self.set_alert_template()
self._processing_set_alert_template = False
super().save(*args, **kwargs)
def client(self) -> "Client":
return self.site.client
def timezone(self) -> str:
# return the default timezone unless the timezone is explicitly set per agent
if self.time_zone:
return self.time_zone
return get_core_settings().default_time_zone
def is_posix(self) -> bool:
return self.plat in {AgentPlat.LINUX, AgentPlat.DARWIN}
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
elif "32 bit" in self.operating_system or "32bit" in self.operating_system:
return "32"
return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
ver = settings.LATEST_AGENT_VER
if not self.goarch:
DebugLog.warning(
agent=self,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
)
return "noarch"
if pyver.parse(self.version) <= pyver.parse("1.3.0"):
return "not supported"
url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"
if not force:
if self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).exists():
self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).delete()
PendingAction.objects.create(
agent=self,
action_type=PAAction.AGENT_UPDATE,
details={
"url": url,
"version": ver,
"inno": bin,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": ver,
"inno": bin,
},
}
asyncio.run(self.nats_cmd(nats_data, wait=False))
return "created"
def status(self) -> str:
now = djangotime.now()
offline = now - djangotime.timedelta(minutes=self.offline_time)
overdue = now - djangotime.timedelta(minutes=self.overdue_time)
if self.last_seen is not None:
if (self.last_seen < offline) and (self.last_seen > overdue):
return AGENT_STATUS_OFFLINE
elif (self.last_seen < offline) and (self.last_seen < overdue):
return AGENT_STATUS_OVERDUE
else:
return AGENT_STATUS_ONLINE
else:
return AGENT_STATUS_OFFLINE
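# --- Illustrative sketch (standalone, hypothetical values) ---
# How last_seen relates to the offline/overdue windows used by status().
import datetime as dt
now = dt.datetime.now(dt.timezone.utc)
offline_cutoff = now - dt.timedelta(minutes=4)    # offline_time default
overdue_cutoff = now - dt.timedelta(minutes=30)   # overdue_time default
last_seen = now - dt.timedelta(minutes=10)        # hypothetical check-in
if last_seen >= offline_cutoff:
    state = "online"
elif last_seen >= overdue_cutoff:
    state = "offline"   # missed a check-in, but not yet past overdue_time
else:
    state = "overdue"
# state -> "offline" for a 10-minute-old check-in with the defaults above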
def checks(self) -> Dict[str, Any]:
total, passing, failing, warning, info = 0, 0, 0, 0, 0
for check in self.get_checks_with_policies(exclude_overridden=True):
total += 1
if (
not hasattr(check.check_result, "status")
or isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.PASSING
):
passing += 1
elif (
isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.FAILING
):
alert_severity = (
check.check_result.alert_severity
if check.check_type
in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
)
else check.alert_severity
)
if alert_severity == AlertSeverity.ERROR:
failing += 1
elif alert_severity == AlertSeverity.WARNING:
warning += 1
elif alert_severity == AlertSeverity.INFO:
info += 1
ret = {
"total": total,
"passing": passing,
"failing": failing,
"warning": warning,
"info": info,
"has_failing_checks": failing > 0 or warning > 0,
}
return ret
def pending_actions_count(self) -> int:
ret = cache.get(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}")
if ret is None:
ret = self.pendingactions.filter(status=PAStatus.PENDING).count()
cache.set(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}", ret, 600)
return ret
def cpu_model(self) -> List[str]:
if self.is_posix:
try:
return cast(List[str], self.wmi_detail["cpus"])
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
for cpu in cpus:
name = [x["Name"] for x in cpu if "Name" in x][0]
lp, nc = "", ""
with suppress(Exception):
lp = [
x["NumberOfLogicalProcessors"]
for x in cpu
if "NumberOfCores" in x
][0]
nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
if lp and nc:
cpu_string = f"{name}, {nc}C/{lp}T"
else:
cpu_string = name
ret.append(cpu_string)
return ret
except:
return ["unknown cpu model"]
def graphics(self) -> str:
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
for i in graphics:
caption = [x["Caption"] for x in i if "Caption" in x][0]
if "microsoft remote display adapter" in caption.lower():
mrda.append("yes")
continue
ret.append([x["Caption"] for x in i if "Caption" in x][0])
# only return this if no other graphics cards
if not ret and mrda:
return "Microsoft Remote Display Adapter"
return ", ".join(ret)
except:
return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
except:
return "error getting local ips"
for i in ips:
try:
addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
except:
continue
if addr is None:
continue
for ip in addr:
if validators.ipv4(ip):
ret.append(ip)
if len(ret) == 1:
return cast(str, ret[0])
return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
if self.is_posix:
try:
return cast(str, self.wmi_detail["make_model"])
except:
return "error getting make/model"
with suppress(Exception):
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
model = [x["Model"] for x in comp_sys if "Model" in x][0]
if "to be filled" in model.lower():
mobo = self.wmi_detail["base_board"][0]
make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
model = [x["Product"] for x in mobo if "Product" in x][0]
if make.lower() == "lenovo":
sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
if "to be filled" not in sysfam.lower():
model = sysfam
return f"{make} {model}"
with suppress(Exception):
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
return "unknown make/model"
def physical_disks(self) -> Sequence[Disk]:
if self.is_posix:
try:
return cast(List[Disk], self.wmi_detail["disks"])
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
for disk in disks:
interface_type = [
x["InterfaceType"] for x in disk if "InterfaceType" in x
][0]
if interface_type == "USB":
continue
model = [x["Caption"] for x in disk if "Caption" in x][0]
size = [x["Size"] for x in disk if "Size" in x][0]
size_in_gb = round(int(size) / 1_073_741_824)
ret.append(f"{model} {size_in_gb:,}GB {interface_type}")
return ret
except:
return ["unknown disk"]
def serial_number(self) -> str:
if self.is_posix:
try:
return self.wmi_detail["serialnumber"]
except:
return ""
try:
return self.wmi_detail["bios"][0][0]["SerialNumber"]
except:
return ""
def hex_mesh_node_id(self) -> str:
return _b64_to_hex(self.mesh_node_id)
def online_agents(cls, min_version: str = "") -> "List[Agent]":
if min_version:
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if pyver.parse(i.version) >= pyver.parse(min_version)
and i.status == AGENT_STATUS_ONLINE
]
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if i.status == AGENT_STATUS_ONLINE
]
def is_supported_script(self, platforms: List[str]) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
if exclude_overridden:
checks = (
list(
check
for check in self.agentchecks.all()
if not check.overridden_by_policy
)
+ self.get_checks_from_policies()
)
else:
checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
results = self.taskresults.all() # type: ignore
for task in tasks:
for result in results:
if result.task.id == task.pk:
task.task_result = result
break
return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
results = self.checkresults.all() # type: ignore
for check in checks:
for result in results:
if result.assigned_check.id == check.pk:
check.check_result = result
break
return checks
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
from checks.models import Check
site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
default_policy = getattr(
get_core_settings(), f"{self.monitoring_type}_policy", None
)
# prefetch excluded objects on policies, but only when the policy is not None
models.prefetch_related_objects(
[
policy
for policy in (self.policy, site_policy, client_policy, default_policy)
if policy
],
"excluded_agents",
"excluded_sites",
"excluded_clients",
models.Prefetch(
"policychecks", queryset=Check.objects.select_related("script")
),
"autotasks",
)
return {
"agent_policy": (
self.policy
if self.policy and not self.policy.is_agent_excluded(self)
else None
),
"site_policy": (
site_policy
if (site_policy and not site_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
else None
),
"client_policy": (
client_policy
if (client_policy and not client_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
else None
),
"default_policy": (
default_policy
if (default_policy and not default_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
and not self.client.block_policy_inheritance
else None
),
}
def check_run_interval(self) -> int:
interval = self.check_interval
# determine if any agent checks have a custom interval and set the lowest interval
for check in self.get_checks_with_policies():
if check.run_interval and check.run_interval < interval:
# don't allow check runs less than 15s
interval = 15 if check.run_interval < 15 else check.run_interval
return interval
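# --- Illustrative sketch (standalone, hypothetical intervals) ---
# The effective run interval is the smallest custom check interval,
# clamped to the 15-second floor enforced above.
agent_interval = 120
custom_intervals = [60, 10, 0]   # 0/None means "use the agent interval"
effective = agent_interval
for run_interval in custom_intervals:
    if run_interval and run_interval < effective:
        effective = 15 if run_interval < 15 else run_interval
# effective -> 15 (the 10-second check is clamped up to the floor)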
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
from scripts.models import Script
script = Script.objects.get(pk=scriptpk)
# always override if set on script model
if script.run_as_user:
run_as_user = True
parsed_args = script.parse_script_args(self, script.shell, args)
parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)
data = {
"func": "runscriptfull" if full else "runscript",
"timeout": timeout,
"script_args": parsed_args,
"payload": {
"code": script.code,
"shell": script.shell,
},
"run_as_user": run_as_user,
"env_vars": parsed_env_vars,
"nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
"deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
}
if history_pk != 0:
data["id"] = history_pk
running_agent = self
if run_on_any:
nats_ping = {"func": "ping"}
# try on self first
r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = self
else:
for agent in Agent.online_agents():
r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = agent
break
if running_agent.pk == self.pk:
return "Unable to find an online agent"
if wait:
return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
else:
asyncio.run(running_agent.nats_cmd(data, wait=False))
return "ok"
# auto approves updates
def approve_updates(self) -> None:
patch_policy = self.get_patch_policy()
severity_list = []
if patch_policy.critical == "approve":
severity_list.append("Critical")
if patch_policy.important == "approve":
severity_list.append("Important")
if patch_policy.moderate == "approve":
severity_list.append("Moderate")
if patch_policy.low == "approve":
severity_list.append("Low")
if patch_policy.other == "approve":
severity_list.append("")
self.winupdates.filter(severity__in=severity_list, installed=False).exclude(
action="approve"
).update(action="approve")
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self) -> "WinUpdatePolicy":
from winupdate.models import WinUpdatePolicy
# get (or create) the agent's own patch policy; applied policies are checked next
patch_policy = None
agent_policy = self.winupdatepolicy.first()
if not agent_policy:
agent_policy = WinUpdatePolicy.objects.create(agent=self)
# Get the list of policies applied to the agent and select the
# highest priority one.
policies = self.get_agent_policies()
for _, policy in policies.items():
if policy and policy.active and policy.winupdatepolicy.exists():
patch_policy = policy.winupdatepolicy.first()
break
# if policy still doesn't exist return the agent patch policy
if not patch_policy:
return agent_policy
# patch policy exists. check if any agent settings are set to override patch policy
if agent_policy.critical != "inherit":
patch_policy.critical = agent_policy.critical
if agent_policy.important != "inherit":
patch_policy.important = agent_policy.important
if agent_policy.moderate != "inherit":
patch_policy.moderate = agent_policy.moderate
if agent_policy.low != "inherit":
patch_policy.low = agent_policy.low
if agent_policy.other != "inherit":
patch_policy.other = agent_policy.other
if agent_policy.run_time_frequency != "inherit":
patch_policy.run_time_frequency = agent_policy.run_time_frequency
patch_policy.run_time_hour = agent_policy.run_time_hour
patch_policy.run_time_days = agent_policy.run_time_days
if agent_policy.reboot_after_install != "inherit":
patch_policy.reboot_after_install = agent_policy.reboot_after_install
if not agent_policy.reprocess_failed_inherit:
patch_policy.reprocess_failed = agent_policy.reprocess_failed
patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
patch_policy.email_if_fail = agent_policy.email_if_fail
return patch_policy
def get_approved_update_guids(self) -> list[str]:
return list(
self.winupdates.filter(action="approve", installed=False).values_list( # type: ignore
"guid", flat=True
)
)
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
core = get_core_settings()
policies = self.get_agent_policies()
# loop through all policies applied to agent and return an alert_template if found
processed_policies: List[int] = []
for key, policy in policies.items():
# default alert_template will override a default policy with alert template applied
if (
"default" in key
and core.alert_template
and core.alert_template.is_active
and not core.alert_template.is_agent_excluded(self)
):
self.alert_template = core.alert_template
self.save(update_fields=["alert_template"])
return core.alert_template
elif (
policy
and policy.active
and policy.pk not in processed_policies
and policy.alert_template
and policy.alert_template.is_active
and not policy.alert_template.is_agent_excluded(self)
):
self.alert_template = policy.alert_template
self.save(update_fields=["alert_template"])
return policy.alert_template
elif (
"site" in key
and self.site.alert_template
and self.site.alert_template.is_active
and not self.site.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.alert_template
self.save(update_fields=["alert_template"])
return self.site.alert_template
elif (
"client" in key
and self.site.client.alert_template
and self.site.client.alert_template.is_active
and not self.site.client.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.client.alert_template
self.save(update_fields=["alert_template"])
return self.site.client.alert_template
# no alert templates found or agent has been excluded
self.alert_template = None
self.save(update_fields=["alert_template"])
return None
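# --- Simplified sketch of the precedence walk (hypothetical data) ---
# get_agent_policies() returns the dict in insertion order, so the first
# active, non-excluded alert template wins: agent, then site, then client,
# then the core/default template. The real loop also falls back to the
# site's and client's own templates; this only shows the ordering idea.
candidates = {
    "agent_policy": None,               # no agent-level template
    "site_policy": "site-template",     # hypothetical template
    "client_policy": "client-template",
    "default_policy": None,
}
chosen = next((tmpl for tmpl in candidates.values() if tmpl), None)
# chosen -> "site-template"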
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_availability_alert(
self, skip_create=not self.should_create_alert(alert_template)
)
def get_checks_from_policies(self) -> "List[Check]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance or self.agentchecks.exists():
cache_key = f"agent_{self.agent_id}_checks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"
cached_checks = cache.get(cache_key)
if isinstance(cached_checks, list):
return cached_checks
else:
# clear agent checks that have overridden_by_policy set
self.agentchecks.update(overridden_by_policy=False) # type: ignore
# get agent checks based on policies
checks = Policy.get_policy_checks(self)
cache.set(cache_key, checks, 600)
return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance:
cache_key = f"agent_{self.agent_id}_tasks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"
cached_tasks = cache.get(cache_key)
if isinstance(cached_tasks, list):
return cached_tasks
else:
# get agent tasks based on policies
tasks = Policy.get_policy_tasks(self)
cache.set(cache_key, tasks, 600)
return tasks
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
opts = setup_nats_options()
try:
nc = await nats.connect(**opts)
except:
return "natsdown"
if wait:
try:
msg = await nc.request(
self.agent_id, msgpack.dumps(data), timeout=timeout
)
except TimeoutError:
ret = "timeout"
else:
try:
ret = msgpack.loads(msg.data)
except Exception as e:
ret = str(e)
logger.error(e)
await nc.close()
return ret
else:
await nc.publish(self.agent_id, msgpack.dumps(data))
await nc.flush()
await nc.close()
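# --- Hedged usage sketch (not executed here) ---
# Request/reply vs. fire-and-forget over NATS; assumes a reachable NATS
# server and an existing agent instance.
#
#   import asyncio
#   r = asyncio.run(agent.nats_cmd({"func": "ping"}, timeout=1))
#   # r == "pong" when the agent answers, "timeout" or "natsdown" otherwise
#   asyncio.run(agent.nats_cmd({"func": "ping"}, wait=False))  # publish only, no reply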
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
"""
Return type: tuple(message: str, error: bool)
"""
if mode == "tacagent":
if self.plat == AgentPlat.LINUX:
cmd = "systemctl restart tacticalagent.service"
shell = 3
elif self.plat == AgentPlat.DARWIN:
cmd = "launchctl kickstart -k system/tacticalagent"
shell = 3
else:
cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
shell = 1
asyncio.run(
send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
)
return "ok", False
elif mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
if wait:
r = asyncio.run(self.nats_cmd(data, timeout=20))
if r == "ok":
return "ok", False
else:
return str(r), True
else:
asyncio.run(self.nats_cmd(data, timeout=20, wait=False))
return "ok", False
return "invalid", True
def serialize(agent: "Agent") -> Dict[str, Any]:
# serializes the agent and returns json
from .serializers import AgentAuditSerializer
return AgentAuditSerializer(agent).data
def delete_superseded_updates(self) -> None:
with suppress(Exception):
pks = [] # list of pks to delete
kbs = list(self.winupdates.values_list("kb", flat=True))
d = Counter(kbs)
dupes = [k for k, v in d.items() if v > 1]
for dupe in dupes:
titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
# extract the version from the title and sort from oldest to newest
# skip if no version info is available therefore nothing to parse
try:
matches = r"(Version|Versão)"
pattern = r"\(" + matches + r"(.*?)\)"
vers = [
re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
for i in titles
]
sorted_vers = sorted(vers, key=LooseVersion)
except:
continue
# append all but the latest version to our list of pks to delete
for ver in sorted_vers[:-1]:
q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
pks.append(q.first().pk)
pks = list(set(pks))
self.winupdates.filter(pk__in=pks).delete()
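# --- Standalone sketch of the title parsing used above ---
# Pull the parenthesised version string out of a Windows update title;
# the titles are hypothetical.
import re
titles = [
    "2024-01 Cumulative Update (Version 22H2) for x64",
    "2024-03 Cumulative Update (Version 23H2) for x64",
]
pattern = r"\((Version|Versão)(.*?)\)"
vers = [
    re.search(pattern, t, flags=re.IGNORECASE).group(2).strip() for t in titles
]
# vers -> ["22H2", "23H2"]; all but the newest duplicate are deleted above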
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
return bool(
self.overdue_dashboard_alert
or self.overdue_email_alert
or self.overdue_text_alert
or (
alert_template
and (
alert_template.agent_always_alert
or alert_template.agent_always_email
or alert_template.agent_always_text
)
)
)
def send_outage_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
(
f"Data has not been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"within the expected time."
),
alert_template=self.alert_template,
)
def send_recovery_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
(
f"Data has been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"after an interruption in data transmission."
),
alert_template=self.alert_template,
)
def send_outage_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
alert_template=self.alert_template,
)
def send_recovery_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
alert_template=self.alert_template,
)
class AgentHostnameSerializer(serializers.ModelSerializer):
client = serializers.ReadOnlyField(source="client.name")
site = serializers.ReadOnlyField(source="site.name")
class Meta:
model = Agent
fields = (
"id",
"hostname",
"agent_id",
"client",
"site",
)
def get_agent_versions(request):
agents = (
Agent.objects.defer(*AGENT_DEFER)
.filter_by_role(request.user) # type: ignore
.select_related("site__client")
)
return Response(
{
"versions": [settings.LATEST_AGENT_VER],
"agents": AgentHostnameSerializer(agents, many=True).data,
}
) | null |
189,093 | import asyncio
import datetime as dt
import random
import string
import time
from io import StringIO
from pathlib import Path
from django.conf import settings
from django.db.models import Exists, OuterRef, Prefetch, Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from django.utils.dateparse import parse_datetime
from meshctrl.utils import get_login_token
from packaging import version as pyver
from rest_framework import serializers
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from core.tasks import sync_mesh_perms_task
from core.utils import (
get_core_settings,
get_mesh_ws_url,
remove_mesh_agent,
token_is_valid,
wake_on_lan,
)
from logs.models import AuditLog, DebugLog, PendingAction
from scripts.models import Script
from scripts.tasks import bulk_command_task, bulk_script_task
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_OFFLINE,
AGENT_STATUS_ONLINE,
AGENT_TABLE_DEFER,
AgentHistoryType,
AgentMonType,
AgentPlat,
CustomFieldModel,
DebugLogType,
EvtLogNames,
PAAction,
)
from tacticalrmm.helpers import date_is_in_past, notify_error
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from tacticalrmm.utils import get_default_timezone, reload_nats
from winupdate.models import WinUpdate, WinUpdatePolicy
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
from .models import Agent, AgentCustomField, AgentHistory, Note
from .permissions import (
AgentHistoryPerms,
AgentNotesPerms,
AgentPerms,
AgentWOLPerms,
EvtLogPerms,
InstallAgentPerms,
ManageProcPerms,
MeshPerms,
RebootAgentPerms,
RecoverAgentPerms,
RunBulkPerms,
RunScriptPerms,
SendCMDPerms,
UpdateAgentPerms,
)
from .serializers import (
AgentCustomFieldSerializer,
AgentHistorySerializer,
AgentHostnameSerializer,
AgentNoteSerializer,
AgentSerializer,
AgentTableSerializer,
)
from .tasks import (
bulk_recover_agents_task,
run_script_email_results_task,
send_agent_update_task,
)
def token_is_valid() -> tuple[str, bool]:
"""
Return type: token: str, is_valid: bool. Token will be an empty string if not valid.
"""
from core.models import CodeSignToken
t: "Optional[CodeSignToken]" = CodeSignToken.objects.first()
if not t:
return "", False
if not t.token:
return "", False
if t.is_valid:
return t.token, True
return "", False
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat: "AgentPlat" = models.CharField( # type: ignore
max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
goarch: "GoArch" = models.CharField( # type: ignore
max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(
max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
return self.hostname
def save(self, *args, **kwargs):
# prevent recursion since calling set_alert_template() also calls save()
if not hasattr(self, "_processing_set_alert_template"):
self._processing_set_alert_template = False
if self.pk and not self._processing_set_alert_template:
orig = Agent.objects.get(pk=self.pk)
mon_type_changed = self.monitoring_type != orig.monitoring_type
site_changed = self.site_id != orig.site_id
if mon_type_changed or site_changed:
self._processing_set_alert_template = True
self.set_alert_template()
self._processing_set_alert_template = False
super().save(*args, **kwargs)
def client(self) -> "Client":
return self.site.client
def timezone(self) -> str:
# return the default timezone unless the timezone is explicitly set per agent
if self.time_zone:
return self.time_zone
return get_core_settings().default_time_zone
def is_posix(self) -> bool:
return self.plat in {AgentPlat.LINUX, AgentPlat.DARWIN}
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
elif "32 bit" in self.operating_system or "32bit" in self.operating_system:
return "32"
return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
ver = settings.LATEST_AGENT_VER
if not self.goarch:
DebugLog.warning(
agent=self,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
)
return "noarch"
if pyver.parse(self.version) <= pyver.parse("1.3.0"):
return "not supported"
url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"
if not force:
if self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).exists():
self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).delete()
PendingAction.objects.create(
agent=self,
action_type=PAAction.AGENT_UPDATE,
details={
"url": url,
"version": ver,
"inno": bin,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": ver,
"inno": bin,
},
}
asyncio.run(self.nats_cmd(nats_data, wait=False))
return "created"
def status(self) -> str:
now = djangotime.now()
offline = now - djangotime.timedelta(minutes=self.offline_time)
overdue = now - djangotime.timedelta(minutes=self.overdue_time)
if self.last_seen is not None:
if (self.last_seen < offline) and (self.last_seen > overdue):
return AGENT_STATUS_OFFLINE
elif (self.last_seen < offline) and (self.last_seen < overdue):
return AGENT_STATUS_OVERDUE
else:
return AGENT_STATUS_ONLINE
else:
return AGENT_STATUS_OFFLINE
def checks(self) -> Dict[str, Any]:
total, passing, failing, warning, info = 0, 0, 0, 0, 0
for check in self.get_checks_with_policies(exclude_overridden=True):
total += 1
if (
not hasattr(check.check_result, "status")
or isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.PASSING
):
passing += 1
elif (
isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.FAILING
):
alert_severity = (
check.check_result.alert_severity
if check.check_type
in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
)
else check.alert_severity
)
if alert_severity == AlertSeverity.ERROR:
failing += 1
elif alert_severity == AlertSeverity.WARNING:
warning += 1
elif alert_severity == AlertSeverity.INFO:
info += 1
ret = {
"total": total,
"passing": passing,
"failing": failing,
"warning": warning,
"info": info,
"has_failing_checks": failing > 0 or warning > 0,
}
return ret
def pending_actions_count(self) -> int:
ret = cache.get(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}")
if ret is None:
ret = self.pendingactions.filter(status=PAStatus.PENDING).count()
cache.set(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}", ret, 600)
return ret
def cpu_model(self) -> List[str]:
if self.is_posix:
try:
return cast(List[str], self.wmi_detail["cpus"])
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
for cpu in cpus:
name = [x["Name"] for x in cpu if "Name" in x][0]
lp, nc = "", ""
with suppress(Exception):
lp = [
x["NumberOfLogicalProcessors"]
for x in cpu
if "NumberOfCores" in x
][0]
nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
if lp and nc:
cpu_string = f"{name}, {nc}C/{lp}T"
else:
cpu_string = name
ret.append(cpu_string)
return ret
except:
return ["unknown cpu model"]
def graphics(self) -> str:
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
for i in graphics:
caption = [x["Caption"] for x in i if "Caption" in x][0]
if "microsoft remote display adapter" in caption.lower():
mrda.append("yes")
continue
ret.append([x["Caption"] for x in i if "Caption" in x][0])
# only return this if no other graphics cards
if not ret and mrda:
return "Microsoft Remote Display Adapter"
return ", ".join(ret)
except:
return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
except:
return "error getting local ips"
for i in ips:
try:
addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
except:
continue
if addr is None:
continue
for ip in addr:
if validators.ipv4(ip):
ret.append(ip)
if len(ret) == 1:
return cast(str, ret[0])
return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
if self.is_posix:
try:
return cast(str, self.wmi_detail["make_model"])
except:
return "error getting make/model"
with suppress(Exception):
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
model = [x["Model"] for x in comp_sys if "Model" in x][0]
if "to be filled" in model.lower():
mobo = self.wmi_detail["base_board"][0]
make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
model = [x["Product"] for x in mobo if "Product" in x][0]
if make.lower() == "lenovo":
sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
if "to be filled" not in sysfam.lower():
model = sysfam
return f"{make} {model}"
with suppress(Exception):
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
return "unknown make/model"
def physical_disks(self) -> Sequence[Disk]:
if self.is_posix:
try:
return cast(List[Disk], self.wmi_detail["disks"])
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
for disk in disks:
interface_type = [
x["InterfaceType"] for x in disk if "InterfaceType" in x
][0]
if interface_type == "USB":
continue
model = [x["Caption"] for x in disk if "Caption" in x][0]
size = [x["Size"] for x in disk if "Size" in x][0]
size_in_gb = round(int(size) / 1_073_741_824)
ret.append(f"{model} {size_in_gb:,}GB {interface_type}")
return ret
except:
return ["unknown disk"]
def serial_number(self) -> str:
if self.is_posix:
try:
return self.wmi_detail["serialnumber"]
except:
return ""
try:
return self.wmi_detail["bios"][0][0]["SerialNumber"]
except:
return ""
def hex_mesh_node_id(self) -> str:
return _b64_to_hex(self.mesh_node_id)
def online_agents(cls, min_version: str = "") -> "List[Agent]":
if min_version:
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if pyver.parse(i.version) >= pyver.parse(min_version)
and i.status == AGENT_STATUS_ONLINE
]
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if i.status == AGENT_STATUS_ONLINE
]
def is_supported_script(self, platforms: List[str]) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
if exclude_overridden:
checks = (
list(
check
for check in self.agentchecks.all()
if not check.overridden_by_policy
)
+ self.get_checks_from_policies()
)
else:
checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
results = self.taskresults.all() # type: ignore
for task in tasks:
for result in results:
if result.task.id == task.pk:
task.task_result = result
break
return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
results = self.checkresults.all() # type: ignore
for check in checks:
for result in results:
if result.assigned_check.id == check.pk:
check.check_result = result
break
return checks
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
from checks.models import Check
site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
default_policy = getattr(
get_core_settings(), f"{self.monitoring_type}_policy", None
)
# prefetch excluded objects on policies, but only when the policy is not None
models.prefetch_related_objects(
[
policy
for policy in (self.policy, site_policy, client_policy, default_policy)
if policy
],
"excluded_agents",
"excluded_sites",
"excluded_clients",
models.Prefetch(
"policychecks", queryset=Check.objects.select_related("script")
),
"autotasks",
)
return {
"agent_policy": (
self.policy
if self.policy and not self.policy.is_agent_excluded(self)
else None
),
"site_policy": (
site_policy
if (site_policy and not site_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
else None
),
"client_policy": (
client_policy
if (client_policy and not client_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
else None
),
"default_policy": (
default_policy
if (default_policy and not default_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
and not self.client.block_policy_inheritance
else None
),
}
def check_run_interval(self) -> int:
interval = self.check_interval
# determine if any agent checks have a custom interval and set the lowest interval
for check in self.get_checks_with_policies():
if check.run_interval and check.run_interval < interval:
# don't allow check runs less than 15s
interval = 15 if check.run_interval < 15 else check.run_interval
return interval
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
from scripts.models import Script
script = Script.objects.get(pk=scriptpk)
# always override if set on script model
if script.run_as_user:
run_as_user = True
parsed_args = script.parse_script_args(self, script.shell, args)
parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)
data = {
"func": "runscriptfull" if full else "runscript",
"timeout": timeout,
"script_args": parsed_args,
"payload": {
"code": script.code,
"shell": script.shell,
},
"run_as_user": run_as_user,
"env_vars": parsed_env_vars,
"nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
"deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
}
if history_pk != 0:
data["id"] = history_pk
running_agent = self
if run_on_any:
nats_ping = {"func": "ping"}
# try on self first
r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = self
else:
for agent in Agent.online_agents():
r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = agent
break
if running_agent.pk == self.pk:
return "Unable to find an online agent"
if wait:
return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
else:
asyncio.run(running_agent.nats_cmd(data, wait=False))
return "ok"
# auto approves updates
def approve_updates(self) -> None:
patch_policy = self.get_patch_policy()
severity_list = []
if patch_policy.critical == "approve":
severity_list.append("Critical")
if patch_policy.important == "approve":
severity_list.append("Important")
if patch_policy.moderate == "approve":
severity_list.append("Moderate")
if patch_policy.low == "approve":
severity_list.append("Low")
if patch_policy.other == "approve":
severity_list.append("")
self.winupdates.filter(severity__in=severity_list, installed=False).exclude(
action="approve"
).update(action="approve")
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self) -> "WinUpdatePolicy":
from winupdate.models import WinUpdatePolicy
# get (or create) the agent's own patch policy; applied policies are checked next
patch_policy = None
agent_policy = self.winupdatepolicy.first()
if not agent_policy:
agent_policy = WinUpdatePolicy.objects.create(agent=self)
# Get the list of policies applied to the agent and select the
# highest priority one.
policies = self.get_agent_policies()
for _, policy in policies.items():
if policy and policy.active and policy.winupdatepolicy.exists():
patch_policy = policy.winupdatepolicy.first()
break
# if policy still doesn't exist return the agent patch policy
if not patch_policy:
return agent_policy
# patch policy exists. check if any agent settings are set to override patch policy
if agent_policy.critical != "inherit":
patch_policy.critical = agent_policy.critical
if agent_policy.important != "inherit":
patch_policy.important = agent_policy.important
if agent_policy.moderate != "inherit":
patch_policy.moderate = agent_policy.moderate
if agent_policy.low != "inherit":
patch_policy.low = agent_policy.low
if agent_policy.other != "inherit":
patch_policy.other = agent_policy.other
if agent_policy.run_time_frequency != "inherit":
patch_policy.run_time_frequency = agent_policy.run_time_frequency
patch_policy.run_time_hour = agent_policy.run_time_hour
patch_policy.run_time_days = agent_policy.run_time_days
if agent_policy.reboot_after_install != "inherit":
patch_policy.reboot_after_install = agent_policy.reboot_after_install
if not agent_policy.reprocess_failed_inherit:
patch_policy.reprocess_failed = agent_policy.reprocess_failed
patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
patch_policy.email_if_fail = agent_policy.email_if_fail
return patch_policy
def get_approved_update_guids(self) -> list[str]:
return list(
self.winupdates.filter(action="approve", installed=False).values_list( # type: ignore
"guid", flat=True
)
)
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
core = get_core_settings()
policies = self.get_agent_policies()
# loop through all policies applied to agent and return an alert_template if found
processed_policies: List[int] = []
for key, policy in policies.items():
# default alert_template will override a default policy with alert template applied
if (
"default" in key
and core.alert_template
and core.alert_template.is_active
and not core.alert_template.is_agent_excluded(self)
):
self.alert_template = core.alert_template
self.save(update_fields=["alert_template"])
return core.alert_template
elif (
policy
and policy.active
and policy.pk not in processed_policies
and policy.alert_template
and policy.alert_template.is_active
and not policy.alert_template.is_agent_excluded(self)
):
self.alert_template = policy.alert_template
self.save(update_fields=["alert_template"])
return policy.alert_template
elif (
"site" in key
and self.site.alert_template
and self.site.alert_template.is_active
and not self.site.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.alert_template
self.save(update_fields=["alert_template"])
return self.site.alert_template
elif (
"client" in key
and self.site.client.alert_template
and self.site.client.alert_template.is_active
and not self.site.client.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.client.alert_template
self.save(update_fields=["alert_template"])
return self.site.client.alert_template
# no alert templates found or agent has been excluded
self.alert_template = None
self.save(update_fields=["alert_template"])
return None
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_availability_alert(
self, skip_create=not self.should_create_alert(alert_template)
)
def get_checks_from_policies(self) -> "List[Check]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance or self.agentchecks.exists():
cache_key = f"agent_{self.agent_id}_checks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"
cached_checks = cache.get(cache_key)
if isinstance(cached_checks, list):
return cached_checks
else:
# clear agent checks that have overridden_by_policy set
self.agentchecks.update(overridden_by_policy=False) # type: ignore
# get agent checks based on policies
checks = Policy.get_policy_checks(self)
cache.set(cache_key, checks, 600)
return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance:
cache_key = f"agent_{self.agent_id}_tasks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"
cached_tasks = cache.get(cache_key)
if isinstance(cached_tasks, list):
return cached_tasks
else:
# get agent tasks based on policies
tasks = Policy.get_policy_tasks(self)
cache.set(cache_key, tasks, 600)
return tasks
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
opts = setup_nats_options()
try:
nc = await nats.connect(**opts)
except:
return "natsdown"
if wait:
try:
msg = await nc.request(
self.agent_id, msgpack.dumps(data), timeout=timeout
)
except TimeoutError:
ret = "timeout"
else:
try:
ret = msgpack.loads(msg.data)
except Exception as e:
ret = str(e)
logger.error(e)
await nc.close()
return ret
else:
await nc.publish(self.agent_id, msgpack.dumps(data))
await nc.flush()
await nc.close()
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
"""
Return type: tuple(message: str, error: bool)
"""
if mode == "tacagent":
if self.plat == AgentPlat.LINUX:
cmd = "systemctl restart tacticalagent.service"
shell = 3
elif self.plat == AgentPlat.DARWIN:
cmd = "launchctl kickstart -k system/tacticalagent"
shell = 3
else:
cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
shell = 1
asyncio.run(
send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
)
return "ok", False
elif mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
if wait:
r = asyncio.run(self.nats_cmd(data, timeout=20))
if r == "ok":
return "ok", False
else:
return str(r), True
else:
asyncio.run(self.nats_cmd(data, timeout=20, wait=False))
return "ok", False
return "invalid", True
def serialize(agent: "Agent") -> Dict[str, Any]:
# serializes the agent and returns json
from .serializers import AgentAuditSerializer
return AgentAuditSerializer(agent).data
def delete_superseded_updates(self) -> None:
with suppress(Exception):
pks = [] # list of pks to delete
kbs = list(self.winupdates.values_list("kb", flat=True))
d = Counter(kbs)
dupes = [k for k, v in d.items() if v > 1]
for dupe in dupes:
titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
# extract the version from the title and sort from oldest to newest
# skip if no version info is available therefore nothing to parse
try:
matches = r"(Version|Versão)"
pattern = r"\(" + matches + r"(.*?)\)"
vers = [
re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
for i in titles
]
sorted_vers = sorted(vers, key=LooseVersion)
except:
continue
# append all but the latest version to our list of pks to delete
for ver in sorted_vers[:-1]:
q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
pks.append(q.first().pk)
pks = list(set(pks))
self.winupdates.filter(pk__in=pks).delete()
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
return bool(
self.overdue_dashboard_alert
or self.overdue_email_alert
or self.overdue_text_alert
or (
alert_template
and (
alert_template.agent_always_alert
or alert_template.agent_always_email
or alert_template.agent_always_text
)
)
)
def send_outage_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
(
f"Data has not been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"within the expected time."
),
alert_template=self.alert_template,
)
def send_recovery_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
(
f"Data has been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"after an interruption in data transmission."
),
alert_template=self.alert_template,
)
def send_outage_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
alert_template=self.alert_template,
)
def send_recovery_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
alert_template=self.alert_template,
)
def send_agent_update_task(*, agent_ids: list[str], token: str, force: bool) -> None:
agents: "QuerySet[Agent]" = Agent.objects.defer(*AGENT_DEFER).filter(
agent_id__in=agent_ids
)
for agent in agents:
agent.do_update(token=token, force=force)
def update_agents(request):
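    """Queue agent updates for the requested agents the user can access,
    skipping any already on the latest agent version."""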
q = (
Agent.objects.filter_by_role(request.user) # type: ignore
.filter(agent_id__in=request.data["agent_ids"])
.only("agent_id", "version")
)
agent_ids: list[str] = [
i.agent_id
for i in q
if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER)
]
token, _ = token_is_valid()
send_agent_update_task.delay(agent_ids=agent_ids, token=token, force=False)
return Response("ok") | null |
189,094 | import asyncio
import datetime as dt
import random
import string
import time
from io import StringIO
from pathlib import Path
from django.conf import settings
from django.db.models import Exists, OuterRef, Prefetch, Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from django.utils.dateparse import parse_datetime
from meshctrl.utils import get_login_token
from packaging import version as pyver
from rest_framework import serializers
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from core.tasks import sync_mesh_perms_task
from core.utils import (
get_core_settings,
get_mesh_ws_url,
remove_mesh_agent,
token_is_valid,
wake_on_lan,
)
from logs.models import AuditLog, DebugLog, PendingAction
from scripts.models import Script
from scripts.tasks import bulk_command_task, bulk_script_task
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_OFFLINE,
AGENT_STATUS_ONLINE,
AGENT_TABLE_DEFER,
AgentHistoryType,
AgentMonType,
AgentPlat,
CustomFieldModel,
DebugLogType,
EvtLogNames,
PAAction,
)
from tacticalrmm.helpers import date_is_in_past, notify_error
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from tacticalrmm.utils import get_default_timezone, reload_nats
from winupdate.models import WinUpdate, WinUpdatePolicy
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
from .models import Agent, AgentCustomField, AgentHistory, Note
from .permissions import (
AgentHistoryPerms,
AgentNotesPerms,
AgentPerms,
AgentWOLPerms,
EvtLogPerms,
InstallAgentPerms,
ManageProcPerms,
MeshPerms,
RebootAgentPerms,
RecoverAgentPerms,
RunBulkPerms,
RunScriptPerms,
SendCMDPerms,
UpdateAgentPerms,
)
from .serializers import (
AgentCustomFieldSerializer,
AgentHistorySerializer,
AgentHostnameSerializer,
AgentNoteSerializer,
AgentSerializer,
AgentTableSerializer,
)
from .tasks import (
bulk_recover_agents_task,
run_script_email_results_task,
send_agent_update_task,
)
class Agent(BaseAuditModel):
def __str__(self) -> str:
def save(self, *args, **kwargs):
def client(self) -> "Client":
def timezone(self) -> str:
def is_posix(self) -> bool:
def arch(self) -> Optional[str]:
def do_update(self, *, token: str = "", force: bool = False) -> str:
def status(self) -> str:
def checks(self) -> Dict[str, Any]:
def pending_actions_count(self) -> int:
def cpu_model(self) -> List[str]:
def graphics(self) -> str:
def local_ips(self) -> str:
def make_model(self) -> str:
def physical_disks(self) -> Sequence[Disk]:
def serial_number(self) -> str:
def hex_mesh_node_id(self) -> str:
def online_agents(cls, min_version: str = "") -> "List[Agent]":
def is_supported_script(self, platforms: List[str]) -> bool:
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
def check_run_interval(self) -> int:
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
def approve_updates(self) -> None:
def get_patch_policy(self) -> "WinUpdatePolicy":
def get_approved_update_guids(self) -> list[str]:
def set_alert_template(self) -> "Optional[AlertTemplate]":
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
def get_checks_from_policies(self) -> "List[Check]":
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
def serialize(agent: "Agent") -> Dict[str, Any]:
def delete_superseded_updates(self) -> None:
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
def send_outage_email(self) -> None:
def send_recovery_email(self) -> None:
def send_outage_sms(self) -> None:
def send_recovery_sms(self) -> None:
def get_event_log(request, agent_id, logtype, days):
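    """Fetch a Windows event log from the agent over NATS; the Security log
    uses a longer timeout than the other logs."""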
if getattr(settings, "DEMO", False):
from tacticalrmm.demo_views import demo_get_eventlog
return demo_get_eventlog()
agent = get_object_or_404(Agent, agent_id=agent_id)
timeout = 180 if logtype == EvtLogNames.SECURITY else 30
data = {
"func": "eventlog",
"timeout": timeout,
"payload": {
"logname": logtype,
"days": str(days),
},
}
r = asyncio.run(agent.nats_cmd(data, timeout=timeout + 2))
if r in ("timeout", "natsdown"):
return notify_error("Unable to contact the agent")
return Response(r) | null |
189,095 | import asyncio
import datetime as dt
import random
import string
import time
from io import StringIO
from pathlib import Path
from django.conf import settings
from django.db.models import Exists, OuterRef, Prefetch, Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from django.utils.dateparse import parse_datetime
from meshctrl.utils import get_login_token
from packaging import version as pyver
from rest_framework import serializers
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from core.tasks import sync_mesh_perms_task
from core.utils import (
get_core_settings,
get_mesh_ws_url,
remove_mesh_agent,
token_is_valid,
wake_on_lan,
)
from logs.models import AuditLog, DebugLog, PendingAction
from scripts.models import Script
from scripts.tasks import bulk_command_task, bulk_script_task
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_OFFLINE,
AGENT_STATUS_ONLINE,
AGENT_TABLE_DEFER,
AgentHistoryType,
AgentMonType,
AgentPlat,
CustomFieldModel,
DebugLogType,
EvtLogNames,
PAAction,
)
from tacticalrmm.helpers import date_is_in_past, notify_error
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from tacticalrmm.utils import get_default_timezone, reload_nats
from winupdate.models import WinUpdate, WinUpdatePolicy
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
from .models import Agent, AgentCustomField, AgentHistory, Note
from .permissions import (
AgentHistoryPerms,
AgentNotesPerms,
AgentPerms,
AgentWOLPerms,
EvtLogPerms,
InstallAgentPerms,
ManageProcPerms,
MeshPerms,
RebootAgentPerms,
RecoverAgentPerms,
RunBulkPerms,
RunScriptPerms,
SendCMDPerms,
UpdateAgentPerms,
)
from .serializers import (
AgentCustomFieldSerializer,
AgentHistorySerializer,
AgentHostnameSerializer,
AgentNoteSerializer,
AgentSerializer,
AgentTableSerializer,
)
from .tasks import (
bulk_recover_agents_task,
run_script_email_results_task,
send_agent_update_task,
)
class AuditLog(models.Model):
def __str__(self) -> str:
def save(self, *args: Any, **kwargs: Any) -> None:
def audit_mesh_session(
username: str, agent: "Agent", debug_info: Dict[Any, Any] = {}
) -> None:
def audit_raw_command(
username: str,
agent: "Agent",
cmd: str,
shell: str,
debug_info: Dict[Any, Any] = {},
) -> None:
def audit_object_changed(
username: str,
object_type: str,
before: Dict[Any, Any],
after: Dict[Any, Any],
name: str = "",
debug_info: Dict[Any, Any] = {},
) -> None:
def audit_object_add(
username: str,
object_type: str,
after: Dict[Any, Any],
name: str = "",
debug_info: Dict[Any, Any] = {},
) -> None:
def audit_object_delete(
username: str,
object_type: str,
before: Dict[Any, Any],
name: str = "",
debug_info: Dict[Any, Any] = {},
) -> None:
def audit_script_run(
username: str, agent: "Agent", script: str, debug_info: Dict[Any, Any] = {}
) -> None:
def audit_user_failed_login(username: str, debug_info: Dict[Any, Any] = {}) -> None:
def audit_user_failed_twofactor(
username: str, debug_info: Dict[Any, Any] = {}
) -> None:
def audit_user_login_successful(
username: str, debug_info: Dict[Any, Any] = {}
) -> None:
def audit_url_action(
username: str,
urlaction: "URLAction",
instance: "Union[Agent, Client, Site]",
debug_info: Dict[Any, Any] = {},
) -> None:
def audit_bulk_action(
username: str,
action: str,
affected: Dict[str, Any],
debug_info: Dict[Any, Any] = {},
) -> None:
class Agent(BaseAuditModel):
def __str__(self) -> str:
def save(self, *args, **kwargs):
def client(self) -> "Client":
def timezone(self) -> str:
def is_posix(self) -> bool:
def arch(self) -> Optional[str]:
def do_update(self, *, token: str = "", force: bool = False) -> str:
def status(self) -> str:
def checks(self) -> Dict[str, Any]:
def pending_actions_count(self) -> int:
def cpu_model(self) -> List[str]:
def graphics(self) -> str:
def local_ips(self) -> str:
def make_model(self) -> str:
def physical_disks(self) -> Sequence[Disk]:
def serial_number(self) -> str:
def hex_mesh_node_id(self) -> str:
def online_agents(cls, min_version: str = "") -> "List[Agent]":
def is_supported_script(self, platforms: List[str]) -> bool:
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
def check_run_interval(self) -> int:
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
def approve_updates(self) -> None:
def get_patch_policy(self) -> "WinUpdatePolicy":
def get_approved_update_guids(self) -> list[str]:
def set_alert_template(self) -> "Optional[AlertTemplate]":
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
def get_checks_from_policies(self) -> "List[Check]":
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
def serialize(agent: "Agent") -> Dict[str, Any]:
def delete_superseded_updates(self) -> None:
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
def send_outage_email(self) -> None:
def send_recovery_email(self) -> None:
def send_outage_sms(self) -> None:
def send_recovery_sms(self) -> None:
class AgentHistory(models.Model):
def __str__(self) -> str:
def send_raw_cmd(request, agent_id):
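    """Run an arbitrary shell command on the agent, recording an AgentHistory
    entry and an audit log entry for the command."""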
agent = get_object_or_404(Agent, agent_id=agent_id)
timeout = int(request.data["timeout"])
if request.data["shell"] == "custom" and request.data["custom_shell"]:
shell = request.data["custom_shell"]
else:
shell = request.data["shell"]
data = {
"func": "rawcmd",
"timeout": timeout,
"payload": {
"command": request.data["cmd"],
"shell": shell,
},
"run_as_user": request.data["run_as_user"],
}
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.CMD_RUN,
command=request.data["cmd"],
username=request.user.username[:50],
)
data["id"] = hist.pk
r = asyncio.run(agent.nats_cmd(data, timeout=timeout + 2))
if r == "timeout":
return notify_error("Unable to contact the agent")
AuditLog.audit_raw_command(
username=request.user.username,
agent=agent,
cmd=request.data["cmd"],
shell=shell,
debug_info={"ip": request._client_ip},
)
return Response(r) | null |
189,096 | import asyncio
import datetime as dt
import random
import string
import time
from io import StringIO
from pathlib import Path
from django.conf import settings
from django.db.models import Exists, OuterRef, Prefetch, Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from django.utils.dateparse import parse_datetime
from meshctrl.utils import get_login_token
from packaging import version as pyver
from rest_framework import serializers
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from core.tasks import sync_mesh_perms_task
from core.utils import (
get_core_settings,
get_mesh_ws_url,
remove_mesh_agent,
token_is_valid,
wake_on_lan,
)
from logs.models import AuditLog, DebugLog, PendingAction
from scripts.models import Script
from scripts.tasks import bulk_command_task, bulk_script_task
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_OFFLINE,
AGENT_STATUS_ONLINE,
AGENT_TABLE_DEFER,
AgentHistoryType,
AgentMonType,
AgentPlat,
CustomFieldModel,
DebugLogType,
EvtLogNames,
PAAction,
)
from tacticalrmm.helpers import date_is_in_past, notify_error
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from tacticalrmm.utils import get_default_timezone, reload_nats
from winupdate.models import WinUpdate, WinUpdatePolicy
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
from .models import Agent, AgentCustomField, AgentHistory, Note
from .permissions import (
AgentHistoryPerms,
AgentNotesPerms,
AgentPerms,
AgentWOLPerms,
EvtLogPerms,
InstallAgentPerms,
ManageProcPerms,
MeshPerms,
RebootAgentPerms,
RecoverAgentPerms,
RunBulkPerms,
RunScriptPerms,
SendCMDPerms,
UpdateAgentPerms,
)
from .serializers import (
AgentCustomFieldSerializer,
AgentHistorySerializer,
AgentHostnameSerializer,
AgentNoteSerializer,
AgentSerializer,
AgentTableSerializer,
)
from .tasks import (
bulk_recover_agents_task,
run_script_email_results_task,
send_agent_update_task,
)
def ping(request, agent_id):
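    """Ping the agent over NATS, trying up to three times before reporting it
    as offline."""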
agent = get_object_or_404(Agent, agent_id=agent_id)
status = AGENT_STATUS_OFFLINE
attempts = 0
    while True:
r = asyncio.run(agent.nats_cmd({"func": "ping"}, timeout=2))
if r == "pong":
status = AGENT_STATUS_ONLINE
break
else:
attempts += 1
time.sleep(0.5)
if attempts >= 3:
break
return Response({"name": agent.hostname, "status": status})
def token_is_valid() -> tuple[str, bool]:
"""
    Return type: token: str, is_valid: bool. Token will be an empty string if not valid.
"""
from core.models import CodeSignToken
t: "Optional[CodeSignToken]" = CodeSignToken.objects.first()
if not t:
return "", False
if not t.token:
return "", False
if t.is_valid:
return t.token, True
return "", False
class User(AbstractUser, BaseAuditModel):
is_active = models.BooleanField(default=True)
block_dashboard_login = models.BooleanField(default=False)
totp_key = models.CharField(max_length=50, null=True, blank=True)
dark_mode = models.BooleanField(default=True)
show_community_scripts = models.BooleanField(default=True)
agent_dblclick_action: "AgentDblClick" = models.CharField(
max_length=50, choices=AgentDblClick.choices, default=AgentDblClick.EDIT_AGENT
)
url_action = models.ForeignKey(
"core.URLAction",
related_name="user",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
default_agent_tbl_tab = models.CharField(
max_length=50, choices=AgentTableTabs.choices, default=AgentTableTabs.MIXED
)
agents_per_page = models.PositiveIntegerField(default=50) # not currently used
client_tree_sort = models.CharField(
max_length=50, choices=ClientTreeSort.choices, default=ClientTreeSort.ALPHA_FAIL
)
client_tree_splitter = models.PositiveIntegerField(default=11)
loading_bar_color = models.CharField(max_length=255, default="red")
dash_info_color = models.CharField(max_length=255, default="info")
dash_positive_color = models.CharField(max_length=255, default="positive")
dash_negative_color = models.CharField(max_length=255, default="negative")
dash_warning_color = models.CharField(max_length=255, default="warning")
clear_search_when_switching = models.BooleanField(default=True)
date_format = models.CharField(max_length=30, blank=True, null=True)
is_installer_user = models.BooleanField(default=False)
last_login_ip = models.GenericIPAddressField(default=None, blank=True, null=True)
agent = models.OneToOneField(
"agents.Agent",
related_name="user",
null=True,
blank=True,
on_delete=models.CASCADE,
)
role = models.ForeignKey(
"accounts.Role",
null=True,
blank=True,
related_name="users",
on_delete=models.SET_NULL,
)
def mesh_user_id(self):
return f"user//{self.mesh_username}"
def mesh_username(self):
# lower() needed for mesh api
return f"{self.username.lower()}___{self.pk}"
def serialize(user):
        # serializes the user and returns json
from .serializers import UserSerializer
return UserSerializer(user).data
def get_and_set_role_cache(self) -> "Optional[Role]":
role = cache.get(f"{ROLE_CACHE_PREFIX}{self.role}")
if role and isinstance(role, Role):
return role
elif not role and not self.role:
return None
else:
models.prefetch_related_objects(
[self.role],
"can_view_clients",
"can_view_sites",
)
cache.set(f"{ROLE_CACHE_PREFIX}{self.role}", self.role, 600)
return self.role
def get_agent_url(*, goarch: str, plat: str, token: str = "") -> str:
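    """Return the agent download URL: the code-signed endpoint when a codesign
    token is supplied, otherwise the public GitHub release asset."""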
ver = settings.LATEST_AGENT_VER
if token:
params = {
"version": ver,
"arch": goarch,
"token": token,
"plat": plat,
"api": settings.ALLOWED_HOSTS[0],
}
return settings.AGENTS_URL + urllib.parse.urlencode(params)
return f"https://github.com/amidaware/rmmagent/releases/download/v{ver}/tacticalagent-v{ver}-{plat}-{goarch}.exe"
def generate_linux_install(
client: str,
site: str,
agent_type: str,
arch: str,
token: str,
api: str,
download_url: str,
) -> FileResponse:
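    """Build the Linux install script by substituting the agent download URL,
    mesh installer URL, client/site IDs, agent type, token and API URL into the
    template, returning it as a downloadable file."""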
match arch:
case "amd64":
arch_id = MeshAgentIdent.LINUX64
case "386":
arch_id = MeshAgentIdent.LINUX32
case "arm64":
arch_id = MeshAgentIdent.LINUX_ARM_64
case "arm":
arch_id = MeshAgentIdent.LINUX_ARM_HF
case _:
arch_id = "not_found"
core = get_core_settings()
uri = get_mesh_ws_url()
mesh_id = asyncio.run(get_mesh_device_id(uri, core.mesh_device_group))
mesh_dl = (
f"{core.mesh_site}/meshagents?id={mesh_id}&installflags=2&meshinstall={arch_id}"
)
text = Path(settings.LINUX_AGENT_SCRIPT).read_text()
replace = {
"agentDLChange": download_url,
"meshDLChange": mesh_dl,
"clientIDChange": client,
"siteIDChange": site,
"agentTypeChange": agent_type,
"tokenChange": token,
"apiURLChange": api,
}
for i, j in replace.items():
text = text.replace(i, j)
text += "\n"
with StringIO(text) as fp:
return FileResponse(
fp.read(), as_attachment=True, filename="linux_agent_install.sh"
)
def install_agent(request):
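    """Generate an installer for the selected method (exe, powershell, bash,
    mac or manual), embedding a temporary auth token for the installer user
    that expires after the requested number of hours."""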
from knox.models import AuthToken
from accounts.models import User
from agents.utils import get_agent_url
from core.utils import token_is_valid
insecure = getattr(settings, "TRMM_INSECURE", False)
if insecure and request.data["installMethod"] in {"exe", "powershell"}:
return notify_error(
"Not available in insecure mode. Please use the 'Manual' method."
)
# TODO rework this ghetto validation hack
# https://github.com/amidaware/tacticalrmm/issues/1461
try:
int(request.data["expires"])
except ValueError:
return notify_error("Please enter a valid number of hours")
client_id = request.data["client"]
site_id = request.data["site"]
version = settings.LATEST_AGENT_VER
goarch = request.data["goarch"]
plat = request.data["plat"]
if not _has_perm_on_site(request.user, site_id):
raise PermissionDenied()
codesign_token, is_valid = token_is_valid()
if request.data["installMethod"] in {"bash", "mac"} and not is_valid:
return notify_error(
"Linux/Mac agents require code signing. Please see https://docs.tacticalrmm.com/code_signing/ for more info."
)
inno = f"tacticalagent-v{version}-{plat}-{goarch}"
if plat == AgentPlat.WINDOWS:
inno += ".exe"
download_url = get_agent_url(goarch=goarch, plat=plat, token=codesign_token)
installer_user = User.objects.filter(is_installer_user=True).first()
_, token = AuthToken.objects.create(
user=installer_user, expiry=dt.timedelta(hours=int(request.data["expires"]))
)
install_flags = [
"-m",
"install",
"--api",
request.data["api"],
"--client-id",
client_id,
"--site-id",
site_id,
"--agent-type",
request.data["agenttype"],
"--auth",
token,
]
if request.data["installMethod"] == "exe":
from tacticalrmm.utils import generate_winagent_exe
return generate_winagent_exe(
client=client_id,
site=site_id,
agent_type=request.data["agenttype"],
rdp=request.data["rdp"],
ping=request.data["ping"],
power=request.data["power"],
goarch=goarch,
token=token,
api=request.data["api"],
file_name=request.data["fileName"],
)
elif request.data["installMethod"] == "bash":
from agents.utils import generate_linux_install
return generate_linux_install(
client=str(client_id),
site=str(site_id),
agent_type=request.data["agenttype"],
arch=goarch,
token=token,
api=request.data["api"],
download_url=download_url,
)
elif request.data["installMethod"] in {"manual", "mac"}:
resp = {}
if request.data["installMethod"] == "manual":
cmd = [
inno,
"/VERYSILENT",
"/SUPPRESSMSGBOXES",
"&&",
"ping",
"127.0.0.1",
"-n",
"5",
"&&",
r'"C:\Program Files\TacticalAgent\tacticalrmm.exe"',
] + install_flags
if int(request.data["rdp"]):
cmd.append("--rdp")
if int(request.data["ping"]):
cmd.append("--ping")
if int(request.data["power"]):
cmd.append("--power")
if insecure:
cmd.append("--insecure")
resp["cmd"] = " ".join(str(i) for i in cmd)
else:
install_flags.insert(0, f"sudo ./{inno}")
cmd = install_flags.copy()
dl = f"curl -L -o {inno} '{download_url}'"
resp["cmd"] = (
dl + f" && chmod +x {inno} && " + " ".join(str(i) for i in cmd)
)
if insecure:
resp["cmd"] += " --insecure"
resp["url"] = download_url
return Response(resp)
elif request.data["installMethod"] == "powershell":
text = Path(settings.BASE_DIR / "core" / "installer.ps1").read_text()
replace_dict = {
"innosetupchange": inno,
"clientchange": str(client_id),
"sitechange": str(site_id),
"apichange": request.data["api"],
"atypechange": request.data["agenttype"],
"powerchange": str(request.data["power"]),
"rdpchange": str(request.data["rdp"]),
"pingchange": str(request.data["ping"]),
"downloadchange": download_url,
"tokenchange": token,
}
for i, j in replace_dict.items():
text = text.replace(i, j)
with StringIO(text) as fp:
response = HttpResponse(fp.read(), content_type="text/plain")
response["Content-Disposition"] = "attachment; filename=rmm-installer.ps1"
return response | null |
189,097 | import asyncio
import datetime as dt
import random
import string
import time
from io import StringIO
from pathlib import Path
from django.conf import settings
from django.db.models import Exists, OuterRef, Prefetch, Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from django.utils.dateparse import parse_datetime
from meshctrl.utils import get_login_token
from packaging import version as pyver
from rest_framework import serializers
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from core.tasks import sync_mesh_perms_task
from core.utils import (
get_core_settings,
get_mesh_ws_url,
remove_mesh_agent,
token_is_valid,
wake_on_lan,
)
from logs.models import AuditLog, DebugLog, PendingAction
from scripts.models import Script
from scripts.tasks import bulk_command_task, bulk_script_task
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_OFFLINE,
AGENT_STATUS_ONLINE,
AGENT_TABLE_DEFER,
AgentHistoryType,
AgentMonType,
AgentPlat,
CustomFieldModel,
DebugLogType,
EvtLogNames,
PAAction,
)
from tacticalrmm.helpers import date_is_in_past, notify_error
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from tacticalrmm.utils import get_default_timezone, reload_nats
from winupdate.models import WinUpdate, WinUpdatePolicy
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
from .models import Agent, AgentCustomField, AgentHistory, Note
from .permissions import (
AgentHistoryPerms,
AgentNotesPerms,
AgentPerms,
AgentWOLPerms,
EvtLogPerms,
InstallAgentPerms,
ManageProcPerms,
MeshPerms,
RebootAgentPerms,
RecoverAgentPerms,
RunBulkPerms,
RunScriptPerms,
SendCMDPerms,
UpdateAgentPerms,
)
from .serializers import (
AgentCustomFieldSerializer,
AgentHistorySerializer,
AgentHostnameSerializer,
AgentNoteSerializer,
AgentSerializer,
AgentTableSerializer,
)
from .tasks import (
bulk_recover_agents_task,
run_script_email_results_task,
send_agent_update_task,
)
def get_mesh_ws_url() -> str:
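    """Build the MeshCentral control.ashx websocket URL with a fresh login
    token, using the external mesh site or the local mesh port depending on
    the deployment settings."""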
core = get_core_settings()
token = get_auth_token(core.mesh_api_superuser, core.mesh_token)
if settings.DOCKER_BUILD:
uri = f"{settings.MESH_WS_URL}/control.ashx?auth={token}"
else:
if getattr(settings, "USE_EXTERNAL_MESH", False):
site = core.mesh_site.replace("https", "wss")
uri = f"{site}/control.ashx?auth={token}"
else:
mesh_port = getattr(settings, "MESH_PORT", 4430)
uri = f"ws://127.0.0.1:{mesh_port}/control.ashx?auth={token}"
return uri
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat: "AgentPlat" = models.CharField( # type: ignore
max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
goarch: "GoArch" = models.CharField( # type: ignore
max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(
max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
return self.hostname
def save(self, *args, **kwargs):
# prevent recursion since calling set_alert_template() also calls save()
if not hasattr(self, "_processing_set_alert_template"):
self._processing_set_alert_template = False
if self.pk and not self._processing_set_alert_template:
orig = Agent.objects.get(pk=self.pk)
mon_type_changed = self.monitoring_type != orig.monitoring_type
site_changed = self.site_id != orig.site_id
if mon_type_changed or site_changed:
self._processing_set_alert_template = True
self.set_alert_template()
self._processing_set_alert_template = False
super().save(*args, **kwargs)
def client(self) -> "Client":
return self.site.client
def timezone(self) -> str:
        # return the default timezone unless the timezone is explicitly set per agent
if self.time_zone:
return self.time_zone
return get_core_settings().default_time_zone
def is_posix(self) -> bool:
return self.plat in {AgentPlat.LINUX, AgentPlat.DARWIN}
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
elif "32 bit" in self.operating_system or "32bit" in self.operating_system:
return "32"
return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
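        """Queue an agent update: refreshes the pending action (unless force is
        set) and publishes an "agentupdate" NATS message with the download URL
        and version. Returns "noarch", "not supported" or "created".
        """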
ver = settings.LATEST_AGENT_VER
if not self.goarch:
DebugLog.warning(
agent=self,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
)
return "noarch"
if pyver.parse(self.version) <= pyver.parse("1.3.0"):
return "not supported"
url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"
if not force:
if self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).exists():
self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).delete()
PendingAction.objects.create(
agent=self,
action_type=PAAction.AGENT_UPDATE,
details={
"url": url,
"version": ver,
"inno": bin,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": ver,
"inno": bin,
},
}
asyncio.run(self.nats_cmd(nats_data, wait=False))
return "created"
def status(self) -> str:
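        """Return online/offline/overdue based on last_seen relative to the
        agent's offline_time and overdue_time windows."""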
now = djangotime.now()
offline = now - djangotime.timedelta(minutes=self.offline_time)
overdue = now - djangotime.timedelta(minutes=self.overdue_time)
if self.last_seen is not None:
if (self.last_seen < offline) and (self.last_seen > overdue):
return AGENT_STATUS_OFFLINE
elif (self.last_seen < offline) and (self.last_seen < overdue):
return AGENT_STATUS_OVERDUE
else:
return AGENT_STATUS_ONLINE
else:
return AGENT_STATUS_OFFLINE
def checks(self) -> Dict[str, Any]:
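        """Summarize check results into total/passing/failing/warning/info
        counts plus a has_failing_checks flag (failing or warning)."""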
total, passing, failing, warning, info = 0, 0, 0, 0, 0
for check in self.get_checks_with_policies(exclude_overridden=True):
total += 1
            if not hasattr(check.check_result, "status") or (
                isinstance(check.check_result, CheckResult)
                and check.check_result.status == CheckStatus.PASSING
            ):
passing += 1
elif (
isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.FAILING
):
alert_severity = (
check.check_result.alert_severity
if check.check_type
in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
)
else check.alert_severity
)
if alert_severity == AlertSeverity.ERROR:
failing += 1
elif alert_severity == AlertSeverity.WARNING:
warning += 1
elif alert_severity == AlertSeverity.INFO:
info += 1
ret = {
"total": total,
"passing": passing,
"failing": failing,
"warning": warning,
"info": info,
"has_failing_checks": failing > 0 or warning > 0,
}
return ret
def pending_actions_count(self) -> int:
ret = cache.get(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}")
if ret is None:
ret = self.pendingactions.filter(status=PAStatus.PENDING).count()
cache.set(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}", ret, 600)
return ret
def cpu_model(self) -> List[str]:
if self.is_posix:
try:
return cast(List[str], self.wmi_detail["cpus"])
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
for cpu in cpus:
name = [x["Name"] for x in cpu if "Name" in x][0]
lp, nc = "", ""
with suppress(Exception):
lp = [
x["NumberOfLogicalProcessors"]
for x in cpu
if "NumberOfCores" in x
][0]
nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
if lp and nc:
cpu_string = f"{name}, {nc}C/{lp}T"
else:
cpu_string = name
ret.append(cpu_string)
return ret
except:
return ["unknown cpu model"]
def graphics(self) -> str:
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
for i in graphics:
caption = [x["Caption"] for x in i if "Caption" in x][0]
if "microsoft remote display adapter" in caption.lower():
mrda.append("yes")
continue
ret.append([x["Caption"] for x in i if "Caption" in x][0])
            # only return this if there are no other graphics cards
if not ret and mrda:
return "Microsoft Remote Display Adapter"
return ", ".join(ret)
except:
return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
except:
return "error getting local ips"
for i in ips:
try:
addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
except:
continue
if addr is None:
continue
for ip in addr:
if validators.ipv4(ip):
ret.append(ip)
if len(ret) == 1:
return cast(str, ret[0])
return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
if self.is_posix:
try:
return cast(str, self.wmi_detail["make_model"])
except:
return "error getting make/model"
with suppress(Exception):
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
model = [x["Model"] for x in comp_sys if "Model" in x][0]
if "to be filled" in model.lower():
mobo = self.wmi_detail["base_board"][0]
make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
model = [x["Product"] for x in mobo if "Product" in x][0]
if make.lower() == "lenovo":
sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
if "to be filled" not in sysfam.lower():
model = sysfam
return f"{make} {model}"
with suppress(Exception):
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
return "unknown make/model"
def physical_disks(self) -> Sequence[Disk]:
if self.is_posix:
try:
return cast(List[Disk], self.wmi_detail["disks"])
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
for disk in disks:
interface_type = [
x["InterfaceType"] for x in disk if "InterfaceType" in x
][0]
if interface_type == "USB":
continue
model = [x["Caption"] for x in disk if "Caption" in x][0]
size = [x["Size"] for x in disk if "Size" in x][0]
size_in_gb = round(int(size) / 1_073_741_824)
ret.append(f"{model} {size_in_gb:,}GB {interface_type}")
return ret
except:
return ["unknown disk"]
def serial_number(self) -> str:
if self.is_posix:
try:
return self.wmi_detail["serialnumber"]
except:
return ""
try:
return self.wmi_detail["bios"][0][0]["SerialNumber"]
except:
return ""
def hex_mesh_node_id(self) -> str:
return _b64_to_hex(self.mesh_node_id)
def online_agents(cls, min_version: str = "") -> "List[Agent]":
if min_version:
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if pyver.parse(i.version) >= pyver.parse(min_version)
and i.status == AGENT_STATUS_ONLINE
]
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if i.status == AGENT_STATUS_ONLINE
]
def is_supported_script(self, platforms: List[str]) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
if exclude_overridden:
checks = (
list(
check
for check in self.agentchecks.all()
if not check.overridden_by_policy
)
+ self.get_checks_from_policies()
)
else:
checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
results = self.taskresults.all() # type: ignore
for task in tasks:
for result in results:
if result.task.id == task.pk:
task.task_result = result
break
return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
results = self.checkresults.all() # type: ignore
for check in checks:
for result in results:
if result.assigned_check.id == check.pk:
check.check_result = result
break
return checks
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
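        """Return the agent, site, client and default policies that apply to
        this agent, replacing any that are excluded or blocked by policy
        inheritance with None."""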
from checks.models import Check
site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
default_policy = getattr(
get_core_settings(), f"{self.monitoring_type}_policy", None
)
        # prefetch excluded objects on policies, but only if the policy is not None
models.prefetch_related_objects(
[
policy
for policy in (self.policy, site_policy, client_policy, default_policy)
if policy
],
"excluded_agents",
"excluded_sites",
"excluded_clients",
models.Prefetch(
"policychecks", queryset=Check.objects.select_related("script")
),
"autotasks",
)
return {
"agent_policy": (
self.policy
if self.policy and not self.policy.is_agent_excluded(self)
else None
),
"site_policy": (
site_policy
if (site_policy and not site_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
else None
),
"client_policy": (
client_policy
if (client_policy and not client_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
else None
),
"default_policy": (
default_policy
if (default_policy and not default_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
and not self.client.block_policy_inheritance
else None
),
}
def check_run_interval(self) -> int:
interval = self.check_interval
# determine if any agent checks have a custom interval and set the lowest interval
for check in self.get_checks_with_policies():
if check.run_interval and check.run_interval < interval:
# don't allow check runs less than 15s
interval = 15 if check.run_interval < 15 else check.run_interval
return interval
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
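        """Run a script on this agent over NATS. When run_on_any is set and
        this agent does not respond, the script is run on the first responsive
        online agent instead. With wait=True the script output is returned;
        otherwise "ok" is returned immediately.
        """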
from scripts.models import Script
script = Script.objects.get(pk=scriptpk)
# always override if set on script model
if script.run_as_user:
run_as_user = True
parsed_args = script.parse_script_args(self, script.shell, args)
parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)
data = {
"func": "runscriptfull" if full else "runscript",
"timeout": timeout,
"script_args": parsed_args,
"payload": {
"code": script.code,
"shell": script.shell,
},
"run_as_user": run_as_user,
"env_vars": parsed_env_vars,
"nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
"deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
}
if history_pk != 0:
data["id"] = history_pk
running_agent = self
if run_on_any:
nats_ping = {"func": "ping"}
# try on self first
r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = self
else:
for agent in Agent.online_agents():
r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = agent
break
if running_agent.pk == self.pk:
return "Unable to find an online agent"
if wait:
return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
else:
asyncio.run(running_agent.nats_cmd(data, wait=False))
return "ok"
# auto approves updates
def approve_updates(self) -> None:
patch_policy = self.get_patch_policy()
severity_list = []
if patch_policy.critical == "approve":
severity_list.append("Critical")
if patch_policy.important == "approve":
severity_list.append("Important")
if patch_policy.moderate == "approve":
severity_list.append("Moderate")
if patch_policy.low == "approve":
severity_list.append("Low")
if patch_policy.other == "approve":
severity_list.append("")
self.winupdates.filter(severity__in=severity_list, installed=False).exclude(
action="approve"
).update(action="approve")
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self) -> "WinUpdatePolicy":
from winupdate.models import WinUpdatePolicy
        # start from the agent's own patch policy, creating one if it doesn't exist
patch_policy = None
agent_policy = self.winupdatepolicy.first()
if not agent_policy:
agent_policy = WinUpdatePolicy.objects.create(agent=self)
# Get the list of policies applied to the agent and select the
# highest priority one.
policies = self.get_agent_policies()
for _, policy in policies.items():
if policy and policy.active and policy.winupdatepolicy.exists():
patch_policy = policy.winupdatepolicy.first()
break
        # if no patch policy was found on any applied policy, return the agent's own patch policy
if not patch_policy:
return agent_policy
# patch policy exists. check if any agent settings are set to override patch policy
if agent_policy.critical != "inherit":
patch_policy.critical = agent_policy.critical
if agent_policy.important != "inherit":
patch_policy.important = agent_policy.important
if agent_policy.moderate != "inherit":
patch_policy.moderate = agent_policy.moderate
if agent_policy.low != "inherit":
patch_policy.low = agent_policy.low
if agent_policy.other != "inherit":
patch_policy.other = agent_policy.other
if agent_policy.run_time_frequency != "inherit":
patch_policy.run_time_frequency = agent_policy.run_time_frequency
patch_policy.run_time_hour = agent_policy.run_time_hour
patch_policy.run_time_days = agent_policy.run_time_days
if agent_policy.reboot_after_install != "inherit":
patch_policy.reboot_after_install = agent_policy.reboot_after_install
if not agent_policy.reprocess_failed_inherit:
patch_policy.reprocess_failed = agent_policy.reprocess_failed
patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
patch_policy.email_if_fail = agent_policy.email_if_fail
return patch_policy
def get_approved_update_guids(self) -> list[str]:
return list(
self.winupdates.filter(action="approve", installed=False).values_list( # type: ignore
"guid", flat=True
)
)
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
core = get_core_settings()
policies = self.get_agent_policies()
# loop through all policies applied to agent and return an alert_template if found
processed_policies: List[int] = []
for key, policy in policies.items():
            # the global default alert template takes precedence over a default policy's alert template
if (
"default" in key
and core.alert_template
and core.alert_template.is_active
and not core.alert_template.is_agent_excluded(self)
):
self.alert_template = core.alert_template
self.save(update_fields=["alert_template"])
return core.alert_template
elif (
policy
and policy.active
and policy.pk not in processed_policies
and policy.alert_template
and policy.alert_template.is_active
and not policy.alert_template.is_agent_excluded(self)
):
self.alert_template = policy.alert_template
self.save(update_fields=["alert_template"])
return policy.alert_template
elif (
"site" in key
and self.site.alert_template
and self.site.alert_template.is_active
and not self.site.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.alert_template
self.save(update_fields=["alert_template"])
return self.site.alert_template
elif (
"client" in key
and self.site.client.alert_template
and self.site.client.alert_template.is_active
and not self.site.client.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.client.alert_template
self.save(update_fields=["alert_template"])
return self.site.client.alert_template
# no alert templates found or agent has been excluded
self.alert_template = None
self.save(update_fields=["alert_template"])
return None
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_availability_alert(
self, skip_create=not self.should_create_alert(alert_template)
)
def get_checks_from_policies(self) -> "List[Check]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance or self.agentchecks.exists():
cache_key = f"agent_{self.agent_id}_checks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"
cached_checks = cache.get(cache_key)
if isinstance(cached_checks, list):
return cached_checks
else:
# clear agent checks that have overridden_by_policy set
self.agentchecks.update(overridden_by_policy=False) # type: ignore
# get agent checks based on policies
checks = Policy.get_policy_checks(self)
cache.set(cache_key, checks, 600)
return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance:
cache_key = f"agent_{self.agent_id}_tasks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"
cached_tasks = cache.get(cache_key)
if isinstance(cached_tasks, list):
return cached_tasks
else:
# get agent tasks based on policies
tasks = Policy.get_policy_tasks(self)
cache.set(cache_key, tasks, 600)
return tasks
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
opts = setup_nats_options()
try:
nc = await nats.connect(**opts)
except:
return "natsdown"
if wait:
try:
msg = await nc.request(
self.agent_id, msgpack.dumps(data), timeout=timeout
)
except TimeoutError:
ret = "timeout"
else:
try:
ret = msgpack.loads(msg.data)
except Exception as e:
ret = str(e)
logger.error(e)
await nc.close()
return ret
else:
await nc.publish(self.agent_id, msgpack.dumps(data))
await nc.flush()
await nc.close()
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
"""
Return type: tuple(message: str, error: bool)
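
        mode "tacagent" restarts the Tactical agent service through
        MeshCentral; mode "mesh" sends a NATS "recover" request to the agent.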
"""
if mode == "tacagent":
if self.plat == AgentPlat.LINUX:
cmd = "systemctl restart tacticalagent.service"
shell = 3
elif self.plat == AgentPlat.DARWIN:
cmd = "launchctl kickstart -k system/tacticalagent"
shell = 3
else:
cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
shell = 1
asyncio.run(
send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
)
return "ok", False
elif mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
if wait:
r = asyncio.run(self.nats_cmd(data, timeout=20))
if r == "ok":
return "ok", False
else:
return str(r), True
else:
asyncio.run(self.nats_cmd(data, timeout=20, wait=False))
return "ok", False
return "invalid", True
def serialize(agent: "Agent") -> Dict[str, Any]:
# serializes the agent and returns json
from .serializers import AgentAuditSerializer
return AgentAuditSerializer(agent).data
def delete_superseded_updates(self) -> None:
with suppress(Exception):
pks = [] # list of pks to delete
kbs = list(self.winupdates.values_list("kb", flat=True))
d = Counter(kbs)
dupes = [k for k, v in d.items() if v > 1]
for dupe in dupes:
titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
# extract the version from the title and sort from oldest to newest
                # skip if no version info is available, since there is nothing to parse
try:
matches = r"(Version|Versão)"
pattern = r"\(" + matches + r"(.*?)\)"
vers = [
re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
for i in titles
]
sorted_vers = sorted(vers, key=LooseVersion)
except:
continue
# append all but the latest version to our list of pks to delete
for ver in sorted_vers[:-1]:
q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
pks.append(q.first().pk)
pks = list(set(pks))
self.winupdates.filter(pk__in=pks).delete()
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
return bool(
self.overdue_dashboard_alert
or self.overdue_email_alert
or self.overdue_text_alert
or (
alert_template
and (
alert_template.agent_always_alert
or alert_template.agent_always_email
or alert_template.agent_always_text
)
)
)
def send_outage_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
(
f"Data has not been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"within the expected time."
),
alert_template=self.alert_template,
)
def send_recovery_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
(
f"Data has been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"after an interruption in data transmission."
),
alert_template=self.alert_template,
)
def send_outage_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
alert_template=self.alert_template,
)
def send_recovery_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
alert_template=self.alert_template,
)
def recover(request, agent_id: str) -> Response:
agent: Agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER), agent_id=agent_id
)
mode = request.data["mode"]
if mode == "tacagent":
uri = get_mesh_ws_url()
agent.recover(mode, uri, wait=False)
return Response("Recovery will be attempted shortly")
elif mode == "mesh":
r, err = agent.recover(mode, "")
if err:
return notify_error(f"Unable to complete recovery: {r}")
return Response("Successfully completed recovery") | null |
189,098 | import asyncio
import datetime as dt
import random
import string
import time
from io import StringIO
from pathlib import Path
from django.conf import settings
from django.db.models import Exists, OuterRef, Prefetch, Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from django.utils.dateparse import parse_datetime
from meshctrl.utils import get_login_token
from packaging import version as pyver
from rest_framework import serializers
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from core.tasks import sync_mesh_perms_task
from core.utils import (
get_core_settings,
get_mesh_ws_url,
remove_mesh_agent,
token_is_valid,
wake_on_lan,
)
from logs.models import AuditLog, DebugLog, PendingAction
from scripts.models import Script
from scripts.tasks import bulk_command_task, bulk_script_task
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_OFFLINE,
AGENT_STATUS_ONLINE,
AGENT_TABLE_DEFER,
AgentHistoryType,
AgentMonType,
AgentPlat,
CustomFieldModel,
DebugLogType,
EvtLogNames,
PAAction,
)
from tacticalrmm.helpers import date_is_in_past, notify_error
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from tacticalrmm.utils import get_default_timezone, reload_nats
from winupdate.models import WinUpdate, WinUpdatePolicy
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
from .models import Agent, AgentCustomField, AgentHistory, Note
from .permissions import (
AgentHistoryPerms,
AgentNotesPerms,
AgentPerms,
AgentWOLPerms,
EvtLogPerms,
InstallAgentPerms,
ManageProcPerms,
MeshPerms,
RebootAgentPerms,
RecoverAgentPerms,
RunBulkPerms,
RunScriptPerms,
SendCMDPerms,
UpdateAgentPerms,
)
from .serializers import (
AgentCustomFieldSerializer,
AgentHistorySerializer,
AgentHostnameSerializer,
AgentNoteSerializer,
AgentSerializer,
AgentTableSerializer,
)
from .tasks import (
bulk_recover_agents_task,
run_script_email_results_task,
send_agent_update_task,
)
class AuditLog(models.Model):
id = models.BigAutoField(primary_key=True)
username = models.CharField(max_length=255)
agent = models.CharField(max_length=255, null=True, blank=True)
agent_id = models.CharField(max_length=255, blank=True, null=True)
entry_time = models.DateTimeField(auto_now_add=True)
action = models.CharField(max_length=100, choices=AuditActionType.choices)
object_type = models.CharField(max_length=100, choices=AuditObjType.choices)
before_value = models.JSONField(null=True, blank=True)
after_value = models.JSONField(null=True, blank=True)
message = models.CharField(max_length=255, null=True, blank=True)
debug_info = models.JSONField(null=True, blank=True)
def __str__(self) -> str:
return f"{self.username} {self.action} {self.object_type}"
def save(self, *args: Any, **kwargs: Any) -> None:
if not self.pk and self.message:
# truncate message field if longer than 255 characters
self.message = (
(self.message[:253] + "..") if len(self.message) > 255 else self.message
)
return super().save(*args, **kwargs)
def audit_mesh_session(
username: str, agent: "Agent", debug_info: Dict[Any, Any] = {}
) -> None:
AuditLog.objects.create(
username=username,
agent=agent.hostname,
agent_id=agent.agent_id,
object_type=AuditObjType.AGENT,
action=AuditActionType.REMOTE_SESSION,
message=f"{username} used Mesh Central to initiate a remote session to {agent.hostname}.",
debug_info=debug_info,
)
def audit_raw_command(
username: str,
agent: "Agent",
cmd: str,
shell: str,
debug_info: Dict[Any, Any] = {},
) -> None:
AuditLog.objects.create(
username=username,
agent=agent.hostname,
agent_id=agent.agent_id,
object_type=AuditObjType.AGENT,
action=AuditActionType.EXEC_COMMAND,
message=f"{username} issued {shell} command on {agent.hostname}.",
after_value=cmd,
debug_info=debug_info,
)
def audit_object_changed(
username: str,
object_type: str,
before: Dict[Any, Any],
after: Dict[Any, Any],
name: str = "",
debug_info: Dict[Any, Any] = {},
) -> None:
AuditLog.objects.create(
username=username,
object_type=object_type,
agent=before["hostname"] if object_type == AuditObjType.AGENT else None,
agent_id=before["agent_id"] if object_type == AuditObjType.AGENT else None,
action=AuditActionType.MODIFY,
message=f"{username} modified {object_type} {name}",
before_value=before,
after_value=after,
debug_info=debug_info,
)
def audit_object_add(
username: str,
object_type: str,
after: Dict[Any, Any],
name: str = "",
debug_info: Dict[Any, Any] = {},
) -> None:
AuditLog.objects.create(
username=username,
object_type=object_type,
agent=after["hostname"] if object_type == AuditObjType.AGENT else None,
agent_id=after["agent_id"] if object_type == AuditObjType.AGENT else None,
action=AuditActionType.ADD,
message=f"{username} added {object_type} {name}",
after_value=after,
debug_info=debug_info,
)
def audit_object_delete(
username: str,
object_type: str,
before: Dict[Any, Any],
name: str = "",
debug_info: Dict[Any, Any] = {},
) -> None:
AuditLog.objects.create(
username=username,
object_type=object_type,
agent=before["hostname"] if object_type == AuditObjType.AGENT else None,
agent_id=before["agent_id"] if object_type == AuditObjType.AGENT else None,
action=AuditActionType.DELETE,
message=f"{username} deleted {object_type} {name}",
before_value=before,
debug_info=debug_info,
)
def audit_script_run(
username: str, agent: "Agent", script: str, debug_info: Dict[Any, Any] = {}
) -> None:
AuditLog.objects.create(
agent=agent.hostname,
agent_id=agent.agent_id,
username=username,
object_type=AuditObjType.AGENT,
action=AuditActionType.EXEC_SCRIPT,
message=f'{username} ran script: "{script}" on {agent.hostname}',
debug_info=debug_info,
)
def audit_user_failed_login(username: str, debug_info: Dict[Any, Any] = {}) -> None:
AuditLog.objects.create(
username=username,
object_type=AuditObjType.USER,
action=AuditActionType.FAILED_LOGIN,
message=f"{username} failed to login: Credentials were rejected",
debug_info=debug_info,
)
def audit_user_failed_twofactor(
username: str, debug_info: Dict[Any, Any] = {}
) -> None:
AuditLog.objects.create(
username=username,
object_type=AuditObjType.USER,
action=AuditActionType.FAILED_LOGIN,
message=f"{username} failed to login: Two Factor token rejected",
debug_info=debug_info,
)
def audit_user_login_successful(
username: str, debug_info: Dict[Any, Any] = {}
) -> None:
AuditLog.objects.create(
username=username,
object_type=AuditObjType.USER,
action=AuditActionType.LOGIN,
message=f"{username} logged in successfully",
debug_info=debug_info,
)
def audit_url_action(
username: str,
urlaction: "URLAction",
instance: "Union[Agent, Client, Site]",
debug_info: Dict[Any, Any] = {},
) -> None:
from agents.models import Agent
name = instance.hostname if isinstance(instance, Agent) else instance.name
classname = type(instance).__name__
AuditLog.objects.create(
username=username,
agent=name if isinstance(instance, Agent) else None,
agent_id=instance.agent_id if isinstance(instance, Agent) else None,
object_type=classname.lower(),
action=AuditActionType.URL_ACTION,
message=f"{username} ran url action: {urlaction.pattern} on {classname}: {name}",
debug_info=debug_info,
)
def audit_bulk_action(
username: str,
action: str,
affected: Dict[str, Any],
debug_info: Dict[Any, Any] = {},
) -> None:
from agents.models import Agent
from clients.models import Client, Site
from scripts.models import Script
target = ""
agents = None
if affected["target"] == "all":
target = "on all agents"
elif affected["target"] == "client":
client = Client.objects.get(pk=affected["client"])
target = f"on all agents within client: {client.name}"
elif affected["target"] == "site":
site = Site.objects.get(pk=affected["site"])
target = f"on all agents within site: {site.client.name}\\{site.name}"
elif affected["target"] == "agents":
agents = Agent.objects.filter(agent_id__in=affected["agents"]).values_list(
"hostname", flat=True
)
target = "on multiple agents"
if action == "script":
script = Script.objects.get(pk=affected["script"])
action = f"script: {script.name}"
if agents:
affected["agent_hostnames"] = list(agents)
AuditLog.objects.create(
username=username,
object_type=AuditObjType.BULK,
action=AuditActionType.BULK_ACTION,
message=f"{username} executed bulk {action} {target}",
debug_info=debug_info,
after_value=affected,
)
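# Illustrative sketch, not part of the original module: the shape of the "affected"
# payload that audit_bulk_action() reads, based on the branches above. The pk values
# and agent ids below are hypothetical.
def _example_audit_bulk_action_payload() -> dict:
    return {
        "target": "agents",  # one of "all", "client", "site", "agents"
        "agents": ["hypothetical-agent-id-1", "hypothetical-agent-id-2"],
        "script": 42,  # hypothetical Script pk, only read when the bulk action is "script"
    }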
class Script(BaseAuditModel):
guid = models.CharField(max_length=64, null=True, blank=True)
name = models.CharField(max_length=255)
description = models.TextField(null=True, blank=True, default="")
filename = models.CharField(max_length=255, null=True, blank=True)
shell = models.CharField(
max_length=100, choices=ScriptShell.choices, default=ScriptShell.POWERSHELL
)
script_type = models.CharField(
max_length=100, choices=ScriptType.choices, default=ScriptType.USER_DEFINED
)
args = ArrayField(
models.TextField(null=True, blank=True),
null=True,
blank=True,
default=list,
)
env_vars = ArrayField(
models.TextField(null=True, blank=True),
null=True,
blank=True,
default=list,
)
syntax = TextField(null=True, blank=True)
favorite = models.BooleanField(default=False)
category = models.CharField(max_length=100, null=True, blank=True)
script_body = models.TextField(blank=True, default="")
script_hash = models.CharField(max_length=100, null=True, blank=True)
code_base64 = models.TextField(blank=True, default="") # deprecated
default_timeout = models.PositiveIntegerField(default=90)
hidden = models.BooleanField(default=False)
supported_platforms = ArrayField(
models.CharField(max_length=20), null=True, blank=True, default=list
)
run_as_user = models.BooleanField(default=False)
def __str__(self):
return self.name
def code_no_snippets(self):
return self.script_body or ""
def code(self):
return self.replace_with_snippets(self.code_no_snippets)
def replace_with_snippets(cls, code):
# check if snippet has been added to script body
matches = re.finditer(r"{{(.*)}}", code)
if matches:
replaced_code = code
for snippet in matches:
snippet_name = snippet.group(1).strip()
if ScriptSnippet.objects.filter(name=snippet_name).exists():
value = ScriptSnippet.objects.get(name=snippet_name).code
else:
value = ""
replaced_code = re.sub(
snippet.group(), value.replace("\\", "\\\\"), replaced_code
)
return replaced_code
return code
def hash_script_body(self):
from django.conf import settings
msg = self.code.encode(errors="ignore")
return hmac.new(settings.SECRET_KEY.encode(), msg, hashlib.sha256).hexdigest()
def load_community_scripts(cls):
import json
import os
from django.conf import settings
# load community uploaded scripts into the database
# skip ones that already exist, only updating name / desc in case it changes
# for install script
scripts_dir = os.path.join(settings.SCRIPTS_DIR, "scripts")
with open(os.path.join(settings.SCRIPTS_DIR, "community_scripts.json")) as f:
info = json.load(f)
# used to remove scripts from DB that are removed from the json file and file system
community_scripts_processed = [] # list of script guids
for script in info:
if os.path.exists(os.path.join(scripts_dir, script["filename"])):
s = cls.objects.filter(
script_type=ScriptType.BUILT_IN, guid=script["guid"]
)
category = (
script["category"] if "category" in script.keys() else "Community"
)
default_timeout = (
int(script["default_timeout"])
if "default_timeout" in script.keys()
else 90
)
args = script["args"] if "args" in script.keys() else []
syntax = script["syntax"] if "syntax" in script.keys() else ""
supported_platforms = (
script["supported_platforms"]
if "supported_platforms" in script.keys()
else []
)
# if community script exists update it
if s.exists():
i: Script = s.get()
i.name = script["name"]
i.description = script["description"]
i.category = category
i.shell = script["shell"]
i.default_timeout = default_timeout
i.args = args
i.syntax = syntax
i.filename = script["filename"]
i.supported_platforms = supported_platforms
with open(os.path.join(scripts_dir, script["filename"]), "rb") as f:
i.script_body = f.read().decode("utf-8")
# i.hash_script_body()
i.save()
community_scripts_processed.append(i.guid)
# doesn't exist in database so create it
else:
print(f"Adding new community script: {script['name']}")
with open(os.path.join(scripts_dir, script["filename"]), "rb") as f:
script_body = f.read().decode("utf-8")
new_script: Script = cls(
script_body=script_body,
guid=script["guid"],
name=script["name"],
description=script["description"],
shell=script["shell"],
script_type=ScriptType.BUILT_IN,
category=category,
default_timeout=default_timeout,
args=args,
filename=script["filename"],
syntax=syntax,
supported_platforms=supported_platforms,
)
# new_script.hash_script_body() # also saves script
new_script.save()
community_scripts_processed.append(new_script.guid)
# check for community scripts that were deleted from json and scripts folder
count, _ = (
Script.objects.filter(script_type=ScriptType.BUILT_IN)
.exclude(guid__in=community_scripts_processed)
.delete()
)
if count:
print(
f"Removing {count} community scripts that was removed from source repo"
)
def serialize(script):
# serializes the script and returns json
from .serializers import ScriptSerializer
return ScriptSerializer(script).data
# TODO refactor common functionality of parse functions
def parse_script_args(cls, agent, shell: str, args: List[str] = []) -> list:
if not args:
return []
temp_args = []
# pattern to match for injection
pattern = re.compile(".*\\{\\{(.*)\\}\\}.*")
for arg in args:
if match := pattern.match(arg):
# only get the text captured between the () in the regex
string = match.group(1)
value = replace_arg_db_values(
string=string,
instance=agent,
shell=shell,
quotes=shell != ScriptShell.CMD,
)
if value:
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
except re.error:
temp_args.append(
re.sub("\\{\\{.*\\}\\}", re.escape(value), arg)
)
else:
# pass parameter unaltered
temp_args.append(arg)
else:
temp_args.append(arg)
return temp_args
# TODO refactor common functionality of parse functions
def parse_script_env_vars(cls, agent, shell: str, env_vars: list[str] = []) -> list:
if not env_vars:
return []
temp_env_vars = []
pattern = re.compile(".*\\{\\{(.*)\\}\\}.*")
for env_var in env_vars:
# must be in format KEY=VALUE
try:
env_key = env_var.split("=")[0]
env_val = env_var.split("=")[1]
except:
continue
if match := pattern.match(env_val):
string = match.group(1)
value = replace_arg_db_values(
string=string,
instance=agent,
shell=shell,
quotes=False,
)
if value:
try:
new_val = re.sub("\\{\\{.*\\}\\}", value, env_val)
except re.error:
new_val = re.sub("\\{\\{.*\\}\\}", re.escape(value), env_val)
temp_env_vars.append(f"{env_key}={new_val}")
else:
# pass parameter unaltered
temp_env_vars.append(env_var)
return temp_env_vars
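# Illustrative sketch, not part of the original module: demonstrates the {{ ... }}
# placeholder convention that parse_script_args() / parse_script_env_vars() handle above,
# using the same regex but a hard-coded value instead of replace_arg_db_values().
# The arguments and the substituted hostname are hypothetical.
def _example_placeholder_substitution() -> list:
    import re

    args = ["-Hostname {{agent.hostname}}", "-Verbose"]
    pattern = re.compile(".*\\{\\{(.*)\\}\\}.*")
    resolved = []
    for arg in args:
        if pattern.match(arg):
            # a real run would resolve the value via replace_arg_db_values()
            resolved.append(re.sub("\\{\\{.*\\}\\}", "example-pc", arg))
        else:
            resolved.append(arg)
    return resolved  # ["-Hostname example-pc", "-Verbose"]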
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat: "AgentPlat" = models.CharField( # type: ignore
max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
goarch: "GoArch" = models.CharField( # type: ignore
max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(
max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
return self.hostname
def save(self, *args, **kwargs):
# prevent recursion since calling set_alert_template() also calls save()
if not hasattr(self, "_processing_set_alert_template"):
self._processing_set_alert_template = False
if self.pk and not self._processing_set_alert_template:
orig = Agent.objects.get(pk=self.pk)
mon_type_changed = self.monitoring_type != orig.monitoring_type
site_changed = self.site_id != orig.site_id
if mon_type_changed or site_changed:
self._processing_set_alert_template = True
self.set_alert_template()
self._processing_set_alert_template = False
super().save(*args, **kwargs)
def client(self) -> "Client":
return self.site.client
def timezone(self) -> str:
# return the default timezone unless the timezone is explicitly set per agent
if self.time_zone:
return self.time_zone
return get_core_settings().default_time_zone
def is_posix(self) -> bool:
return self.plat in {AgentPlat.LINUX, AgentPlat.DARWIN}
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
elif "32 bit" in self.operating_system or "32bit" in self.operating_system:
return "32"
return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
ver = settings.LATEST_AGENT_VER
if not self.goarch:
DebugLog.warning(
agent=self,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
)
return "noarch"
if pyver.parse(self.version) <= pyver.parse("1.3.0"):
return "not supported"
url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"
if not force:
if self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).exists():
self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).delete()
PendingAction.objects.create(
agent=self,
action_type=PAAction.AGENT_UPDATE,
details={
"url": url,
"version": ver,
"inno": bin,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": ver,
"inno": bin,
},
}
asyncio.run(self.nats_cmd(nats_data, wait=False))
return "created"
def status(self) -> str:
now = djangotime.now()
offline = now - djangotime.timedelta(minutes=self.offline_time)
overdue = now - djangotime.timedelta(minutes=self.overdue_time)
if self.last_seen is not None:
if (self.last_seen < offline) and (self.last_seen > overdue):
return AGENT_STATUS_OFFLINE
elif (self.last_seen < offline) and (self.last_seen < overdue):
return AGENT_STATUS_OVERDUE
else:
return AGENT_STATUS_ONLINE
else:
return AGENT_STATUS_OFFLINE
def checks(self) -> Dict[str, Any]:
total, passing, failing, warning, info = 0, 0, 0, 0, 0
for check in self.get_checks_with_policies(exclude_overridden=True):
total += 1
if (
not hasattr(check.check_result, "status")
or isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.PASSING
):
passing += 1
elif (
isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.FAILING
):
alert_severity = (
check.check_result.alert_severity
if check.check_type
in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
)
else check.alert_severity
)
if alert_severity == AlertSeverity.ERROR:
failing += 1
elif alert_severity == AlertSeverity.WARNING:
warning += 1
elif alert_severity == AlertSeverity.INFO:
info += 1
ret = {
"total": total,
"passing": passing,
"failing": failing,
"warning": warning,
"info": info,
"has_failing_checks": failing > 0 or warning > 0,
}
return ret
def pending_actions_count(self) -> int:
ret = cache.get(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}")
if ret is None:
ret = self.pendingactions.filter(status=PAStatus.PENDING).count()
cache.set(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}", ret, 600)
return ret
def cpu_model(self) -> List[str]:
if self.is_posix:
try:
return cast(List[str], self.wmi_detail["cpus"])
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
for cpu in cpus:
name = [x["Name"] for x in cpu if "Name" in x][0]
lp, nc = "", ""
with suppress(Exception):
lp = [
x["NumberOfLogicalProcessors"]
for x in cpu
if "NumberOfCores" in x
][0]
nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
if lp and nc:
cpu_string = f"{name}, {nc}C/{lp}T"
else:
cpu_string = name
ret.append(cpu_string)
return ret
except:
return ["unknown cpu model"]
def graphics(self) -> str:
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
for i in graphics:
caption = [x["Caption"] for x in i if "Caption" in x][0]
if "microsoft remote display adapter" in caption.lower():
mrda.append("yes")
continue
ret.append([x["Caption"] for x in i if "Caption" in x][0])
# only return this if there are no other graphics cards
if not ret and mrda:
return "Microsoft Remote Display Adapter"
return ", ".join(ret)
except:
return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
except:
return "error getting local ips"
for i in ips:
try:
addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
except:
continue
if addr is None:
continue
for ip in addr:
if validators.ipv4(ip):
ret.append(ip)
if len(ret) == 1:
return cast(str, ret[0])
return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
if self.is_posix:
try:
return cast(str, self.wmi_detail["make_model"])
except:
return "error getting make/model"
with suppress(Exception):
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
model = [x["Model"] for x in comp_sys if "Model" in x][0]
if "to be filled" in model.lower():
mobo = self.wmi_detail["base_board"][0]
make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
model = [x["Product"] for x in mobo if "Product" in x][0]
if make.lower() == "lenovo":
sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
if "to be filled" not in sysfam.lower():
model = sysfam
return f"{make} {model}"
with suppress(Exception):
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
return "unknown make/model"
def physical_disks(self) -> Sequence[Disk]:
if self.is_posix:
try:
return cast(List[Disk], self.wmi_detail["disks"])
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
for disk in disks:
interface_type = [
x["InterfaceType"] for x in disk if "InterfaceType" in x
][0]
if interface_type == "USB":
continue
model = [x["Caption"] for x in disk if "Caption" in x][0]
size = [x["Size"] for x in disk if "Size" in x][0]
size_in_gb = round(int(size) / 1_073_741_824)
ret.append(f"{model} {size_in_gb:,}GB {interface_type}")
return ret
except:
return ["unknown disk"]
def serial_number(self) -> str:
if self.is_posix:
try:
return self.wmi_detail["serialnumber"]
except:
return ""
try:
return self.wmi_detail["bios"][0][0]["SerialNumber"]
except:
return ""
def hex_mesh_node_id(self) -> str:
return _b64_to_hex(self.mesh_node_id)
def online_agents(cls, min_version: str = "") -> "List[Agent]":
if min_version:
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if pyver.parse(i.version) >= pyver.parse(min_version)
and i.status == AGENT_STATUS_ONLINE
]
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if i.status == AGENT_STATUS_ONLINE
]
def is_supported_script(self, platforms: List[str]) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
if exclude_overridden:
checks = (
list(
check
for check in self.agentchecks.all()
if not check.overridden_by_policy
)
+ self.get_checks_from_policies()
)
else:
checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
results = self.taskresults.all() # type: ignore
for task in tasks:
for result in results:
if result.task.id == task.pk:
task.task_result = result
break
return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
results = self.checkresults.all() # type: ignore
for check in checks:
for result in results:
if result.assigned_check.id == check.pk:
check.check_result = result
break
return checks
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
from checks.models import Check
site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
default_policy = getattr(
get_core_settings(), f"{self.monitoring_type}_policy", None
)
# prefetch excluded objects on policies only if the policy is not None
models.prefetch_related_objects(
[
policy
for policy in (self.policy, site_policy, client_policy, default_policy)
if policy
],
"excluded_agents",
"excluded_sites",
"excluded_clients",
models.Prefetch(
"policychecks", queryset=Check.objects.select_related("script")
),
"autotasks",
)
return {
"agent_policy": (
self.policy
if self.policy and not self.policy.is_agent_excluded(self)
else None
),
"site_policy": (
site_policy
if (site_policy and not site_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
else None
),
"client_policy": (
client_policy
if (client_policy and not client_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
else None
),
"default_policy": (
default_policy
if (default_policy and not default_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
and not self.client.block_policy_inheritance
else None
),
}
def check_run_interval(self) -> int:
interval = self.check_interval
# determine if any agent checks have a custom interval and set the lowest interval
for check in self.get_checks_with_policies():
if check.run_interval and check.run_interval < interval:
# don't allow check runs less than 15s
interval = 15 if check.run_interval < 15 else check.run_interval
return interval
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
from scripts.models import Script
script = Script.objects.get(pk=scriptpk)
# always override if set on script model
if script.run_as_user:
run_as_user = True
parsed_args = script.parse_script_args(self, script.shell, args)
parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)
data = {
"func": "runscriptfull" if full else "runscript",
"timeout": timeout,
"script_args": parsed_args,
"payload": {
"code": script.code,
"shell": script.shell,
},
"run_as_user": run_as_user,
"env_vars": parsed_env_vars,
"nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
"deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
}
if history_pk != 0:
data["id"] = history_pk
running_agent = self
if run_on_any:
nats_ping = {"func": "ping"}
# try on self first
r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = self
else:
for agent in Agent.online_agents():
r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = agent
break
if running_agent.pk == self.pk:
return "Unable to find an online agent"
if wait:
return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
else:
asyncio.run(running_agent.nats_cmd(data, wait=False))
return "ok"
# auto approves updates
def approve_updates(self) -> None:
patch_policy = self.get_patch_policy()
severity_list = []
if patch_policy.critical == "approve":
severity_list.append("Critical")
if patch_policy.important == "approve":
severity_list.append("Important")
if patch_policy.moderate == "approve":
severity_list.append("Moderate")
if patch_policy.low == "approve":
severity_list.append("Low")
if patch_policy.other == "approve":
severity_list.append("")
self.winupdates.filter(severity__in=severity_list, installed=False).exclude(
action="approve"
).update(action="approve")
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self) -> "WinUpdatePolicy":
from winupdate.models import WinUpdatePolicy
# check if site has a patch policy and if so use it
patch_policy = None
agent_policy = self.winupdatepolicy.first()
if not agent_policy:
agent_policy = WinUpdatePolicy.objects.create(agent=self)
# Get the list of policies applied to the agent and select the
# highest priority one.
policies = self.get_agent_policies()
for _, policy in policies.items():
if policy and policy.active and policy.winupdatepolicy.exists():
patch_policy = policy.winupdatepolicy.first()
break
# if policy still doesn't exist return the agent patch policy
if not patch_policy:
return agent_policy
# patch policy exists. check if any agent settings are set to override patch policy
if agent_policy.critical != "inherit":
patch_policy.critical = agent_policy.critical
if agent_policy.important != "inherit":
patch_policy.important = agent_policy.important
if agent_policy.moderate != "inherit":
patch_policy.moderate = agent_policy.moderate
if agent_policy.low != "inherit":
patch_policy.low = agent_policy.low
if agent_policy.other != "inherit":
patch_policy.other = agent_policy.other
if agent_policy.run_time_frequency != "inherit":
patch_policy.run_time_frequency = agent_policy.run_time_frequency
patch_policy.run_time_hour = agent_policy.run_time_hour
patch_policy.run_time_days = agent_policy.run_time_days
if agent_policy.reboot_after_install != "inherit":
patch_policy.reboot_after_install = agent_policy.reboot_after_install
if not agent_policy.reprocess_failed_inherit:
patch_policy.reprocess_failed = agent_policy.reprocess_failed
patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
patch_policy.email_if_fail = agent_policy.email_if_fail
return patch_policy
def get_approved_update_guids(self) -> list[str]:
return list(
self.winupdates.filter(action="approve", installed=False).values_list( # type: ignore
"guid", flat=True
)
)
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
core = get_core_settings()
policies = self.get_agent_policies()
# loop through all policies applied to agent and return an alert_template if found
processed_policies: List[int] = []
for key, policy in policies.items():
# default alert_template will override a default policy with alert template applied
if (
"default" in key
and core.alert_template
and core.alert_template.is_active
and not core.alert_template.is_agent_excluded(self)
):
self.alert_template = core.alert_template
self.save(update_fields=["alert_template"])
return core.alert_template
elif (
policy
and policy.active
and policy.pk not in processed_policies
and policy.alert_template
and policy.alert_template.is_active
and not policy.alert_template.is_agent_excluded(self)
):
self.alert_template = policy.alert_template
self.save(update_fields=["alert_template"])
return policy.alert_template
elif (
"site" in key
and self.site.alert_template
and self.site.alert_template.is_active
and not self.site.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.alert_template
self.save(update_fields=["alert_template"])
return self.site.alert_template
elif (
"client" in key
and self.site.client.alert_template
and self.site.client.alert_template.is_active
and not self.site.client.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.client.alert_template
self.save(update_fields=["alert_template"])
return self.site.client.alert_template
# no alert templates found or agent has been excluded
self.alert_template = None
self.save(update_fields=["alert_template"])
return None
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_availability_alert(
self, skip_create=not self.should_create_alert(alert_template)
)
def get_checks_from_policies(self) -> "List[Check]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance or self.agentchecks.exists():
cache_key = f"agent_{self.agent_id}_checks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"
cached_checks = cache.get(cache_key)
if isinstance(cached_checks, list):
return cached_checks
else:
# clear agent checks that have overridden_by_policy set
self.agentchecks.update(overridden_by_policy=False) # type: ignore
# get agent checks based on policies
checks = Policy.get_policy_checks(self)
cache.set(cache_key, checks, 600)
return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance:
cache_key = f"agent_{self.agent_id}_tasks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"
cached_tasks = cache.get(cache_key)
if isinstance(cached_tasks, list):
return cached_tasks
else:
# get agent tasks based on policies
tasks = Policy.get_policy_tasks(self)
cache.set(cache_key, tasks, 600)
return tasks
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
opts = setup_nats_options()
try:
nc = await nats.connect(**opts)
except:
return "natsdown"
if wait:
try:
msg = await nc.request(
self.agent_id, msgpack.dumps(data), timeout=timeout
)
except TimeoutError:
ret = "timeout"
else:
try:
ret = msgpack.loads(msg.data)
except Exception as e:
ret = str(e)
logger.error(e)
await nc.close()
return ret
else:
await nc.publish(self.agent_id, msgpack.dumps(data))
await nc.flush()
await nc.close()
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
"""
Return type: tuple(message: str, error: bool)
"""
if mode == "tacagent":
if self.plat == AgentPlat.LINUX:
cmd = "systemctl restart tacticalagent.service"
shell = 3
elif self.plat == AgentPlat.DARWIN:
cmd = "launchctl kickstart -k system/tacticalagent"
shell = 3
else:
cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
shell = 1
asyncio.run(
send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
)
return "ok", False
elif mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
if wait:
r = asyncio.run(self.nats_cmd(data, timeout=20))
if r == "ok":
return "ok", False
else:
return str(r), True
else:
asyncio.run(self.nats_cmd(data, timeout=20, wait=False))
return "ok", False
return "invalid", True
def serialize(agent: "Agent") -> Dict[str, Any]:
# serializes the agent and returns json
from .serializers import AgentAuditSerializer
return AgentAuditSerializer(agent).data
def delete_superseded_updates(self) -> None:
with suppress(Exception):
pks = [] # list of pks to delete
kbs = list(self.winupdates.values_list("kb", flat=True))
d = Counter(kbs)
dupes = [k for k, v in d.items() if v > 1]
for dupe in dupes:
titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
# extract the version from the title and sort from oldest to newest
# skip if no version info is available, since there is nothing to parse
try:
matches = r"(Version|Versão)"
pattern = r"\(" + matches + r"(.*?)\)"
vers = [
re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
for i in titles
]
sorted_vers = sorted(vers, key=LooseVersion)
except:
continue
# append all but the latest version to our list of pks to delete
for ver in sorted_vers[:-1]:
q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
pks.append(q.first().pk)
pks = list(set(pks))
self.winupdates.filter(pk__in=pks).delete()
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
return bool(
self.overdue_dashboard_alert
or self.overdue_email_alert
or self.overdue_text_alert
or (
alert_template
and (
alert_template.agent_always_alert
or alert_template.agent_always_email
or alert_template.agent_always_text
)
)
)
def send_outage_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
(
f"Data has not been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"within the expected time."
),
alert_template=self.alert_template,
)
def send_recovery_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
(
f"Data has been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"after an interruption in data transmission."
),
alert_template=self.alert_template,
)
def send_outage_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
alert_template=self.alert_template,
)
def send_recovery_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
alert_template=self.alert_template,
)
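# Illustrative sketch, not part of the original module: shows how the status() thresholds
# above interact. With the default offline_time=4 and overdue_time=30, an agent is online
# while last_seen is within 4 minutes, offline between 4 and 30 minutes, and overdue past
# 30 minutes. The 10 minute old last_seen below is hypothetical.
def _example_status_windows() -> str:
    from datetime import datetime, timedelta, timezone

    now = datetime.now(timezone.utc)
    last_seen = now - timedelta(minutes=10)
    offline_cutoff = now - timedelta(minutes=4)
    overdue_cutoff = now - timedelta(minutes=30)
    if last_seen < offline_cutoff and last_seen > overdue_cutoff:
        return "offline"  # AGENT_STATUS_OFFLINE
    elif last_seen < offline_cutoff and last_seen < overdue_cutoff:
        return "overdue"  # AGENT_STATUS_OVERDUE
    return "online"  # AGENT_STATUS_ONLINE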
class Note(models.Model):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
Agent,
related_name="notes",
on_delete=models.CASCADE,
)
user = models.ForeignKey(
"accounts.User",
related_name="user",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
note = models.TextField(null=True, blank=True)
entry_time = models.DateTimeField(auto_now_add=True)
def __str__(self) -> str:
return self.agent.hostname
class AgentHistory(models.Model):
objects = PermissionQuerySet.as_manager()
id = models.BigAutoField(primary_key=True)
agent = models.ForeignKey(
Agent,
related_name="history",
on_delete=models.CASCADE,
)
time = models.DateTimeField(auto_now_add=True)
type: "AgentHistoryType" = models.CharField(
max_length=50,
choices=AgentHistoryType.choices,
default=AgentHistoryType.CMD_RUN,
)
command = models.TextField(null=True, blank=True, default="")
username = models.CharField(max_length=255, default="system")
results = models.TextField(null=True, blank=True)
script = models.ForeignKey(
"scripts.Script",
null=True,
blank=True,
related_name="history",
on_delete=models.SET_NULL,
)
script_results = models.JSONField(null=True, blank=True)
def __str__(self) -> str:
return f"{self.agent.hostname} - {self.type}"
def run_script_email_results_task(
agentpk: int,
scriptpk: int,
nats_timeout: int,
emails: list[str],
args: list[str] = [],
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
):
agent = Agent.objects.get(pk=agentpk)
script = Script.objects.get(pk=scriptpk)
r = agent.run_script(
scriptpk=script.pk,
args=args,
full=True,
timeout=nats_timeout,
wait=True,
history_pk=history_pk,
run_as_user=run_as_user,
env_vars=env_vars,
)
if r == "timeout":
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"{agent.hostname}({agent.pk}) timed out running script.",
)
return
CORE = get_core_settings()
subject = f"{agent.hostname} {script.name} Results"
exec_time = "{:.4f}".format(r["execution_time"])
body = (
subject
+ f"\nReturn code: {r['retcode']}\nExecution time: {exec_time} seconds\nStdout: {r['stdout']}\nStderr: {r['stderr']}"
)
import smtplib
from email.message import EmailMessage
msg = EmailMessage()
msg["Subject"] = subject
msg["From"] = CORE.smtp_from_email
if emails:
msg["To"] = ", ".join(emails)
else:
msg["To"] = ", ".join(CORE.email_alert_recipients)
msg.set_content(body)
try:
with smtplib.SMTP(CORE.smtp_host, CORE.smtp_port, timeout=20) as server:
if CORE.smtp_requires_auth:
server.ehlo()
server.starttls()
server.login(CORE.smtp_host_user, CORE.smtp_host_password)
server.send_message(msg)
server.quit()
else:
server.send_message(msg)
server.quit()
except Exception as e:
DebugLog.error(message=str(e))
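# Illustrative sketch, not part of the original module: run_script_email_results_task is
# normally queued through Celery via .delay() (see the run_script view further below); a
# direct synchronous call with hypothetical primary keys looks like this and requires a
# configured database plus working SMTP settings.
def _example_email_script_results() -> None:
    run_script_email_results_task(
        agentpk=1,  # hypothetical Agent pk
        scriptpk=2,  # hypothetical Script pk
        nats_timeout=90,
        emails=[],  # an empty list falls back to CORE.email_alert_recipients
    )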
class CustomField(BaseAuditModel):
order = models.PositiveIntegerField(default=0)
model = models.CharField(max_length=25, choices=CustomFieldModel.choices)
type = models.CharField(
max_length=25, choices=CustomFieldType.choices, default=CustomFieldType.TEXT
)
options = ArrayField(
models.CharField(max_length=255, null=True, blank=True),
null=True,
blank=True,
default=list,
)
name = models.CharField(max_length=100)
required = models.BooleanField(blank=True, default=False)
default_value_string = models.TextField(null=True, blank=True)
default_value_bool = models.BooleanField(default=False)
default_values_multiple = ArrayField(
models.CharField(max_length=255, null=True, blank=True),
null=True,
blank=True,
default=list,
)
hide_in_ui = models.BooleanField(default=False)
hide_in_summary = models.BooleanField(default=False)
class Meta:
unique_together = (("model", "name"),)
def __str__(self) -> str:
return self.name
def serialize(field):
from .serializers import CustomFieldSerializer
return CustomFieldSerializer(field).data
def default_value(self):
if self.type == CustomFieldType.MULTIPLE:
return self.default_values_multiple
elif self.type == CustomFieldType.CHECKBOX:
return self.default_value_bool
return self.default_value_string
def get_or_create_field_value(self, instance):
from agents.models import Agent, AgentCustomField
from clients.models import Client, ClientCustomField, Site, SiteCustomField
if isinstance(instance, Agent):
if AgentCustomField.objects.filter(field=self, agent=instance).exists():
return AgentCustomField.objects.get(field=self, agent=instance)
else:
return AgentCustomField.objects.create(field=self, agent=instance)
elif isinstance(instance, Client):
if ClientCustomField.objects.filter(field=self, client=instance).exists():
return ClientCustomField.objects.get(field=self, client=instance)
else:
return ClientCustomField.objects.create(field=self, client=instance)
elif isinstance(instance, Site):
if SiteCustomField.objects.filter(field=self, site=instance).exists():
return SiteCustomField.objects.get(field=self, site=instance)
else:
return SiteCustomField.objects.create(field=self, site=instance)
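# Illustrative sketch, not part of the original module: how collector output is typically
# stored through CustomField.get_or_create_field_value(), mirroring the run_script view
# below. The "Serial Number" field name is hypothetical and the call needs a configured
# database with that custom field defined.
def _example_store_collector_output(agent, output: str) -> None:
    field = CustomField.objects.get(model=CustomFieldModel.AGENT, name="Serial Number")
    value_obj = field.get_or_create_field_value(agent)
    value_obj.save_to_field(output.strip())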
def run_script(request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
script = get_object_or_404(Script, pk=request.data["script"])
output = request.data["output"]
args = request.data["args"]
run_as_user: bool = request.data["run_as_user"]
env_vars: list[str] = request.data["env_vars"]
req_timeout = int(request.data["timeout"]) + 3
AuditLog.audit_script_run(
username=request.user.username,
agent=agent,
script=script.name,
debug_info={"ip": request._client_ip},
)
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=script,
username=request.user.username[:50],
)
history_pk = hist.pk
if output == "wait":
r = agent.run_script(
scriptpk=script.pk,
args=args,
timeout=req_timeout,
wait=True,
history_pk=history_pk,
run_as_user=run_as_user,
env_vars=env_vars,
)
return Response(r)
elif output == "email":
emails = (
[] if request.data["emailMode"] == "default" else request.data["emails"]
)
run_script_email_results_task.delay(
agentpk=agent.pk,
scriptpk=script.pk,
nats_timeout=req_timeout,
emails=emails,
args=args,
history_pk=history_pk,
run_as_user=run_as_user,
env_vars=env_vars,
)
elif output == "collector":
from core.models import CustomField
r = agent.run_script(
scriptpk=script.pk,
args=args,
timeout=req_timeout,
wait=True,
history_pk=history_pk,
run_as_user=run_as_user,
env_vars=env_vars,
)
custom_field = CustomField.objects.get(pk=request.data["custom_field"])
if custom_field.model == CustomFieldModel.AGENT:
field = custom_field.get_or_create_field_value(agent)
elif custom_field.model == CustomFieldModel.CLIENT:
field = custom_field.get_or_create_field_value(agent.client)
elif custom_field.model == CustomFieldModel.SITE:
field = custom_field.get_or_create_field_value(agent.site)
else:
return notify_error("Custom Field was invalid")
value = (
r.strip()
if request.data["save_all_output"]
else r.strip().split("\n")[-1].strip()
)
field.save_to_field(value)
return Response(r)
elif output == "note":
r = agent.run_script(
scriptpk=script.pk,
args=args,
timeout=req_timeout,
wait=True,
history_pk=history_pk,
run_as_user=run_as_user,
env_vars=env_vars,
)
Note.objects.create(agent=agent, user=request.user, note=r)
return Response(r)
else:
agent.run_script(
scriptpk=script.pk,
args=args,
timeout=req_timeout,
history_pk=history_pk,
run_as_user=run_as_user,
env_vars=env_vars,
)
return Response(f"{script.name} will now be run on {agent.hostname}") | null |
189,099 | import asyncio
import datetime as dt
import random
import string
import time
from io import StringIO
from pathlib import Path
from django.conf import settings
from django.db.models import Exists, OuterRef, Prefetch, Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from django.utils.dateparse import parse_datetime
from meshctrl.utils import get_login_token
from packaging import version as pyver
from rest_framework import serializers
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from core.tasks import sync_mesh_perms_task
from core.utils import (
get_core_settings,
get_mesh_ws_url,
remove_mesh_agent,
token_is_valid,
wake_on_lan,
)
from logs.models import AuditLog, DebugLog, PendingAction
from scripts.models import Script
from scripts.tasks import bulk_command_task, bulk_script_task
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_OFFLINE,
AGENT_STATUS_ONLINE,
AGENT_TABLE_DEFER,
AgentHistoryType,
AgentMonType,
AgentPlat,
CustomFieldModel,
DebugLogType,
EvtLogNames,
PAAction,
)
from tacticalrmm.helpers import date_is_in_past, notify_error
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from tacticalrmm.utils import get_default_timezone, reload_nats
from winupdate.models import WinUpdate, WinUpdatePolicy
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
from .models import Agent, AgentCustomField, AgentHistory, Note
from .permissions import (
AgentHistoryPerms,
AgentNotesPerms,
AgentPerms,
AgentWOLPerms,
EvtLogPerms,
InstallAgentPerms,
ManageProcPerms,
MeshPerms,
RebootAgentPerms,
RecoverAgentPerms,
RunBulkPerms,
RunScriptPerms,
SendCMDPerms,
UpdateAgentPerms,
)
from .serializers import (
AgentCustomFieldSerializer,
AgentHistorySerializer,
AgentHostnameSerializer,
AgentNoteSerializer,
AgentSerializer,
AgentTableSerializer,
)
from .tasks import (
bulk_recover_agents_task,
run_script_email_results_task,
send_agent_update_task,
)
class AuditLog(models.Model):
def __str__(self) -> str:
def save(self, *args: Any, **kwargs: Any) -> None:
def audit_mesh_session(
username: str, agent: "Agent", debug_info: Dict[Any, Any] = {}
) -> None:
def audit_raw_command(
username: str,
agent: "Agent",
cmd: str,
shell: str,
debug_info: Dict[Any, Any] = {},
) -> None:
def audit_object_changed(
username: str,
object_type: str,
before: Dict[Any, Any],
after: Dict[Any, Any],
name: str = "",
debug_info: Dict[Any, Any] = {},
) -> None:
def audit_object_add(
username: str,
object_type: str,
after: Dict[Any, Any],
name: str = "",
debug_info: Dict[Any, Any] = {},
) -> None:
def audit_object_delete(
username: str,
object_type: str,
before: Dict[Any, Any],
name: str = "",
debug_info: Dict[Any, Any] = {},
) -> None:
def audit_script_run(
username: str, agent: "Agent", script: str, debug_info: Dict[Any, Any] = {}
) -> None:
def audit_user_failed_login(username: str, debug_info: Dict[Any, Any] = {}) -> None:
def audit_user_failed_twofactor(
username: str, debug_info: Dict[Any, Any] = {}
) -> None:
def audit_user_login_successful(
username: str, debug_info: Dict[Any, Any] = {}
) -> None:
def audit_url_action(
username: str,
urlaction: "URLAction",
instance: "Union[Agent, Client, Site]",
debug_info: Dict[Any, Any] = {},
) -> None:
def audit_bulk_action(
username: str,
action: str,
affected: Dict[str, Any],
debug_info: Dict[Any, Any] = {},
) -> None:
class Script(BaseAuditModel):
def __str__(self):
def code_no_snippets(self):
def code(self):
def replace_with_snippets(cls, code):
def hash_script_body(self):
def load_community_scripts(cls):
def serialize(script):
def parse_script_args(cls, agent, shell: str, args: List[str] = []) -> list:
def parse_script_env_vars(cls, agent, shell: str, env_vars: list[str] = []) -> list:
def bulk_command_task(
*,
agent_pks: list[int],
cmd: str,
shell: str,
timeout: int,
username: str,
run_as_user: bool = False,
) -> None:
def bulk_script_task(
*,
script_pk: int,
agent_pks: list[int],
args: list[str] = [],
timeout: int,
username: str,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> None:
def bulk_install_updates_task(pks: list[int]) -> None:
def bulk_check_for_updates_task(pks: list[int]) -> None:
class Agent(BaseAuditModel):
def __str__(self) -> str:
def save(self, *args, **kwargs):
def client(self) -> "Client":
def timezone(self) -> str:
def is_posix(self) -> bool:
def arch(self) -> Optional[str]:
def do_update(self, *, token: str = "", force: bool = False) -> str:
def status(self) -> str:
def checks(self) -> Dict[str, Any]:
def pending_actions_count(self) -> int:
def cpu_model(self) -> List[str]:
def graphics(self) -> str:
def local_ips(self) -> str:
def make_model(self) -> str:
def physical_disks(self) -> Sequence[Disk]:
def serial_number(self) -> str:
def hex_mesh_node_id(self) -> str:
def online_agents(cls, min_version: str = "") -> "List[Agent]":
def is_supported_script(self, platforms: List[str]) -> bool:
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
def check_run_interval(self) -> int:
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
def approve_updates(self) -> None:
def get_patch_policy(self) -> "WinUpdatePolicy":
def get_approved_update_guids(self) -> list[str]:
def set_alert_template(self) -> "Optional[AlertTemplate]":
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
def get_checks_from_policies(self) -> "List[Check]":
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
def serialize(agent: "Agent") -> Dict[str, Any]:
def delete_superseded_updates(self) -> None:
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
def send_outage_email(self) -> None:
def send_recovery_email(self) -> None:
def send_outage_sms(self) -> None:
def send_recovery_sms(self) -> None:
def bulk(request):
if request.data["target"] == "agents" and not request.data["agents"]:
return notify_error("Must select at least 1 agent")
if request.data["target"] == "client":
if not _has_perm_on_client(request.user, request.data["client"]):
raise PermissionDenied()
q = Agent.objects.filter_by_role(request.user).filter( # type: ignore
site__client_id=request.data["client"]
)
elif request.data["target"] == "site":
if not _has_perm_on_site(request.user, request.data["site"]):
raise PermissionDenied()
q = Agent.objects.filter_by_role(request.user).filter( # type: ignore
site_id=request.data["site"]
)
elif request.data["target"] == "agents":
q = Agent.objects.filter_by_role(request.user).filter( # type: ignore
agent_id__in=request.data["agents"]
)
elif request.data["target"] == "all":
q = Agent.objects.filter_by_role(request.user).only("pk", "monitoring_type") # type: ignore
else:
return notify_error("Something went wrong")
if request.data["monType"] == "servers":
q = q.filter(monitoring_type=AgentMonType.SERVER)
elif request.data["monType"] == "workstations":
q = q.filter(monitoring_type=AgentMonType.WORKSTATION)
if request.data["osType"] == AgentPlat.WINDOWS:
q = q.filter(plat=AgentPlat.WINDOWS)
elif request.data["osType"] == AgentPlat.LINUX:
q = q.filter(plat=AgentPlat.LINUX)
elif request.data["osType"] == AgentPlat.DARWIN:
q = q.filter(plat=AgentPlat.DARWIN)
agents: list[int] = [agent.pk for agent in q]
if not agents:
return notify_error("No agents were found meeting the selected criteria")
AuditLog.audit_bulk_action(
request.user,
request.data["mode"],
request.data,
debug_info={"ip": request._client_ip},
)
if request.data["mode"] == "command":
if request.data["shell"] == "custom" and request.data["custom_shell"]:
shell = request.data["custom_shell"]
else:
shell = request.data["shell"]
bulk_command_task.delay(
agent_pks=agents,
cmd=request.data["cmd"],
shell=shell,
timeout=request.data["timeout"],
username=request.user.username[:50],
run_as_user=request.data["run_as_user"],
)
return Response(f"Command will now be run on {len(agents)} agents")
elif request.data["mode"] == "script":
script = get_object_or_404(Script, pk=request.data["script"])
bulk_script_task.delay(
script_pk=script.pk,
agent_pks=agents,
args=request.data["args"],
timeout=request.data["timeout"],
username=request.user.username[:50],
run_as_user=request.data["run_as_user"],
env_vars=request.data["env_vars"],
)
return Response(f"{script.name} will now be run on {len(agents)} agents")
elif request.data["mode"] == "patch":
if request.data["patchMode"] == "install":
bulk_install_updates_task.delay(agents)
return Response(
f"Pending updates will now be installed on {len(agents)} agents"
)
elif request.data["patchMode"] == "scan":
bulk_check_for_updates_task.delay(agents)
return Response(f"Patch status scan will now run on {len(agents)} agents")
return notify_error("Something went wrong") | null |
189,100 | import asyncio
import datetime as dt
import random
import string
import time
from io import StringIO
from pathlib import Path
from django.conf import settings
from django.db.models import Exists, OuterRef, Prefetch, Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from django.utils.dateparse import parse_datetime
from meshctrl.utils import get_login_token
from packaging import version as pyver
from rest_framework import serializers
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from core.tasks import sync_mesh_perms_task
from core.utils import (
get_core_settings,
get_mesh_ws_url,
remove_mesh_agent,
token_is_valid,
wake_on_lan,
)
from logs.models import AuditLog, DebugLog, PendingAction
from scripts.models import Script
from scripts.tasks import bulk_command_task, bulk_script_task
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_OFFLINE,
AGENT_STATUS_ONLINE,
AGENT_TABLE_DEFER,
AgentHistoryType,
AgentMonType,
AgentPlat,
CustomFieldModel,
DebugLogType,
EvtLogNames,
PAAction,
)
from tacticalrmm.helpers import date_is_in_past, notify_error
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from tacticalrmm.utils import get_default_timezone, reload_nats
from winupdate.models import WinUpdate, WinUpdatePolicy
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
from .models import Agent, AgentCustomField, AgentHistory, Note
from .permissions import (
AgentHistoryPerms,
AgentNotesPerms,
AgentPerms,
AgentWOLPerms,
EvtLogPerms,
InstallAgentPerms,
ManageProcPerms,
MeshPerms,
RebootAgentPerms,
RecoverAgentPerms,
RunBulkPerms,
RunScriptPerms,
SendCMDPerms,
UpdateAgentPerms,
)
from .serializers import (
AgentCustomFieldSerializer,
AgentHistorySerializer,
AgentHostnameSerializer,
AgentNoteSerializer,
AgentSerializer,
AgentTableSerializer,
)
from .tasks import (
bulk_recover_agents_task,
run_script_email_results_task,
send_agent_update_task,
)
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat: "AgentPlat" = models.CharField( # type: ignore
max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
goarch: "GoArch" = models.CharField( # type: ignore
max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(
max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
return self.hostname
def save(self, *args, **kwargs):
# prevent recursion since calling set_alert_template() also calls save()
if not hasattr(self, "_processing_set_alert_template"):
self._processing_set_alert_template = False
if self.pk and not self._processing_set_alert_template:
orig = Agent.objects.get(pk=self.pk)
mon_type_changed = self.monitoring_type != orig.monitoring_type
site_changed = self.site_id != orig.site_id
if mon_type_changed or site_changed:
self._processing_set_alert_template = True
self.set_alert_template()
self._processing_set_alert_template = False
super().save(*args, **kwargs)
def client(self) -> "Client":
return self.site.client
def timezone(self) -> str:
        # return the default timezone unless the timezone is explicitly set per agent
if self.time_zone:
return self.time_zone
return get_core_settings().default_time_zone
def is_posix(self) -> bool:
return self.plat in {AgentPlat.LINUX, AgentPlat.DARWIN}
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
elif "32 bit" in self.operating_system or "32bit" in self.operating_system:
return "32"
return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
ver = settings.LATEST_AGENT_VER
if not self.goarch:
DebugLog.warning(
agent=self,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
)
return "noarch"
if pyver.parse(self.version) <= pyver.parse("1.3.0"):
return "not supported"
url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"
if not force:
if self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).exists():
self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).delete()
PendingAction.objects.create(
agent=self,
action_type=PAAction.AGENT_UPDATE,
details={
"url": url,
"version": ver,
"inno": bin,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": ver,
"inno": bin,
},
}
asyncio.run(self.nats_cmd(nats_data, wait=False))
return "created"
def status(self) -> str:
now = djangotime.now()
offline = now - djangotime.timedelta(minutes=self.offline_time)
overdue = now - djangotime.timedelta(minutes=self.overdue_time)
if self.last_seen is not None:
if (self.last_seen < offline) and (self.last_seen > overdue):
return AGENT_STATUS_OFFLINE
elif (self.last_seen < offline) and (self.last_seen < overdue):
return AGENT_STATUS_OVERDUE
else:
return AGENT_STATUS_ONLINE
else:
return AGENT_STATUS_OFFLINE
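    # Illustrative example: with the default offline_time=4 and overdue_time=30,
    # an agent last seen 2 minutes ago is online, one last seen 10 minutes ago is
    # offline (past the offline cutoff but not yet overdue), and one last seen
    # 45 minutes ago is overdue.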
def checks(self) -> Dict[str, Any]:
total, passing, failing, warning, info = 0, 0, 0, 0, 0
for check in self.get_checks_with_policies(exclude_overridden=True):
total += 1
if (
not hasattr(check.check_result, "status")
or isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.PASSING
):
passing += 1
elif (
isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.FAILING
):
alert_severity = (
check.check_result.alert_severity
if check.check_type
in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
)
else check.alert_severity
)
if alert_severity == AlertSeverity.ERROR:
failing += 1
elif alert_severity == AlertSeverity.WARNING:
warning += 1
elif alert_severity == AlertSeverity.INFO:
info += 1
ret = {
"total": total,
"passing": passing,
"failing": failing,
"warning": warning,
"info": info,
"has_failing_checks": failing > 0 or warning > 0,
}
return ret
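    # Illustrative example: an agent with 10 checks where 7 pass, 2 fail at ERROR
    # severity and 1 fails at WARNING yields
    # {"total": 10, "passing": 7, "failing": 2, "warning": 1, "info": 0,
    #  "has_failing_checks": True}.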
def pending_actions_count(self) -> int:
ret = cache.get(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}")
if ret is None:
ret = self.pendingactions.filter(status=PAStatus.PENDING).count()
cache.set(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}", ret, 600)
return ret
def cpu_model(self) -> List[str]:
if self.is_posix:
try:
return cast(List[str], self.wmi_detail["cpus"])
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
for cpu in cpus:
name = [x["Name"] for x in cpu if "Name" in x][0]
lp, nc = "", ""
with suppress(Exception):
lp = [
x["NumberOfLogicalProcessors"]
for x in cpu
if "NumberOfCores" in x
][0]
nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
if lp and nc:
cpu_string = f"{name}, {nc}C/{lp}T"
else:
cpu_string = name
ret.append(cpu_string)
return ret
except:
return ["unknown cpu model"]
def graphics(self) -> str:
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
for i in graphics:
caption = [x["Caption"] for x in i if "Caption" in x][0]
if "microsoft remote display adapter" in caption.lower():
mrda.append("yes")
continue
ret.append([x["Caption"] for x in i if "Caption" in x][0])
            # only return this if there are no other graphics cards
if not ret and mrda:
return "Microsoft Remote Display Adapter"
return ", ".join(ret)
except:
return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
except:
return "error getting local ips"
for i in ips:
try:
addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
except:
continue
if addr is None:
continue
for ip in addr:
if validators.ipv4(ip):
ret.append(ip)
if len(ret) == 1:
return cast(str, ret[0])
return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
if self.is_posix:
try:
return cast(str, self.wmi_detail["make_model"])
except:
return "error getting make/model"
with suppress(Exception):
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
model = [x["Model"] for x in comp_sys if "Model" in x][0]
if "to be filled" in model.lower():
mobo = self.wmi_detail["base_board"][0]
make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
model = [x["Product"] for x in mobo if "Product" in x][0]
if make.lower() == "lenovo":
sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
if "to be filled" not in sysfam.lower():
model = sysfam
return f"{make} {model}"
with suppress(Exception):
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
return "unknown make/model"
def physical_disks(self) -> Sequence[Disk]:
if self.is_posix:
try:
return cast(List[Disk], self.wmi_detail["disks"])
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
for disk in disks:
interface_type = [
x["InterfaceType"] for x in disk if "InterfaceType" in x
][0]
if interface_type == "USB":
continue
model = [x["Caption"] for x in disk if "Caption" in x][0]
size = [x["Size"] for x in disk if "Size" in x][0]
size_in_gb = round(int(size) / 1_073_741_824)
ret.append(f"{model} {size_in_gb:,}GB {interface_type}")
return ret
except:
return ["unknown disk"]
def serial_number(self) -> str:
if self.is_posix:
try:
return self.wmi_detail["serialnumber"]
except:
return ""
try:
return self.wmi_detail["bios"][0][0]["SerialNumber"]
except:
return ""
def hex_mesh_node_id(self) -> str:
return _b64_to_hex(self.mesh_node_id)
def online_agents(cls, min_version: str = "") -> "List[Agent]":
if min_version:
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if pyver.parse(i.version) >= pyver.parse(min_version)
and i.status == AGENT_STATUS_ONLINE
]
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if i.status == AGENT_STATUS_ONLINE
]
def is_supported_script(self, platforms: List[str]) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
if exclude_overridden:
checks = (
list(
check
for check in self.agentchecks.all()
if not check.overridden_by_policy
)
+ self.get_checks_from_policies()
)
else:
checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
results = self.taskresults.all() # type: ignore
for task in tasks:
for result in results:
if result.task.id == task.pk:
task.task_result = result
break
return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
results = self.checkresults.all() # type: ignore
for check in checks:
for result in results:
if result.assigned_check.id == check.pk:
check.check_result = result
break
return checks
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
from checks.models import Check
site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
default_policy = getattr(
get_core_settings(), f"{self.monitoring_type}_policy", None
)
        # prefetch excluded objects on policies, but only if the policy is not None
models.prefetch_related_objects(
[
policy
for policy in (self.policy, site_policy, client_policy, default_policy)
if policy
],
"excluded_agents",
"excluded_sites",
"excluded_clients",
models.Prefetch(
"policychecks", queryset=Check.objects.select_related("script")
),
"autotasks",
)
return {
"agent_policy": (
self.policy
if self.policy and not self.policy.is_agent_excluded(self)
else None
),
"site_policy": (
site_policy
if (site_policy and not site_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
else None
),
"client_policy": (
client_policy
if (client_policy and not client_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
else None
),
"default_policy": (
default_policy
if (default_policy and not default_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
and not self.client.block_policy_inheritance
else None
),
}
def check_run_interval(self) -> int:
interval = self.check_interval
# determine if any agent checks have a custom interval and set the lowest interval
for check in self.get_checks_with_policies():
if check.run_interval and check.run_interval < interval:
# don't allow check runs less than 15s
interval = 15 if check.run_interval < 15 else check.run_interval
return interval
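    # Illustrative example: with check_interval=120 and checks whose run_interval
    # values are 60 and 10, the effective interval becomes 15, because the 10s
    # check is clamped to the 15 second floor.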
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
from scripts.models import Script
script = Script.objects.get(pk=scriptpk)
# always override if set on script model
if script.run_as_user:
run_as_user = True
parsed_args = script.parse_script_args(self, script.shell, args)
parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)
data = {
"func": "runscriptfull" if full else "runscript",
"timeout": timeout,
"script_args": parsed_args,
"payload": {
"code": script.code,
"shell": script.shell,
},
"run_as_user": run_as_user,
"env_vars": parsed_env_vars,
"nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
"deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
}
if history_pk != 0:
data["id"] = history_pk
running_agent = self
if run_on_any:
nats_ping = {"func": "ping"}
# try on self first
r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = self
else:
for agent in Agent.online_agents():
r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = agent
break
if running_agent.pk == self.pk:
return "Unable to find an online agent"
if wait:
return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
else:
asyncio.run(running_agent.nats_cmd(data, wait=False))
return "ok"
# auto approves updates
def approve_updates(self) -> None:
patch_policy = self.get_patch_policy()
severity_list = []
if patch_policy.critical == "approve":
severity_list.append("Critical")
if patch_policy.important == "approve":
severity_list.append("Important")
if patch_policy.moderate == "approve":
severity_list.append("Moderate")
if patch_policy.low == "approve":
severity_list.append("Low")
if patch_policy.other == "approve":
severity_list.append("")
self.winupdates.filter(severity__in=severity_list, installed=False).exclude(
action="approve"
).update(action="approve")
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self) -> "WinUpdatePolicy":
from winupdate.models import WinUpdatePolicy
# check if site has a patch policy and if so use it
patch_policy = None
agent_policy = self.winupdatepolicy.first()
if not agent_policy:
agent_policy = WinUpdatePolicy.objects.create(agent=self)
# Get the list of policies applied to the agent and select the
# highest priority one.
policies = self.get_agent_policies()
for _, policy in policies.items():
if policy and policy.active and policy.winupdatepolicy.exists():
patch_policy = policy.winupdatepolicy.first()
break
# if policy still doesn't exist return the agent patch policy
if not patch_policy:
return agent_policy
# patch policy exists. check if any agent settings are set to override patch policy
if agent_policy.critical != "inherit":
patch_policy.critical = agent_policy.critical
if agent_policy.important != "inherit":
patch_policy.important = agent_policy.important
if agent_policy.moderate != "inherit":
patch_policy.moderate = agent_policy.moderate
if agent_policy.low != "inherit":
patch_policy.low = agent_policy.low
if agent_policy.other != "inherit":
patch_policy.other = agent_policy.other
if agent_policy.run_time_frequency != "inherit":
patch_policy.run_time_frequency = agent_policy.run_time_frequency
patch_policy.run_time_hour = agent_policy.run_time_hour
patch_policy.run_time_days = agent_policy.run_time_days
if agent_policy.reboot_after_install != "inherit":
patch_policy.reboot_after_install = agent_policy.reboot_after_install
if not agent_policy.reprocess_failed_inherit:
patch_policy.reprocess_failed = agent_policy.reprocess_failed
patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
patch_policy.email_if_fail = agent_policy.email_if_fail
return patch_policy
def get_approved_update_guids(self) -> list[str]:
return list(
self.winupdates.filter(action="approve", installed=False).values_list( # type: ignore
"guid", flat=True
)
)
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
core = get_core_settings()
policies = self.get_agent_policies()
# loop through all policies applied to agent and return an alert_template if found
processed_policies: List[int] = []
for key, policy in policies.items():
            # the global default alert template takes precedence over an alert template applied via the default policy
if (
"default" in key
and core.alert_template
and core.alert_template.is_active
and not core.alert_template.is_agent_excluded(self)
):
self.alert_template = core.alert_template
self.save(update_fields=["alert_template"])
return core.alert_template
elif (
policy
and policy.active
and policy.pk not in processed_policies
and policy.alert_template
and policy.alert_template.is_active
and not policy.alert_template.is_agent_excluded(self)
):
self.alert_template = policy.alert_template
self.save(update_fields=["alert_template"])
return policy.alert_template
elif (
"site" in key
and self.site.alert_template
and self.site.alert_template.is_active
and not self.site.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.alert_template
self.save(update_fields=["alert_template"])
return self.site.alert_template
elif (
"client" in key
and self.site.client.alert_template
and self.site.client.alert_template.is_active
and not self.site.client.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.client.alert_template
self.save(update_fields=["alert_template"])
return self.site.client.alert_template
# no alert templates found or agent has been excluded
self.alert_template = None
self.save(update_fields=["alert_template"])
return None
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_availability_alert(
self, skip_create=not self.should_create_alert(alert_template)
)
def get_checks_from_policies(self) -> "List[Check]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance or self.agentchecks.exists():
cache_key = f"agent_{self.agent_id}_checks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"
cached_checks = cache.get(cache_key)
if isinstance(cached_checks, list):
return cached_checks
else:
# clear agent checks that have overridden_by_policy set
self.agentchecks.update(overridden_by_policy=False) # type: ignore
# get agent checks based on policies
checks = Policy.get_policy_checks(self)
cache.set(cache_key, checks, 600)
return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance:
cache_key = f"agent_{self.agent_id}_tasks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"
cached_tasks = cache.get(cache_key)
if isinstance(cached_tasks, list):
return cached_tasks
else:
# get agent tasks based on policies
tasks = Policy.get_policy_tasks(self)
cache.set(cache_key, tasks, 600)
return tasks
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
opts = setup_nats_options()
try:
nc = await nats.connect(**opts)
except:
return "natsdown"
if wait:
try:
msg = await nc.request(
self.agent_id, msgpack.dumps(data), timeout=timeout
)
except TimeoutError:
ret = "timeout"
else:
try:
ret = msgpack.loads(msg.data)
except Exception as e:
ret = str(e)
logger.error(e)
await nc.close()
return ret
else:
await nc.publish(self.agent_id, msgpack.dumps(data))
await nc.flush()
await nc.close()
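    # Illustrative usage: asyncio.run(agent.nats_cmd({"func": "ping"}, timeout=5))
    # returns the decoded reply ("pong"), "timeout" if the agent does not answer,
    # or "natsdown" if the NATS connection fails; with wait=False the payload is
    # simply published and nothing is returned.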
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
"""
Return type: tuple(message: str, error: bool)
"""
if mode == "tacagent":
if self.plat == AgentPlat.LINUX:
cmd = "systemctl restart tacticalagent.service"
shell = 3
elif self.plat == AgentPlat.DARWIN:
cmd = "launchctl kickstart -k system/tacticalagent"
shell = 3
else:
cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
shell = 1
asyncio.run(
send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
)
return "ok", False
elif mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
if wait:
r = asyncio.run(self.nats_cmd(data, timeout=20))
if r == "ok":
return "ok", False
else:
return str(r), True
else:
asyncio.run(self.nats_cmd(data, timeout=20, wait=False))
return "ok", False
return "invalid", True
def serialize(agent: "Agent") -> Dict[str, Any]:
# serializes the agent and returns json
from .serializers import AgentAuditSerializer
return AgentAuditSerializer(agent).data
def delete_superseded_updates(self) -> None:
with suppress(Exception):
pks = [] # list of pks to delete
kbs = list(self.winupdates.values_list("kb", flat=True))
d = Counter(kbs)
dupes = [k for k, v in d.items() if v > 1]
for dupe in dupes:
titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
# extract the version from the title and sort from oldest to newest
                # skip if no version info is available, since there is nothing to parse
try:
matches = r"(Version|Versão)"
pattern = r"\(" + matches + r"(.*?)\)"
vers = [
re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
for i in titles
]
sorted_vers = sorted(vers, key=LooseVersion)
except:
continue
# append all but the latest version to our list of pks to delete
for ver in sorted_vers[:-1]:
q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
pks.append(q.first().pk)
pks = list(set(pks))
self.winupdates.filter(pk__in=pks).delete()
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
return bool(
self.overdue_dashboard_alert
or self.overdue_email_alert
or self.overdue_text_alert
or (
alert_template
and (
alert_template.agent_always_alert
or alert_template.agent_always_email
or alert_template.agent_always_text
)
)
)
def send_outage_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
(
f"Data has not been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"within the expected time."
),
alert_template=self.alert_template,
)
def send_recovery_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
(
f"Data has been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"after an interruption in data transmission."
),
alert_template=self.alert_template,
)
def send_outage_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
alert_template=self.alert_template,
)
def send_recovery_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
alert_template=self.alert_template,
)
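# A minimal usage sketch, not part of the model above: assuming a configured
# Django environment for this project (pyver is packaging.version, imported at
# the top of this module), the Agent API can be driven from a shell or
# management command roughly like this. The function name and the "2.0.0"
# threshold are illustrative, not upstream code.
def queue_updates_for_outdated_agents(min_version: str = "2.0.0") -> int:
    """Queue an agent update for every online agent running an older version."""
    queued = 0
    for agent in Agent.online_agents():
        if pyver.parse(agent.version) < pyver.parse(min_version):
            # do_update() records a pending action and publishes the update
            # command over NATS (see Agent.do_update above)
            if agent.do_update() == "created":
                queued += 1
    return queued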
def agent_maintenance(request):
if request.data["type"] == "Client":
if not _has_perm_on_client(request.user, request.data["id"]):
raise PermissionDenied()
count = (
Agent.objects.filter_by_role(request.user) # type: ignore
.filter(site__client_id=request.data["id"])
.update(maintenance_mode=request.data["action"])
)
elif request.data["type"] == "Site":
if not _has_perm_on_site(request.user, request.data["id"]):
raise PermissionDenied()
count = (
Agent.objects.filter_by_role(request.user) # type: ignore
.filter(site_id=request.data["id"])
.update(maintenance_mode=request.data["action"])
)
else:
return notify_error("Invalid data")
if count:
action = "disabled" if not request.data["action"] else "enabled"
return Response(f"Maintenance mode has been {action} on {count} agents")
return Response(
"No agents have been put in maintenance mode. You might not have permissions to the resources."
) | null |
189,101 | import asyncio
import datetime as dt
import random
import string
import time
from io import StringIO
from pathlib import Path
from django.conf import settings
from django.db.models import Exists, OuterRef, Prefetch, Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from django.utils.dateparse import parse_datetime
from meshctrl.utils import get_login_token
from packaging import version as pyver
from rest_framework import serializers
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from core.tasks import sync_mesh_perms_task
from core.utils import (
get_core_settings,
get_mesh_ws_url,
remove_mesh_agent,
token_is_valid,
wake_on_lan,
)
from logs.models import AuditLog, DebugLog, PendingAction
from scripts.models import Script
from scripts.tasks import bulk_command_task, bulk_script_task
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_OFFLINE,
AGENT_STATUS_ONLINE,
AGENT_TABLE_DEFER,
AgentHistoryType,
AgentMonType,
AgentPlat,
CustomFieldModel,
DebugLogType,
EvtLogNames,
PAAction,
)
from tacticalrmm.helpers import date_is_in_past, notify_error
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from tacticalrmm.utils import get_default_timezone, reload_nats
from winupdate.models import WinUpdate, WinUpdatePolicy
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
from .models import Agent, AgentCustomField, AgentHistory, Note
from .permissions import (
AgentHistoryPerms,
AgentNotesPerms,
AgentPerms,
AgentWOLPerms,
EvtLogPerms,
InstallAgentPerms,
ManageProcPerms,
MeshPerms,
RebootAgentPerms,
RecoverAgentPerms,
RunBulkPerms,
RunScriptPerms,
SendCMDPerms,
UpdateAgentPerms,
)
from .serializers import (
AgentCustomFieldSerializer,
AgentHistorySerializer,
AgentHostnameSerializer,
AgentNoteSerializer,
AgentSerializer,
AgentTableSerializer,
)
from .tasks import (
bulk_recover_agents_task,
run_script_email_results_task,
send_agent_update_task,
)
def bulk_recover_agents_task() -> None:
def bulk_agent_recovery(request):
bulk_recover_agents_task.delay()
return Response("Agents will now be recovered") | null |
189,102 | import asyncio
import datetime as dt
import random
import string
import time
from io import StringIO
from pathlib import Path
from django.conf import settings
from django.db.models import Exists, OuterRef, Prefetch, Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from django.utils.dateparse import parse_datetime
from meshctrl.utils import get_login_token
from packaging import version as pyver
from rest_framework import serializers
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from core.tasks import sync_mesh_perms_task
from core.utils import (
get_core_settings,
get_mesh_ws_url,
remove_mesh_agent,
token_is_valid,
wake_on_lan,
)
from logs.models import AuditLog, DebugLog, PendingAction
from scripts.models import Script
from scripts.tasks import bulk_command_task, bulk_script_task
from tacticalrmm.constants import (
AGENT_DEFER,
AGENT_STATUS_OFFLINE,
AGENT_STATUS_ONLINE,
AGENT_TABLE_DEFER,
AgentHistoryType,
AgentMonType,
AgentPlat,
CustomFieldModel,
DebugLogType,
EvtLogNames,
PAAction,
)
from tacticalrmm.helpers import date_is_in_past, notify_error
from tacticalrmm.permissions import (
_has_perm_on_agent,
_has_perm_on_client,
_has_perm_on_site,
)
from tacticalrmm.utils import get_default_timezone, reload_nats
from winupdate.models import WinUpdate, WinUpdatePolicy
from winupdate.serializers import WinUpdatePolicySerializer
from winupdate.tasks import bulk_check_for_updates_task, bulk_install_updates_task
from .models import Agent, AgentCustomField, AgentHistory, Note
from .permissions import (
AgentHistoryPerms,
AgentNotesPerms,
AgentPerms,
AgentWOLPerms,
EvtLogPerms,
InstallAgentPerms,
ManageProcPerms,
MeshPerms,
RebootAgentPerms,
RecoverAgentPerms,
RunBulkPerms,
RunScriptPerms,
SendCMDPerms,
UpdateAgentPerms,
)
from .serializers import (
AgentCustomFieldSerializer,
AgentHistorySerializer,
AgentHostnameSerializer,
AgentNoteSerializer,
AgentSerializer,
AgentTableSerializer,
)
from .tasks import (
bulk_recover_agents_task,
run_script_email_results_task,
send_agent_update_task,
)
def get_mesh_ws_url() -> str:
async def wake_on_lan(*, uri: str, mesh_node_id: str) -> None:
class Agent(BaseAuditModel):
def __str__(self) -> str:
def save(self, *args, **kwargs):
def client(self) -> "Client":
def timezone(self) -> str:
def is_posix(self) -> bool:
def arch(self) -> Optional[str]:
def do_update(self, *, token: str = "", force: bool = False) -> str:
def status(self) -> str:
def checks(self) -> Dict[str, Any]:
def pending_actions_count(self) -> int:
def cpu_model(self) -> List[str]:
def graphics(self) -> str:
def local_ips(self) -> str:
def make_model(self) -> str:
def physical_disks(self) -> Sequence[Disk]:
def serial_number(self) -> str:
def hex_mesh_node_id(self) -> str:
def online_agents(cls, min_version: str = "") -> "List[Agent]":
def is_supported_script(self, platforms: List[str]) -> bool:
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
def check_run_interval(self) -> int:
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
def approve_updates(self) -> None:
def get_patch_policy(self) -> "WinUpdatePolicy":
def get_approved_update_guids(self) -> list[str]:
def set_alert_template(self) -> "Optional[AlertTemplate]":
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
def get_checks_from_policies(self) -> "List[Check]":
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
def serialize(agent: "Agent") -> Dict[str, Any]:
def delete_superseded_updates(self) -> None:
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
def send_outage_email(self) -> None:
def send_recovery_email(self) -> None:
def send_outage_sms(self) -> None:
def send_recovery_sms(self) -> None:
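# Illustrative sketch only: the wol() view below delegates Wake-on-LAN to
# MeshCentral via wake_on_lan(uri=..., mesh_node_id=...). For reference, the
# magic packet that ultimately wakes a machine can be built as in this
# hypothetical standalone helper, which is not used anywhere in this project.
import socket
def send_magic_packet(mac: str, broadcast: str = "255.255.255.255", port: int = 9) -> None:
    """Broadcast a standard WOL magic packet: six 0xFF bytes then the MAC repeated 16 times."""
    mac_hex = mac.replace(":", "").replace("-", "")
    payload = bytes.fromhex("FF" * 6 + mac_hex * 16)
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        # magic packets are sent as a UDP broadcast, conventionally to port 9
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        sock.sendto(payload, (broadcast, port))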
def wol(request, agent_id):
agent = get_object_or_404(
Agent.objects.defer(*AGENT_DEFER),
agent_id=agent_id,
)
try:
uri = get_mesh_ws_url()
asyncio.run(wake_on_lan(uri=uri, mesh_node_id=agent.mesh_node_id))
except Exception as e:
return notify_error(str(e))
return Response(f"Wake-on-LAN sent to {agent.hostname}") | null |
189,103 | import json
import os
import secrets
import string
from itertools import cycle
from django.conf import settings
from django.utils import timezone as djangotime
from model_bakery.recipe import Recipe, foreign_key, seq
from tacticalrmm.constants import AgentMonType, AgentPlat
def generate_agent_id() -> str:
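    # Illustrative note: this returns 39 random ASCII letters (upper and lower
    # case); with 52 possibilities per position there are 52**39 (~2**222)
    # distinct IDs, so collisions are not a practical concern.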
return "".join(secrets.choice(string.ascii_letters) for i in range(39)) | null |
189,104 | import json
import os
import secrets
import string
from itertools import cycle
from django.conf import settings
from django.utils import timezone as djangotime
from model_bakery.recipe import Recipe, foreign_key, seq
from tacticalrmm.constants import AgentMonType, AgentPlat
def get_wmi_data():
with open(
os.path.join(settings.BASE_DIR, "tacticalrmm/test_data/wmi_python_agent.json")
) as f:
return json.load(f) | null |
189,105 | import json
import os
import secrets
import string
from itertools import cycle
from django.conf import settings
from django.utils import timezone as djangotime
from model_bakery.recipe import Recipe, foreign_key, seq
from tacticalrmm.constants import AgentMonType, AgentPlat
def get_win_svcs():
svcs = settings.BASE_DIR.joinpath("tacticalrmm/test_data/winsvcs.json")
with open(svcs) as f:
return json.load(f) | null |
189,106 | import asyncio
from typing import Any
from django.shortcuts import get_object_or_404
from rest_framework.decorators import api_view
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from agents.models import Agent
from logs.models import PendingAction
from tacticalrmm.constants import PAAction
from tacticalrmm.helpers import notify_error
from .models import ChocoSoftware, InstalledSoftware
from .permissions import SoftwarePerms
from .serializers import InstalledSoftwareSerializer
class ChocoSoftware(models.Model):
chocos = models.JSONField()
added = models.DateTimeField(auto_now_add=True)
def __str__(self):
return f"{len(self.chocos)} - {self.added}"
def chocos(request):
chocos = ChocoSoftware.objects.last()
if not chocos:
return Response({})
return Response(chocos.chocos) | null |
189,107 |
def bytes2human(n: int) -> str:
# http://code.activestate.com/recipes/578019
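    # Illustrative examples: bytes2human(512) -> "512B", bytes2human(1024) -> "1.0K",
    # bytes2human(1_073_741_824) -> "1.0G".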
symbols = ("K", "M", "G", "T", "P", "E", "Z", "Y")
prefix = {}
for i, s in enumerate(symbols):
prefix[s] = 1 << (i + 1) * 10
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return "%.1f%s" % (value, s)
return "%sB" % n | null |
189,108 | from django.db import migrations, transaction
from django.db.utils import IntegrityError
from tacticalrmm.constants import CheckType
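# For context, a data-migration function like the one below is normally wired
# into the migration's operations with RunPython; a hedged sketch (the
# dependency name is a placeholder, not the real migration):
#
#     class Migration(migrations.Migration):
#         dependencies = [("checks", "00XX_previous_migration")]
#         operations = [
#             migrations.RunPython(migrate_check_results, migrations.RunPython.noop),
#         ]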
def migrate_check_results(apps, schema_editor):
Check = apps.get_model("checks", "Check")
CheckResult = apps.get_model("checks", "CheckResult")
for check in Check.objects.exclude(agent=None).iterator():
try:
with transaction.atomic():
if check.managed_by_policy:
CheckResult.objects.create(
assigned_check_id=check.parent_check,
agent_id=check.agent.id,
status=check.status,
more_info=check.more_info,
last_run=check.last_run,
fail_count=check.fail_count,
outage_history=check.outage_history,
extra_details=check.extra_details,
stdout=check.stdout,
stderr=check.stderr,
retcode=check.retcode,
execution_time=check.execution_time,
history=check.history,
alert_severity=check.alert_severity
if check.check_type
in [
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
]
else None,
)
else:
CheckResult.objects.create(
assigned_check_id=check.id,
agent_id=check.agent.id,
status=check.status,
more_info=check.more_info,
last_run=check.last_run,
fail_count=check.fail_count,
outage_history=check.outage_history,
extra_details=check.extra_details,
stdout=check.stdout,
stderr=check.stderr,
retcode=check.retcode,
execution_time=check.execution_time,
history=check.history,
alert_severity=check.alert_severity
if check.check_type
in [
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
]
else None,
)
except IntegrityError:
pass | null |
189,109 | import datetime as dt
from time import sleep
from typing import Optional
from django.utils import timezone as djangotime
from alerts.models import Alert
from checks.models import CheckResult
from tacticalrmm.celery import app
from tacticalrmm.helpers import rand_range
class Alert(models.Model):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="agent",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_check = models.ForeignKey(
"checks.Check",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
alert_type = models.CharField(
max_length=20, choices=AlertType.choices, default=AlertType.AVAILABILITY
)
message = models.TextField(null=True, blank=True)
alert_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
snoozed = models.BooleanField(default=False)
snooze_until = models.DateTimeField(null=True, blank=True)
resolved = models.BooleanField(default=False)
resolved_on = models.DateTimeField(null=True, blank=True)
severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_sent = models.DateTimeField(null=True, blank=True)
resolved_email_sent = models.DateTimeField(null=True, blank=True)
sms_sent = models.DateTimeField(null=True, blank=True)
resolved_sms_sent = models.DateTimeField(null=True, blank=True)
hidden = models.BooleanField(default=False)
action_run = models.DateTimeField(null=True, blank=True)
action_stdout = models.TextField(null=True, blank=True)
action_stderr = models.TextField(null=True, blank=True)
action_retcode = models.BigIntegerField(null=True, blank=True)
action_execution_time = models.CharField(max_length=100, null=True, blank=True)
resolved_action_run = models.DateTimeField(null=True, blank=True)
resolved_action_stdout = models.TextField(null=True, blank=True)
resolved_action_stderr = models.TextField(null=True, blank=True)
resolved_action_retcode = models.BigIntegerField(null=True, blank=True)
resolved_action_execution_time = models.CharField(
max_length=100, null=True, blank=True
)
def __str__(self) -> str:
return f"{self.alert_type} - {self.message}"
def assigned_agent(self) -> "Optional[Agent]":
return self.agent
def site(self) -> "Site":
return self.agent.site
def client(self) -> "Client":
return self.agent.client
def resolve(self) -> None:
self.resolved = True
self.resolved_on = djangotime.now()
self.snoozed = False
self.snooze_until = None
self.save(update_fields=["resolved", "resolved_on", "snoozed", "snooze_until"])
def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
if not cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
agent=agent,
alert_type=AlertType.AVAILABILITY,
severity=AlertSeverity.ERROR,
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
        # the agent must be passed in since the check may be a policy check
if not cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_check=check,
agent=agent,
alert_type=AlertType.CHECK,
severity=(
check.alert_severity
if check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else alert_severity
),
message=f"{agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_check=check,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
if not cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_task=task,
agent=agent,
alert_type=AlertType.TASK,
severity=task.alert_severity,
message=f"{agent.hostname} has task: {task.name} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_task=task,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
dashboard_severities = None
email_severities = None
text_severities = None
always_dashboard = None
always_email = None
always_text = None
alert_interval = None
email_task = None
text_task = None
run_script_action = None
        # determine what type of instance was passed
if isinstance(instance, Agent):
from agents.tasks import agent_outage_email_task, agent_outage_sms_task
email_task = agent_outage_email_task
text_task = agent_outage_sms_task
email_alert = instance.overdue_email_alert
text_alert = instance.overdue_text_alert
dashboard_alert = instance.overdue_dashboard_alert
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
alert_severity = AlertSeverity.ERROR
agent = instance
dashboard_severities = [AlertSeverity.ERROR]
email_severities = [AlertSeverity.ERROR]
text_severities = [AlertSeverity.ERROR]
# set alert_template settings
if alert_template:
always_dashboard = alert_template.agent_always_alert
always_email = alert_template.agent_always_email
always_text = alert_template.agent_always_text
alert_interval = alert_template.agent_periodic_alert_days
run_script_action = alert_template.agent_script_actions
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_check_email_alert_task,
handle_check_sms_alert_task,
)
email_task = handle_check_email_alert_task
text_task = handle_check_sms_alert_task
email_alert = instance.assigned_check.email_alert
text_alert = instance.assigned_check.text_alert
dashboard_alert = instance.assigned_check.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = (
instance.assigned_check.alert_severity
if instance.assigned_check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else instance.alert_severity
)
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = (
alert_template.check_dashboard_alert_severity
or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
AlertSeverity.INFO,
]
)
email_severities = alert_template.check_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.check_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.check_always_alert
always_email = alert_template.check_always_email
always_text = alert_template.check_always_text
alert_interval = alert_template.check_periodic_alert_days
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import handle_task_email_alert, handle_task_sms_alert
email_task = handle_task_email_alert
text_task = handle_task_sms_alert
email_alert = instance.task.email_alert
text_alert = instance.task.text_alert
dashboard_alert = instance.task.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = instance.task.alert_severity
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = alert_template.task_dashboard_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
email_severities = alert_template.task_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.task_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.task_always_alert
always_email = alert_template.task_always_email
always_text = alert_template.task_always_text
alert_interval = alert_template.task_periodic_alert_days
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
# check if alert severity changed and update the alert
if alert_severity != alert.severity:
alert.severity = alert_severity
alert.save(update_fields=["severity"])
# create alert in dashboard if enabled
if dashboard_alert or always_dashboard:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and dashboard_severities
and alert.severity in dashboard_severities
):
alert.hidden = False
alert.save(update_fields=["hidden"])
# send email if enabled
if email_alert or always_email:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and email_severities
and alert.severity in email_severities
):
email_task.delay(
pk=alert.pk,
alert_interval=alert_interval,
)
# send text if enabled
if text_alert or always_text:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and text_severities
and alert.severity in text_severities
):
text_task.delay(pk=alert.pk, alert_interval=alert_interval)
# check if any scripts should be run
if (
alert_template
and alert_template.action
and run_script_action
and not alert.action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.action,
username="alert-action-failure",
)
r = agent.run_script(
scriptpk=alert_template.action.pk,
args=alert.parse_script_args(alert_template.action_args),
timeout=alert_template.action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.action_retcode = r["retcode"]
alert.action_stdout = r["stdout"]
alert.action_stderr = r["stderr"]
alert.action_execution_time = "{:.4f}".format(r["execution_time"])
alert.action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) failure alert",
)
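    # Illustrative example: if a check fails with WARNING severity but the alert
    # template's check_email_alert_severity only contains ERROR, the alert can
    # still be surfaced on the dashboard (when dashboard alerting is enabled and
    # WARNING is among the dashboard severities) while the email task is skipped.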
def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
email_on_resolved = False
text_on_resolved = False
resolved_email_task = None
resolved_text_task = None
run_script_action = None
        # determine what type of instance was passed
if isinstance(instance, Agent):
from agents.tasks import agent_recovery_email_task, agent_recovery_sms_task
resolved_email_task = agent_recovery_email_task
resolved_text_task = agent_recovery_sms_task
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
agent = instance
if alert_template:
email_on_resolved = alert_template.agent_email_on_resolved
text_on_resolved = alert_template.agent_text_on_resolved
run_script_action = alert_template.agent_script_actions
if agent.overdue_email_alert:
email_on_resolved = True
if agent.overdue_text_alert:
text_on_resolved = True
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_resolved_check_email_alert_task,
handle_resolved_check_sms_alert_task,
)
resolved_email_task = handle_resolved_check_email_alert_task
resolved_text_task = handle_resolved_check_sms_alert_task
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.check_email_on_resolved
text_on_resolved = alert_template.check_text_on_resolved
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import (
handle_resolved_task_email_alert,
handle_resolved_task_sms_alert,
)
resolved_email_task = handle_resolved_task_email_alert
resolved_text_task = handle_resolved_task_sms_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.task_email_on_resolved
text_on_resolved = alert_template.task_text_on_resolved
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
alert.resolve()
        # check if a resolved email notification should be sent
if email_on_resolved and not alert.resolved_email_sent:
resolved_email_task.delay(pk=alert.pk)
# check if resolved text should be sent
if text_on_resolved and not alert.resolved_sms_sent:
resolved_text_task.delay(pk=alert.pk)
# check if resolved script should be run
if (
alert_template
and alert_template.resolved_action
and run_script_action
and not alert.resolved_action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.action,
username="alert-action-resolved",
)
r = agent.run_script(
scriptpk=alert_template.resolved_action.pk,
args=alert.parse_script_args(alert_template.resolved_action_args),
timeout=alert_template.resolved_action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.resolved_action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.resolved_action_retcode = r["retcode"]
alert.resolved_action_stdout = r["stdout"]
alert.resolved_action_stderr = r["stderr"]
alert.resolved_action_execution_time = "{:.4f}".format(
r["execution_time"]
)
alert.resolved_action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) resolved alert",
)
def parse_script_args(self, args: List[str]) -> List[str]:
if not args:
return []
temp_args = []
# pattern to match for injection
pattern = re.compile(".*\\{\\{alert\\.(.*)\\}\\}.*")
for arg in args:
if match := pattern.match(arg):
name = match.group(1)
# check if attr exists and isn't a function
if hasattr(self, name) and not callable(getattr(self, name)):
value = f"'{getattr(self, name)}'"
else:
continue
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
except re.error:
temp_args.append(re.sub("\\{\\{.*\\}\\}", re.escape(value), arg))
except Exception as e:
DebugLog.error(log_type=DebugLogType.SCRIPTING, message=str(e))
continue
else:
temp_args.append(arg)
return temp_args
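    # Illustrative example: with alert.message == "CPU load high", the argument
    # "-msg {{alert.message}}" becomes "-msg 'CPU load high'"; arguments that
    # reference a missing or callable attribute are dropped entirely.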
class CheckResult(models.Model):
objects = PermissionQuerySet.as_manager()
class Meta:
unique_together = (("agent", "assigned_check"),)
id = models.BigAutoField(primary_key=True)
agent = models.ForeignKey(
"agents.Agent",
related_name="checkresults",
on_delete=models.CASCADE,
)
assigned_check = models.ForeignKey(
"checks.Check",
related_name="checkresults",
on_delete=models.CASCADE,
)
status = models.CharField(
max_length=100, choices=CheckStatus.choices, default=CheckStatus.PENDING
)
# for memory, diskspace, script, and cpu checks where severity changes
alert_severity = models.CharField(
max_length=15,
choices=AlertSeverity.choices,
null=True,
blank=True,
)
more_info = models.TextField(null=True, blank=True)
last_run = models.DateTimeField(null=True, blank=True)
fail_count = models.PositiveIntegerField(default=0)
outage_history = models.JSONField(null=True, blank=True) # store
extra_details = models.JSONField(null=True, blank=True)
stdout = models.TextField(null=True, blank=True)
stderr = models.TextField(null=True, blank=True)
retcode = models.BigIntegerField(null=True, blank=True)
execution_time = models.CharField(max_length=100, null=True, blank=True)
# cpu and mem check history
history = ArrayField(
models.IntegerField(blank=True), null=True, blank=True, default=list
)
def __str__(self):
return f"{self.agent.hostname} - {self.assigned_check}"
def save(self, *args, **kwargs):
        # default the alert severity for check types whose severity is determined at run time
if not self.alert_severity and self.assigned_check.check_type in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
):
self.alert_severity = AlertSeverity.WARNING
super().save(*args, **kwargs)
def history_info(self):
if self.assigned_check.check_type in (CheckType.CPU_LOAD, CheckType.MEMORY):
return ", ".join(str(f"{x}%") for x in self.history[-6:])
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_check_alert(
self.assigned_check,
agent=self.agent,
alert_severity=self.alert_severity,
skip_create=not self.assigned_check.should_create_alert(alert_template),
)
def handle_check(self, data, check: "Check", agent: "Agent"):
from alerts.models import Alert
update_fields = []
# cpuload or mem checks
if check.check_type in (CheckType.CPU_LOAD, CheckType.MEMORY):
self.history.append(data["percent"])
if len(self.history) > 15:
self.history = self.history[-15:]
update_fields.extend(["history"])
avg = int(mean(self.history))
if check.error_threshold and avg > check.error_threshold:
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.ERROR
elif check.warning_threshold and avg > check.warning_threshold:
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.WARNING
else:
self.status = CheckStatus.PASSING
# add check history
check.add_check_history(data["percent"], agent.agent_id)
# diskspace checks
elif check.check_type == CheckType.DISK_SPACE:
if data["exists"]:
percent_used = round(data["percent_used"])
if (
check.error_threshold
and (100 - percent_used) < check.error_threshold
):
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.ERROR
elif (
check.warning_threshold
and (100 - percent_used) < check.warning_threshold
):
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.WARNING
else:
self.status = CheckStatus.PASSING
self.more_info = data["more_info"]
# add check history
check.add_check_history(100 - percent_used, agent.agent_id)
else:
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.ERROR
self.more_info = f"Disk {check.disk} does not exist"
update_fields.extend(["more_info"])
# script checks
elif check.check_type == CheckType.SCRIPT:
self.stdout = data["stdout"]
self.stderr = data["stderr"]
self.retcode = data["retcode"]
self.execution_time = "{:.4f}".format(data["runtime"])
if data["retcode"] in check.info_return_codes:
self.alert_severity = AlertSeverity.INFO
self.status = CheckStatus.FAILING
elif data["retcode"] in check.warning_return_codes:
self.alert_severity = AlertSeverity.WARNING
self.status = CheckStatus.FAILING
elif data["retcode"] != 0:
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.ERROR
else:
self.status = CheckStatus.PASSING
update_fields.extend(
[
"stdout",
"stderr",
"retcode",
"execution_time",
]
)
# add check history
check.add_check_history(
1 if self.status == CheckStatus.FAILING else 0,
agent.agent_id,
{
"retcode": data["retcode"],
"stdout": data["stdout"][:60],
"stderr": data["stderr"][:60],
"execution_time": self.execution_time,
},
)
# ping checks
elif check.check_type == CheckType.PING:
self.status = data["status"]
self.more_info = data["output"]
update_fields.extend(["more_info"])
check.add_check_history(
1 if self.status == CheckStatus.FAILING else 0,
agent.agent_id,
self.more_info[:60],
)
# windows service checks
elif check.check_type == CheckType.WINSVC:
self.status = data["status"]
self.more_info = data["more_info"]
update_fields.extend(["more_info"])
check.add_check_history(
1 if self.status == CheckStatus.FAILING else 0,
agent.agent_id,
self.more_info[:60],
)
elif check.check_type == CheckType.EVENT_LOG:
log = data["log"]
if check.fail_when == EvtLogFailWhen.CONTAINS:
if log and len(log) >= check.number_of_events_b4_alert:
self.status = CheckStatus.FAILING
else:
self.status = CheckStatus.PASSING
elif check.fail_when == EvtLogFailWhen.NOT_CONTAINS:
if log and len(log) >= check.number_of_events_b4_alert:
self.status = CheckStatus.PASSING
else:
self.status = CheckStatus.FAILING
self.extra_details = {"log": log}
update_fields.extend(["extra_details"])
check.add_check_history(
1 if self.status == CheckStatus.FAILING else 0,
agent.agent_id,
"Events Found:" + str(len(self.extra_details["log"])),
)
self.last_run = djangotime.now()
# handle status
if self.status == CheckStatus.FAILING:
self.fail_count += 1
update_fields.extend(["status", "fail_count", "alert_severity", "last_run"])
self.save(update_fields=update_fields)
if self.fail_count >= check.fails_b4_alert:
Alert.handle_alert_failure(self)
elif self.status == CheckStatus.PASSING:
self.fail_count = 0
update_fields.extend(["status", "fail_count", "alert_severity", "last_run"])
self.save(update_fields=update_fields)
if Alert.objects.filter(
assigned_check=check, agent=agent, resolved=False
).exists():
Alert.handle_alert_resolve(self)
else:
update_fields.extend(["last_run"])
self.save(update_fields=update_fields)
return self.status
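    # Illustrative note (not part of the original model): assuming a check configured
    # with fails_b4_alert=3, the first two failing runs above only bump fail_count
    # (1, 2) and save the result; the third failing run triggers
    # Alert.handle_alert_failure(self). A single passing run resets fail_count to 0
    # and, if an unresolved Alert exists for this check/agent pair,
    # Alert.handle_alert_resolve(self) closes it out.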
def send_email(self):
CORE = get_core_settings()
body: str = ""
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
else:
subject = f"{self} Failed"
if self.assigned_check.check_type == CheckType.DISK_SPACE:
text = ""
if self.assigned_check.warning_threshold:
text += f" Warning Threshold: {self.assigned_check.warning_threshold}%"
if self.assigned_check.error_threshold:
text += f" Error Threshold: {self.assigned_check.error_threshold}%"
try:
percent_used = [
d["percent"]
for d in self.agent.disks
if d["device"] == self.assigned_check.disk
][0]
percent_free = 100 - percent_used
body = subject + f" - Free: {percent_free}%, {text}"
            except Exception:
body = subject + f" - Disk {self.assigned_check.disk} does not exist"
elif self.assigned_check.check_type == CheckType.SCRIPT:
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
elif self.assigned_check.check_type == CheckType.PING:
body = self.more_info
elif self.assigned_check.check_type in (CheckType.CPU_LOAD, CheckType.MEMORY):
text = ""
if self.assigned_check.warning_threshold:
text += f" Warning Threshold: {self.assigned_check.warning_threshold}%"
if self.assigned_check.error_threshold:
text += f" Error Threshold: {self.assigned_check.error_threshold}%"
avg = int(mean(self.history))
if self.assigned_check.check_type == CheckType.CPU_LOAD:
body = subject + f" - Average CPU utilization: {avg}%, {text}"
elif self.assigned_check.check_type == CheckType.MEMORY:
body = subject + f" - Average memory usage: {avg}%, {text}"
elif self.assigned_check.check_type == CheckType.WINSVC:
body = subject + f" - Status: {self.more_info}"
elif self.assigned_check.check_type == CheckType.EVENT_LOG:
if self.assigned_check.event_source and self.assigned_check.event_message:
start = f"Event ID {self.assigned_check.event_id}, source {self.assigned_check.event_source}, containing string {self.assigned_check.event_message} "
elif self.assigned_check.event_source:
start = f"Event ID {self.assigned_check.event_id}, source {self.assigned_check.event_source} "
elif self.assigned_check.event_message:
start = f"Event ID {self.assigned_check.event_id}, containing string {self.assigned_check.event_message} "
else:
start = f"Event ID {self.assigned_check.event_id} "
body = start + f"was found in the {self.assigned_check.log_name} log\n\n"
for i in self.extra_details["log"]:
try:
if i["message"]:
body += f"{i['message']}\n"
                except Exception:
continue
CORE.send_mail(subject, body, alert_template=self.agent.alert_template)
def send_sms(self):
CORE = get_core_settings()
body: str = ""
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Failed"
else:
subject = f"{self} Failed"
if self.assigned_check.check_type == CheckType.DISK_SPACE:
text = ""
if self.assigned_check.warning_threshold:
text += f" Warning Threshold: {self.assigned_check.warning_threshold}%"
if self.assigned_check.error_threshold:
text += f" Error Threshold: {self.assigned_check.error_threshold}%"
try:
percent_used = [
d["percent"]
for d in self.agent.disks
if d["device"] == self.assigned_check.disk
][0]
percent_free = 100 - percent_used
body = subject + f" - Free: {percent_free}%, {text}"
            except Exception:
body = subject + f" - Disk {self.assigned_check.disk} does not exist"
elif self.assigned_check.check_type == CheckType.SCRIPT:
body = subject + f" - Return code: {self.retcode}"
elif self.assigned_check.check_type == CheckType.PING:
body = subject
elif self.assigned_check.check_type in (CheckType.CPU_LOAD, CheckType.MEMORY):
text = ""
if self.assigned_check.warning_threshold:
text += f" Warning Threshold: {self.assigned_check.warning_threshold}%"
if self.assigned_check.error_threshold:
text += f" Error Threshold: {self.assigned_check.error_threshold}%"
avg = int(mean(self.history))
if self.assigned_check.check_type == CheckType.CPU_LOAD:
body = subject + f" - Average CPU utilization: {avg}%, {text}"
elif self.assigned_check.check_type == CheckType.MEMORY:
body = subject + f" - Average memory usage: {avg}%, {text}"
elif self.assigned_check.check_type == CheckType.WINSVC:
body = subject + f" - Status: {self.more_info}"
elif self.assigned_check.check_type == CheckType.EVENT_LOG:
body = subject
CORE.send_sms(body, alert_template=self.agent.alert_template)
def send_resolved_email(self):
CORE = get_core_settings()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
body = f"{self} is now back to normal"
CORE.send_mail(subject, body, alert_template=self.agent.alert_template)
def send_resolved_sms(self):
CORE = get_core_settings()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
CORE.send_sms(subject, alert_template=self.agent.alert_template)
@app.task
def handle_check_email_alert_task(
pk: int, alert_interval: Optional[float] = None
) -> str:
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
# first time sending email
if not alert.email_sent:
check_result = CheckResult.objects.get(
assigned_check=alert.assigned_check, agent=alert.agent
)
sleep(rand_range(100, 1500))
check_result.send_email()
alert.email_sent = djangotime.now()
alert.save(update_fields=["email_sent"])
else:
if alert_interval:
# send an email only if the last email sent is older than alert interval
delta = djangotime.now() - dt.timedelta(days=alert_interval)
if alert.email_sent < delta:
check_result = CheckResult.objects.get(
assigned_check=alert.assigned_check, agent=alert.agent
)
sleep(rand_range(100, 1500))
check_result.send_email()
alert.email_sent = djangotime.now()
alert.save(update_fields=["email_sent"])
return "ok" | null |
189,110 | import datetime as dt
from time import sleep
from typing import Optional
from django.utils import timezone as djangotime
from alerts.models import Alert
from checks.models import CheckResult
from tacticalrmm.celery import app
from tacticalrmm.helpers import rand_range
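# Illustrative sketch only (hypothetical helper, not part of the original code):
# the notification tasks further down re-send an email/text when the previous
# send is older than the template's periodic alert interval, expressed in days.
def _older_than_interval(
    sent: Optional[dt.datetime], interval_days: Optional[float]
) -> bool:
    # True only when something was already sent and the interval has elapsed
    if sent is None or not interval_days:
        return False
    return sent < djangotime.now() - dt.timedelta(days=interval_days)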
class Alert(models.Model):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="agent",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_check = models.ForeignKey(
"checks.Check",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
alert_type = models.CharField(
max_length=20, choices=AlertType.choices, default=AlertType.AVAILABILITY
)
message = models.TextField(null=True, blank=True)
alert_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
snoozed = models.BooleanField(default=False)
snooze_until = models.DateTimeField(null=True, blank=True)
resolved = models.BooleanField(default=False)
resolved_on = models.DateTimeField(null=True, blank=True)
severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_sent = models.DateTimeField(null=True, blank=True)
resolved_email_sent = models.DateTimeField(null=True, blank=True)
sms_sent = models.DateTimeField(null=True, blank=True)
resolved_sms_sent = models.DateTimeField(null=True, blank=True)
hidden = models.BooleanField(default=False)
action_run = models.DateTimeField(null=True, blank=True)
action_stdout = models.TextField(null=True, blank=True)
action_stderr = models.TextField(null=True, blank=True)
action_retcode = models.BigIntegerField(null=True, blank=True)
action_execution_time = models.CharField(max_length=100, null=True, blank=True)
resolved_action_run = models.DateTimeField(null=True, blank=True)
resolved_action_stdout = models.TextField(null=True, blank=True)
resolved_action_stderr = models.TextField(null=True, blank=True)
resolved_action_retcode = models.BigIntegerField(null=True, blank=True)
resolved_action_execution_time = models.CharField(
max_length=100, null=True, blank=True
)
def __str__(self) -> str:
return f"{self.alert_type} - {self.message}"
def assigned_agent(self) -> "Optional[Agent]":
return self.agent
def site(self) -> "Site":
return self.agent.site
def client(self) -> "Client":
return self.agent.client
def resolve(self) -> None:
self.resolved = True
self.resolved_on = djangotime.now()
self.snoozed = False
self.snooze_until = None
self.save(update_fields=["resolved", "resolved_on", "snoozed", "snooze_until"])
    @classmethod
    def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
if not cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
agent=agent,
alert_type=AlertType.AVAILABILITY,
severity=AlertSeverity.ERROR,
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
    @classmethod
    def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
# need to pass agent if the check is a policy
if not cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_check=check,
agent=agent,
alert_type=AlertType.CHECK,
severity=(
check.alert_severity
if check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else alert_severity
),
message=f"{agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_check=check,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
    @classmethod
    def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
if not cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_task=task,
agent=agent,
alert_type=AlertType.TASK,
severity=task.alert_severity,
message=f"{agent.hostname} has task: {task.name} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_task=task,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
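    # Illustrative note (not part of the original model): each create_or_return_*
    # helper above acts as a get-or-create with de-duplication. If concurrent runs
    # ever leave several unresolved alerts for the same target,
    # MultipleObjectsReturned is caught, every alert except the newest is resolved,
    # and only the newest one is returned to the caller.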
    @classmethod
    def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
dashboard_severities = None
email_severities = None
text_severities = None
always_dashboard = None
always_email = None
always_text = None
alert_interval = None
email_task = None
text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_outage_email_task, agent_outage_sms_task
email_task = agent_outage_email_task
text_task = agent_outage_sms_task
email_alert = instance.overdue_email_alert
text_alert = instance.overdue_text_alert
dashboard_alert = instance.overdue_dashboard_alert
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
alert_severity = AlertSeverity.ERROR
agent = instance
dashboard_severities = [AlertSeverity.ERROR]
email_severities = [AlertSeverity.ERROR]
text_severities = [AlertSeverity.ERROR]
# set alert_template settings
if alert_template:
always_dashboard = alert_template.agent_always_alert
always_email = alert_template.agent_always_email
always_text = alert_template.agent_always_text
alert_interval = alert_template.agent_periodic_alert_days
run_script_action = alert_template.agent_script_actions
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_check_email_alert_task,
handle_check_sms_alert_task,
)
email_task = handle_check_email_alert_task
text_task = handle_check_sms_alert_task
email_alert = instance.assigned_check.email_alert
text_alert = instance.assigned_check.text_alert
dashboard_alert = instance.assigned_check.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = (
instance.assigned_check.alert_severity
if instance.assigned_check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else instance.alert_severity
)
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = (
alert_template.check_dashboard_alert_severity
or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
AlertSeverity.INFO,
]
)
email_severities = alert_template.check_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.check_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.check_always_alert
always_email = alert_template.check_always_email
always_text = alert_template.check_always_text
alert_interval = alert_template.check_periodic_alert_days
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import handle_task_email_alert, handle_task_sms_alert
email_task = handle_task_email_alert
text_task = handle_task_sms_alert
email_alert = instance.task.email_alert
text_alert = instance.task.text_alert
dashboard_alert = instance.task.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = instance.task.alert_severity
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = alert_template.task_dashboard_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
email_severities = alert_template.task_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.task_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.task_always_alert
always_email = alert_template.task_always_email
always_text = alert_template.task_always_text
alert_interval = alert_template.task_periodic_alert_days
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
# check if alert severity changed and update the alert
if alert_severity != alert.severity:
alert.severity = alert_severity
alert.save(update_fields=["severity"])
# create alert in dashboard if enabled
if dashboard_alert or always_dashboard:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and dashboard_severities
and alert.severity in dashboard_severities
):
alert.hidden = False
alert.save(update_fields=["hidden"])
# send email if enabled
if email_alert or always_email:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and email_severities
and alert.severity in email_severities
):
email_task.delay(
pk=alert.pk,
alert_interval=alert_interval,
)
# send text if enabled
if text_alert or always_text:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and text_severities
and alert.severity in text_severities
):
text_task.delay(pk=alert.pk, alert_interval=alert_interval)
# check if any scripts should be run
if (
alert_template
and alert_template.action
and run_script_action
and not alert.action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.action,
username="alert-action-failure",
)
r = agent.run_script(
scriptpk=alert_template.action.pk,
args=alert.parse_script_args(alert_template.action_args),
timeout=alert_template.action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.action_retcode = r["retcode"]
alert.action_stdout = r["stdout"]
alert.action_stderr = r["stderr"]
alert.action_execution_time = "{:.4f}".format(r["execution_time"])
alert.action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) failure alert",
)
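    # Illustrative note (not part of the original model): the notification gates
    # above rely on Python operator precedence and read as
    # (not alert_template) or (alert_template and severities and alert.severity in severities),
    # i.e. notifications always fire when no template is assigned, and are filtered
    # by the template's configured severities otherwise.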
    @classmethod
    def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
email_on_resolved = False
text_on_resolved = False
resolved_email_task = None
resolved_text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_recovery_email_task, agent_recovery_sms_task
resolved_email_task = agent_recovery_email_task
resolved_text_task = agent_recovery_sms_task
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
agent = instance
if alert_template:
email_on_resolved = alert_template.agent_email_on_resolved
text_on_resolved = alert_template.agent_text_on_resolved
run_script_action = alert_template.agent_script_actions
if agent.overdue_email_alert:
email_on_resolved = True
if agent.overdue_text_alert:
text_on_resolved = True
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_resolved_check_email_alert_task,
handle_resolved_check_sms_alert_task,
)
resolved_email_task = handle_resolved_check_email_alert_task
resolved_text_task = handle_resolved_check_sms_alert_task
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.check_email_on_resolved
text_on_resolved = alert_template.check_text_on_resolved
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import (
handle_resolved_task_email_alert,
handle_resolved_task_sms_alert,
)
resolved_email_task = handle_resolved_task_email_alert
resolved_text_task = handle_resolved_task_sms_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.task_email_on_resolved
text_on_resolved = alert_template.task_text_on_resolved
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
alert.resolve()
        # check if a resolved email notification should be sent
if email_on_resolved and not alert.resolved_email_sent:
resolved_email_task.delay(pk=alert.pk)
# check if resolved text should be sent
if text_on_resolved and not alert.resolved_sms_sent:
resolved_text_task.delay(pk=alert.pk)
# check if resolved script should be run
if (
alert_template
and alert_template.resolved_action
and run_script_action
and not alert.resolved_action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
                script=alert_template.resolved_action,
username="alert-action-resolved",
)
r = agent.run_script(
scriptpk=alert_template.resolved_action.pk,
args=alert.parse_script_args(alert_template.resolved_action_args),
timeout=alert_template.resolved_action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.resolved_action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.resolved_action_retcode = r["retcode"]
alert.resolved_action_stdout = r["stdout"]
alert.resolved_action_stderr = r["stderr"]
alert.resolved_action_execution_time = "{:.4f}".format(
r["execution_time"]
)
alert.resolved_action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) resolved alert",
)
def parse_script_args(self, args: List[str]) -> List[str]:
if not args:
return []
temp_args = []
# pattern to match for injection
pattern = re.compile(".*\\{\\{alert\\.(.*)\\}\\}.*")
for arg in args:
if match := pattern.match(arg):
name = match.group(1)
# check if attr exists and isn't a function
if hasattr(self, name) and not callable(getattr(self, name)):
value = f"'{getattr(self, name)}'"
else:
continue
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
except re.error:
temp_args.append(re.sub("\\{\\{.*\\}\\}", re.escape(value), arg))
except Exception as e:
DebugLog.error(log_type=DebugLogType.SCRIPTING, message=str(e))
continue
else:
temp_args.append(arg)
return temp_args
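# Illustrative example (values are hypothetical): parse_script_args() rewrites
# {{alert.<field>}} placeholders from the alert's own attributes. For an alert
# whose message is "srv01 is overdue." and severity is "error", args like
#   ["-Message", "{{alert.message}}", "-Severity", "{{alert.severity}}"]
# come back as
#   ["-Message", "'srv01 is overdue.'", "-Severity", "'error'"]
# while an arg whose placeholder names an unknown or callable attribute is dropped.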
class CheckResult(models.Model):
objects = PermissionQuerySet.as_manager()
class Meta:
unique_together = (("agent", "assigned_check"),)
id = models.BigAutoField(primary_key=True)
agent = models.ForeignKey(
"agents.Agent",
related_name="checkresults",
on_delete=models.CASCADE,
)
assigned_check = models.ForeignKey(
"checks.Check",
related_name="checkresults",
on_delete=models.CASCADE,
)
status = models.CharField(
max_length=100, choices=CheckStatus.choices, default=CheckStatus.PENDING
)
# for memory, diskspace, script, and cpu checks where severity changes
alert_severity = models.CharField(
max_length=15,
choices=AlertSeverity.choices,
null=True,
blank=True,
)
more_info = models.TextField(null=True, blank=True)
last_run = models.DateTimeField(null=True, blank=True)
fail_count = models.PositiveIntegerField(default=0)
outage_history = models.JSONField(null=True, blank=True) # store
extra_details = models.JSONField(null=True, blank=True)
stdout = models.TextField(null=True, blank=True)
stderr = models.TextField(null=True, blank=True)
retcode = models.BigIntegerField(null=True, blank=True)
execution_time = models.CharField(max_length=100, null=True, blank=True)
# cpu and mem check history
history = ArrayField(
models.IntegerField(blank=True), null=True, blank=True, default=list
)
def __str__(self):
return f"{self.agent.hostname} - {self.assigned_check}"
def save(self, *args, **kwargs):
        # default alert_severity to warning for check types whose severity is set per run
if not self.alert_severity and self.assigned_check.check_type in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
):
self.alert_severity = AlertSeverity.WARNING
super().save(*args, **kwargs)
def history_info(self):
if self.assigned_check.check_type in (CheckType.CPU_LOAD, CheckType.MEMORY):
return ", ".join(str(f"{x}%") for x in self.history[-6:])
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_check_alert(
self.assigned_check,
agent=self.agent,
alert_severity=self.alert_severity,
skip_create=not self.assigned_check.should_create_alert(alert_template),
)
def handle_check(self, data, check: "Check", agent: "Agent"):
from alerts.models import Alert
update_fields = []
# cpuload or mem checks
if check.check_type in (CheckType.CPU_LOAD, CheckType.MEMORY):
self.history.append(data["percent"])
if len(self.history) > 15:
self.history = self.history[-15:]
update_fields.extend(["history"])
avg = int(mean(self.history))
if check.error_threshold and avg > check.error_threshold:
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.ERROR
elif check.warning_threshold and avg > check.warning_threshold:
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.WARNING
else:
self.status = CheckStatus.PASSING
# add check history
check.add_check_history(data["percent"], agent.agent_id)
# diskspace checks
elif check.check_type == CheckType.DISK_SPACE:
if data["exists"]:
percent_used = round(data["percent_used"])
if (
check.error_threshold
and (100 - percent_used) < check.error_threshold
):
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.ERROR
elif (
check.warning_threshold
and (100 - percent_used) < check.warning_threshold
):
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.WARNING
else:
self.status = CheckStatus.PASSING
self.more_info = data["more_info"]
# add check history
check.add_check_history(100 - percent_used, agent.agent_id)
else:
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.ERROR
self.more_info = f"Disk {check.disk} does not exist"
update_fields.extend(["more_info"])
# script checks
elif check.check_type == CheckType.SCRIPT:
self.stdout = data["stdout"]
self.stderr = data["stderr"]
self.retcode = data["retcode"]
self.execution_time = "{:.4f}".format(data["runtime"])
if data["retcode"] in check.info_return_codes:
self.alert_severity = AlertSeverity.INFO
self.status = CheckStatus.FAILING
elif data["retcode"] in check.warning_return_codes:
self.alert_severity = AlertSeverity.WARNING
self.status = CheckStatus.FAILING
elif data["retcode"] != 0:
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.ERROR
else:
self.status = CheckStatus.PASSING
update_fields.extend(
[
"stdout",
"stderr",
"retcode",
"execution_time",
]
)
# add check history
check.add_check_history(
1 if self.status == CheckStatus.FAILING else 0,
agent.agent_id,
{
"retcode": data["retcode"],
"stdout": data["stdout"][:60],
"stderr": data["stderr"][:60],
"execution_time": self.execution_time,
},
)
# ping checks
elif check.check_type == CheckType.PING:
self.status = data["status"]
self.more_info = data["output"]
update_fields.extend(["more_info"])
check.add_check_history(
1 if self.status == CheckStatus.FAILING else 0,
agent.agent_id,
self.more_info[:60],
)
# windows service checks
elif check.check_type == CheckType.WINSVC:
self.status = data["status"]
self.more_info = data["more_info"]
update_fields.extend(["more_info"])
check.add_check_history(
1 if self.status == CheckStatus.FAILING else 0,
agent.agent_id,
self.more_info[:60],
)
elif check.check_type == CheckType.EVENT_LOG:
log = data["log"]
if check.fail_when == EvtLogFailWhen.CONTAINS:
if log and len(log) >= check.number_of_events_b4_alert:
self.status = CheckStatus.FAILING
else:
self.status = CheckStatus.PASSING
elif check.fail_when == EvtLogFailWhen.NOT_CONTAINS:
if log and len(log) >= check.number_of_events_b4_alert:
self.status = CheckStatus.PASSING
else:
self.status = CheckStatus.FAILING
self.extra_details = {"log": log}
update_fields.extend(["extra_details"])
check.add_check_history(
1 if self.status == CheckStatus.FAILING else 0,
agent.agent_id,
"Events Found:" + str(len(self.extra_details["log"])),
)
self.last_run = djangotime.now()
# handle status
if self.status == CheckStatus.FAILING:
self.fail_count += 1
update_fields.extend(["status", "fail_count", "alert_severity", "last_run"])
self.save(update_fields=update_fields)
if self.fail_count >= check.fails_b4_alert:
Alert.handle_alert_failure(self)
elif self.status == CheckStatus.PASSING:
self.fail_count = 0
update_fields.extend(["status", "fail_count", "alert_severity", "last_run"])
self.save(update_fields=update_fields)
if Alert.objects.filter(
assigned_check=check, agent=agent, resolved=False
).exists():
Alert.handle_alert_resolve(self)
else:
update_fields.extend(["last_run"])
self.save(update_fields=update_fields)
return self.status
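    # Illustrative note (payload shape inferred from the branches above, not from
    # agent source): a CPU/memory run appears to post {"percent": <int>}, a disk
    # run {"exists": bool, "percent_used": float, "more_info": str}, a script run
    # {"stdout": str, "stderr": str, "retcode": int, "runtime": float}, an event
    # log run {"log": [<event dict>, ...]}, and ping/winsvc runs supply their own
    # "status" plus "output"/"more_info" text.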
def send_email(self):
CORE = get_core_settings()
body: str = ""
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
else:
subject = f"{self} Failed"
if self.assigned_check.check_type == CheckType.DISK_SPACE:
text = ""
if self.assigned_check.warning_threshold:
text += f" Warning Threshold: {self.assigned_check.warning_threshold}%"
if self.assigned_check.error_threshold:
text += f" Error Threshold: {self.assigned_check.error_threshold}%"
try:
percent_used = [
d["percent"]
for d in self.agent.disks
if d["device"] == self.assigned_check.disk
][0]
percent_free = 100 - percent_used
body = subject + f" - Free: {percent_free}%, {text}"
            except Exception:
body = subject + f" - Disk {self.assigned_check.disk} does not exist"
elif self.assigned_check.check_type == CheckType.SCRIPT:
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
elif self.assigned_check.check_type == CheckType.PING:
body = self.more_info
elif self.assigned_check.check_type in (CheckType.CPU_LOAD, CheckType.MEMORY):
text = ""
if self.assigned_check.warning_threshold:
text += f" Warning Threshold: {self.assigned_check.warning_threshold}%"
if self.assigned_check.error_threshold:
text += f" Error Threshold: {self.assigned_check.error_threshold}%"
avg = int(mean(self.history))
if self.assigned_check.check_type == CheckType.CPU_LOAD:
body = subject + f" - Average CPU utilization: {avg}%, {text}"
elif self.assigned_check.check_type == CheckType.MEMORY:
body = subject + f" - Average memory usage: {avg}%, {text}"
elif self.assigned_check.check_type == CheckType.WINSVC:
body = subject + f" - Status: {self.more_info}"
elif self.assigned_check.check_type == CheckType.EVENT_LOG:
if self.assigned_check.event_source and self.assigned_check.event_message:
start = f"Event ID {self.assigned_check.event_id}, source {self.assigned_check.event_source}, containing string {self.assigned_check.event_message} "
elif self.assigned_check.event_source:
start = f"Event ID {self.assigned_check.event_id}, source {self.assigned_check.event_source} "
elif self.assigned_check.event_message:
start = f"Event ID {self.assigned_check.event_id}, containing string {self.assigned_check.event_message} "
else:
start = f"Event ID {self.assigned_check.event_id} "
body = start + f"was found in the {self.assigned_check.log_name} log\n\n"
for i in self.extra_details["log"]:
try:
if i["message"]:
body += f"{i['message']}\n"
                except Exception:
continue
CORE.send_mail(subject, body, alert_template=self.agent.alert_template)
def send_sms(self):
CORE = get_core_settings()
body: str = ""
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Failed"
else:
subject = f"{self} Failed"
if self.assigned_check.check_type == CheckType.DISK_SPACE:
text = ""
if self.assigned_check.warning_threshold:
text += f" Warning Threshold: {self.assigned_check.warning_threshold}%"
if self.assigned_check.error_threshold:
text += f" Error Threshold: {self.assigned_check.error_threshold}%"
try:
percent_used = [
d["percent"]
for d in self.agent.disks
if d["device"] == self.assigned_check.disk
][0]
percent_free = 100 - percent_used
body = subject + f" - Free: {percent_free}%, {text}"
            except Exception:
body = subject + f" - Disk {self.assigned_check.disk} does not exist"
elif self.assigned_check.check_type == CheckType.SCRIPT:
body = subject + f" - Return code: {self.retcode}"
elif self.assigned_check.check_type == CheckType.PING:
body = subject
elif self.assigned_check.check_type in (CheckType.CPU_LOAD, CheckType.MEMORY):
text = ""
if self.assigned_check.warning_threshold:
text += f" Warning Threshold: {self.assigned_check.warning_threshold}%"
if self.assigned_check.error_threshold:
text += f" Error Threshold: {self.assigned_check.error_threshold}%"
avg = int(mean(self.history))
if self.assigned_check.check_type == CheckType.CPU_LOAD:
body = subject + f" - Average CPU utilization: {avg}%, {text}"
elif self.assigned_check.check_type == CheckType.MEMORY:
body = subject + f" - Average memory usage: {avg}%, {text}"
elif self.assigned_check.check_type == CheckType.WINSVC:
body = subject + f" - Status: {self.more_info}"
elif self.assigned_check.check_type == CheckType.EVENT_LOG:
body = subject
CORE.send_sms(body, alert_template=self.agent.alert_template)
def send_resolved_email(self):
CORE = get_core_settings()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
body = f"{self} is now back to normal"
CORE.send_mail(subject, body, alert_template=self.agent.alert_template)
def send_resolved_sms(self):
CORE = get_core_settings()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
CORE.send_sms(subject, alert_template=self.agent.alert_template)
@app.task
def handle_check_sms_alert_task(pk: int, alert_interval: Optional[float] = None) -> str:
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
# first time sending text
if not alert.sms_sent:
check_result = CheckResult.objects.get(
assigned_check=alert.assigned_check, agent=alert.agent
)
sleep(rand_range(100, 1500))
check_result.send_sms()
alert.sms_sent = djangotime.now()
alert.save(update_fields=["sms_sent"])
else:
if alert_interval:
            # send a text only if the last text sent is older than the alert interval
delta = djangotime.now() - dt.timedelta(days=alert_interval)
if alert.sms_sent < delta:
check_result = CheckResult.objects.get(
assigned_check=alert.assigned_check, agent=alert.agent
)
sleep(rand_range(100, 1500))
check_result.send_sms()
alert.sms_sent = djangotime.now()
alert.save(update_fields=["sms_sent"])
return "ok" | null |
189,111 | import datetime as dt
from time import sleep
from typing import Optional
from django.utils import timezone as djangotime
from alerts.models import Alert
from checks.models import CheckResult
from tacticalrmm.celery import app
from tacticalrmm.helpers import rand_range
class Alert(models.Model):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="agent",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_check = models.ForeignKey(
"checks.Check",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
alert_type = models.CharField(
max_length=20, choices=AlertType.choices, default=AlertType.AVAILABILITY
)
message = models.TextField(null=True, blank=True)
alert_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
snoozed = models.BooleanField(default=False)
snooze_until = models.DateTimeField(null=True, blank=True)
resolved = models.BooleanField(default=False)
resolved_on = models.DateTimeField(null=True, blank=True)
severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_sent = models.DateTimeField(null=True, blank=True)
resolved_email_sent = models.DateTimeField(null=True, blank=True)
sms_sent = models.DateTimeField(null=True, blank=True)
resolved_sms_sent = models.DateTimeField(null=True, blank=True)
hidden = models.BooleanField(default=False)
action_run = models.DateTimeField(null=True, blank=True)
action_stdout = models.TextField(null=True, blank=True)
action_stderr = models.TextField(null=True, blank=True)
action_retcode = models.BigIntegerField(null=True, blank=True)
action_execution_time = models.CharField(max_length=100, null=True, blank=True)
resolved_action_run = models.DateTimeField(null=True, blank=True)
resolved_action_stdout = models.TextField(null=True, blank=True)
resolved_action_stderr = models.TextField(null=True, blank=True)
resolved_action_retcode = models.BigIntegerField(null=True, blank=True)
resolved_action_execution_time = models.CharField(
max_length=100, null=True, blank=True
)
def __str__(self) -> str:
return f"{self.alert_type} - {self.message}"
def assigned_agent(self) -> "Optional[Agent]":
return self.agent
def site(self) -> "Site":
return self.agent.site
def client(self) -> "Client":
return self.agent.client
def resolve(self) -> None:
self.resolved = True
self.resolved_on = djangotime.now()
self.snoozed = False
self.snooze_until = None
self.save(update_fields=["resolved", "resolved_on", "snoozed", "snooze_until"])
    @classmethod
    def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
if not cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
agent=agent,
alert_type=AlertType.AVAILABILITY,
severity=AlertSeverity.ERROR,
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
    @classmethod
    def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
# need to pass agent if the check is a policy
if not cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_check=check,
agent=agent,
alert_type=AlertType.CHECK,
severity=(
check.alert_severity
if check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else alert_severity
),
message=f"{agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_check=check,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
    @classmethod
    def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
if not cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_task=task,
agent=agent,
alert_type=AlertType.TASK,
severity=task.alert_severity,
message=f"{agent.hostname} has task: {task.name} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_task=task,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
    @classmethod
    def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
dashboard_severities = None
email_severities = None
text_severities = None
always_dashboard = None
always_email = None
always_text = None
alert_interval = None
email_task = None
text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_outage_email_task, agent_outage_sms_task
email_task = agent_outage_email_task
text_task = agent_outage_sms_task
email_alert = instance.overdue_email_alert
text_alert = instance.overdue_text_alert
dashboard_alert = instance.overdue_dashboard_alert
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
alert_severity = AlertSeverity.ERROR
agent = instance
dashboard_severities = [AlertSeverity.ERROR]
email_severities = [AlertSeverity.ERROR]
text_severities = [AlertSeverity.ERROR]
# set alert_template settings
if alert_template:
always_dashboard = alert_template.agent_always_alert
always_email = alert_template.agent_always_email
always_text = alert_template.agent_always_text
alert_interval = alert_template.agent_periodic_alert_days
run_script_action = alert_template.agent_script_actions
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_check_email_alert_task,
handle_check_sms_alert_task,
)
email_task = handle_check_email_alert_task
text_task = handle_check_sms_alert_task
email_alert = instance.assigned_check.email_alert
text_alert = instance.assigned_check.text_alert
dashboard_alert = instance.assigned_check.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = (
instance.assigned_check.alert_severity
if instance.assigned_check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else instance.alert_severity
)
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = (
alert_template.check_dashboard_alert_severity
or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
AlertSeverity.INFO,
]
)
email_severities = alert_template.check_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.check_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.check_always_alert
always_email = alert_template.check_always_email
always_text = alert_template.check_always_text
alert_interval = alert_template.check_periodic_alert_days
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import handle_task_email_alert, handle_task_sms_alert
email_task = handle_task_email_alert
text_task = handle_task_sms_alert
email_alert = instance.task.email_alert
text_alert = instance.task.text_alert
dashboard_alert = instance.task.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = instance.task.alert_severity
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = alert_template.task_dashboard_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
email_severities = alert_template.task_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.task_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.task_always_alert
always_email = alert_template.task_always_email
always_text = alert_template.task_always_text
alert_interval = alert_template.task_periodic_alert_days
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
# check if alert severity changed and update the alert
if alert_severity != alert.severity:
alert.severity = alert_severity
alert.save(update_fields=["severity"])
# create alert in dashboard if enabled
if dashboard_alert or always_dashboard:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and dashboard_severities
and alert.severity in dashboard_severities
):
alert.hidden = False
alert.save(update_fields=["hidden"])
# send email if enabled
if email_alert or always_email:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and email_severities
and alert.severity in email_severities
):
email_task.delay(
pk=alert.pk,
alert_interval=alert_interval,
)
# send text if enabled
if text_alert or always_text:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and text_severities
and alert.severity in text_severities
):
text_task.delay(pk=alert.pk, alert_interval=alert_interval)
# check if any scripts should be run
if (
alert_template
and alert_template.action
and run_script_action
and not alert.action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.action,
username="alert-action-failure",
)
r = agent.run_script(
scriptpk=alert_template.action.pk,
args=alert.parse_script_args(alert_template.action_args),
timeout=alert_template.action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.action_retcode = r["retcode"]
alert.action_stdout = r["stdout"]
alert.action_stderr = r["stderr"]
alert.action_execution_time = "{:.4f}".format(r["execution_time"])
alert.action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) failure alert",
)
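    # Illustrative note (not part of the original model): email_task and text_task
    # above are Celery tasks, so .delay(...) queues the notification asynchronously;
    # the task itself then decides whether to send right away (first notification)
    # or only once the periodic alert interval has elapsed since the last send.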
    @classmethod
    def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
email_on_resolved = False
text_on_resolved = False
resolved_email_task = None
resolved_text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_recovery_email_task, agent_recovery_sms_task
resolved_email_task = agent_recovery_email_task
resolved_text_task = agent_recovery_sms_task
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
agent = instance
if alert_template:
email_on_resolved = alert_template.agent_email_on_resolved
text_on_resolved = alert_template.agent_text_on_resolved
run_script_action = alert_template.agent_script_actions
if agent.overdue_email_alert:
email_on_resolved = True
if agent.overdue_text_alert:
text_on_resolved = True
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_resolved_check_email_alert_task,
handle_resolved_check_sms_alert_task,
)
resolved_email_task = handle_resolved_check_email_alert_task
resolved_text_task = handle_resolved_check_sms_alert_task
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.check_email_on_resolved
text_on_resolved = alert_template.check_text_on_resolved
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import (
handle_resolved_task_email_alert,
handle_resolved_task_sms_alert,
)
resolved_email_task = handle_resolved_task_email_alert
resolved_text_task = handle_resolved_task_sms_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.task_email_on_resolved
text_on_resolved = alert_template.task_text_on_resolved
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
alert.resolve()
        # check if a resolved email notification should be sent
if email_on_resolved and not alert.resolved_email_sent:
resolved_email_task.delay(pk=alert.pk)
# check if resolved text should be sent
if text_on_resolved and not alert.resolved_sms_sent:
resolved_text_task.delay(pk=alert.pk)
# check if resolved script should be run
if (
alert_template
and alert_template.resolved_action
and run_script_action
and not alert.resolved_action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
                script=alert_template.resolved_action,
username="alert-action-resolved",
)
r = agent.run_script(
scriptpk=alert_template.resolved_action.pk,
args=alert.parse_script_args(alert_template.resolved_action_args),
timeout=alert_template.resolved_action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.resolved_action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.resolved_action_retcode = r["retcode"]
alert.resolved_action_stdout = r["stdout"]
alert.resolved_action_stderr = r["stderr"]
alert.resolved_action_execution_time = "{:.4f}".format(
r["execution_time"]
)
alert.resolved_action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) resolved alert",
)
def parse_script_args(self, args: List[str]) -> List[str]:
if not args:
return []
temp_args = []
# pattern to match for injection
pattern = re.compile(".*\\{\\{alert\\.(.*)\\}\\}.*")
for arg in args:
if match := pattern.match(arg):
name = match.group(1)
# check if attr exists and isn't a function
if hasattr(self, name) and not callable(getattr(self, name)):
value = f"'{getattr(self, name)}'"
else:
continue
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
except re.error:
temp_args.append(re.sub("\\{\\{.*\\}\\}", re.escape(value), arg))
except Exception as e:
DebugLog.error(log_type=DebugLogType.SCRIPTING, message=str(e))
continue
else:
temp_args.append(arg)
return temp_args
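# Illustrative note (not part of the original models): for memory, cpuload,
# diskspace and script checks the effective severity comes from the individual
# result (it can change run to run depending on which threshold was crossed),
# while every other check type uses the static alert_severity configured on the
# Check itself; create_or_return_check_alert above encodes exactly that split.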
class CheckResult(models.Model):
objects = PermissionQuerySet.as_manager()
class Meta:
unique_together = (("agent", "assigned_check"),)
id = models.BigAutoField(primary_key=True)
agent = models.ForeignKey(
"agents.Agent",
related_name="checkresults",
on_delete=models.CASCADE,
)
assigned_check = models.ForeignKey(
"checks.Check",
related_name="checkresults",
on_delete=models.CASCADE,
)
status = models.CharField(
max_length=100, choices=CheckStatus.choices, default=CheckStatus.PENDING
)
# for memory, diskspace, script, and cpu checks where severity changes
alert_severity = models.CharField(
max_length=15,
choices=AlertSeverity.choices,
null=True,
blank=True,
)
more_info = models.TextField(null=True, blank=True)
last_run = models.DateTimeField(null=True, blank=True)
fail_count = models.PositiveIntegerField(default=0)
outage_history = models.JSONField(null=True, blank=True) # store
extra_details = models.JSONField(null=True, blank=True)
stdout = models.TextField(null=True, blank=True)
stderr = models.TextField(null=True, blank=True)
retcode = models.BigIntegerField(null=True, blank=True)
execution_time = models.CharField(max_length=100, null=True, blank=True)
# cpu and mem check history
history = ArrayField(
models.IntegerField(blank=True), null=True, blank=True, default=list
)
def __str__(self):
return f"{self.agent.hostname} - {self.assigned_check}"
def save(self, *args, **kwargs):
        # default alert_severity to warning for check types whose severity is set per run
if not self.alert_severity and self.assigned_check.check_type in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
):
self.alert_severity = AlertSeverity.WARNING
super().save(*args, **kwargs)
def history_info(self):
if self.assigned_check.check_type in (CheckType.CPU_LOAD, CheckType.MEMORY):
return ", ".join(str(f"{x}%") for x in self.history[-6:])
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_check_alert(
self.assigned_check,
agent=self.agent,
alert_severity=self.alert_severity,
skip_create=not self.assigned_check.should_create_alert(alert_template),
)
def handle_check(self, data, check: "Check", agent: "Agent"):
from alerts.models import Alert
update_fields = []
# cpuload or mem checks
if check.check_type in (CheckType.CPU_LOAD, CheckType.MEMORY):
self.history.append(data["percent"])
if len(self.history) > 15:
self.history = self.history[-15:]
update_fields.extend(["history"])
avg = int(mean(self.history))
if check.error_threshold and avg > check.error_threshold:
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.ERROR
elif check.warning_threshold and avg > check.warning_threshold:
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.WARNING
else:
self.status = CheckStatus.PASSING
# add check history
check.add_check_history(data["percent"], agent.agent_id)
# diskspace checks
elif check.check_type == CheckType.DISK_SPACE:
if data["exists"]:
percent_used = round(data["percent_used"])
if (
check.error_threshold
and (100 - percent_used) < check.error_threshold
):
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.ERROR
elif (
check.warning_threshold
and (100 - percent_used) < check.warning_threshold
):
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.WARNING
else:
self.status = CheckStatus.PASSING
self.more_info = data["more_info"]
# add check history
check.add_check_history(100 - percent_used, agent.agent_id)
else:
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.ERROR
self.more_info = f"Disk {check.disk} does not exist"
update_fields.extend(["more_info"])
# script checks
elif check.check_type == CheckType.SCRIPT:
self.stdout = data["stdout"]
self.stderr = data["stderr"]
self.retcode = data["retcode"]
self.execution_time = "{:.4f}".format(data["runtime"])
if data["retcode"] in check.info_return_codes:
self.alert_severity = AlertSeverity.INFO
self.status = CheckStatus.FAILING
elif data["retcode"] in check.warning_return_codes:
self.alert_severity = AlertSeverity.WARNING
self.status = CheckStatus.FAILING
elif data["retcode"] != 0:
self.status = CheckStatus.FAILING
self.alert_severity = AlertSeverity.ERROR
else:
self.status = CheckStatus.PASSING
update_fields.extend(
[
"stdout",
"stderr",
"retcode",
"execution_time",
]
)
# add check history
check.add_check_history(
1 if self.status == CheckStatus.FAILING else 0,
agent.agent_id,
{
"retcode": data["retcode"],
"stdout": data["stdout"][:60],
"stderr": data["stderr"][:60],
"execution_time": self.execution_time,
},
)
# ping checks
elif check.check_type == CheckType.PING:
self.status = data["status"]
self.more_info = data["output"]
update_fields.extend(["more_info"])
check.add_check_history(
1 if self.status == CheckStatus.FAILING else 0,
agent.agent_id,
self.more_info[:60],
)
# windows service checks
elif check.check_type == CheckType.WINSVC:
self.status = data["status"]
self.more_info = data["more_info"]
update_fields.extend(["more_info"])
check.add_check_history(
1 if self.status == CheckStatus.FAILING else 0,
agent.agent_id,
self.more_info[:60],
)
elif check.check_type == CheckType.EVENT_LOG:
log = data["log"]
if check.fail_when == EvtLogFailWhen.CONTAINS:
if log and len(log) >= check.number_of_events_b4_alert:
self.status = CheckStatus.FAILING
else:
self.status = CheckStatus.PASSING
elif check.fail_when == EvtLogFailWhen.NOT_CONTAINS:
if log and len(log) >= check.number_of_events_b4_alert:
self.status = CheckStatus.PASSING
else:
self.status = CheckStatus.FAILING
self.extra_details = {"log": log}
update_fields.extend(["extra_details"])
check.add_check_history(
1 if self.status == CheckStatus.FAILING else 0,
agent.agent_id,
"Events Found:" + str(len(self.extra_details["log"])),
)
self.last_run = djangotime.now()
# handle status
if self.status == CheckStatus.FAILING:
self.fail_count += 1
update_fields.extend(["status", "fail_count", "alert_severity", "last_run"])
self.save(update_fields=update_fields)
if self.fail_count >= check.fails_b4_alert:
Alert.handle_alert_failure(self)
elif self.status == CheckStatus.PASSING:
self.fail_count = 0
update_fields.extend(["status", "fail_count", "alert_severity", "last_run"])
self.save(update_fields=update_fields)
if Alert.objects.filter(
assigned_check=check, agent=agent, resolved=False
).exists():
Alert.handle_alert_resolve(self)
else:
update_fields.extend(["last_run"])
self.save(update_fields=update_fields)
return self.status
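# --- Editor's illustrative sketch (not part of the upstream source) ---
# A minimal, runnable approximation of the CPU/memory threshold logic in
# handle_check() above. The warning/error values here are assumptions for the
# example; the real thresholds come from the Check model.
def _example_threshold_status(avg: int, warning: int = 75, error: int = 90):
    if error and avg > error:
        return "failing", "error"
    if warning and avg > warning:
        return "failing", "warning"
    return "passing", None
# _example_threshold_status(95) -> ("failing", "error")
# _example_threshold_status(80) -> ("failing", "warning")
# _example_threshold_status(40) -> ("passing", None)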
def send_email(self):
CORE = get_core_settings()
body: str = ""
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
else:
subject = f"{self} Failed"
if self.assigned_check.check_type == CheckType.DISK_SPACE:
text = ""
if self.assigned_check.warning_threshold:
text += f" Warning Threshold: {self.assigned_check.warning_threshold}%"
if self.assigned_check.error_threshold:
text += f" Error Threshold: {self.assigned_check.error_threshold}%"
try:
percent_used = [
d["percent"]
for d in self.agent.disks
if d["device"] == self.assigned_check.disk
][0]
percent_free = 100 - percent_used
body = subject + f" - Free: {percent_free}%, {text}"
except:
body = subject + f" - Disk {self.assigned_check.disk} does not exist"
elif self.assigned_check.check_type == CheckType.SCRIPT:
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
elif self.assigned_check.check_type == CheckType.PING:
body = self.more_info
elif self.assigned_check.check_type in (CheckType.CPU_LOAD, CheckType.MEMORY):
text = ""
if self.assigned_check.warning_threshold:
text += f" Warning Threshold: {self.assigned_check.warning_threshold}%"
if self.assigned_check.error_threshold:
text += f" Error Threshold: {self.assigned_check.error_threshold}%"
avg = int(mean(self.history))
if self.assigned_check.check_type == CheckType.CPU_LOAD:
body = subject + f" - Average CPU utilization: {avg}%, {text}"
elif self.assigned_check.check_type == CheckType.MEMORY:
body = subject + f" - Average memory usage: {avg}%, {text}"
elif self.assigned_check.check_type == CheckType.WINSVC:
body = subject + f" - Status: {self.more_info}"
elif self.assigned_check.check_type == CheckType.EVENT_LOG:
if self.assigned_check.event_source and self.assigned_check.event_message:
start = f"Event ID {self.assigned_check.event_id}, source {self.assigned_check.event_source}, containing string {self.assigned_check.event_message} "
elif self.assigned_check.event_source:
start = f"Event ID {self.assigned_check.event_id}, source {self.assigned_check.event_source} "
elif self.assigned_check.event_message:
start = f"Event ID {self.assigned_check.event_id}, containing string {self.assigned_check.event_message} "
else:
start = f"Event ID {self.assigned_check.event_id} "
body = start + f"was found in the {self.assigned_check.log_name} log\n\n"
for i in self.extra_details["log"]:
try:
if i["message"]:
body += f"{i['message']}\n"
except:
continue
CORE.send_mail(subject, body, alert_template=self.agent.alert_template)
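# --- Editor's illustrative sketch (not part of the upstream source) ---
# How the disk-space email body above derives the free percentage from
# agent.disks; the sample disk list below is a made-up assumption.
_example_disks = [{"device": "C:", "percent": 72}, {"device": "D:", "percent": 10}]
_example_used = [d["percent"] for d in _example_disks if d["device"] == "C:"][0]
_example_free = 100 - _example_used  # -> 28, reported as "Free: 28%"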
def send_sms(self):
CORE = get_core_settings()
body: str = ""
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Failed"
else:
subject = f"{self} Failed"
if self.assigned_check.check_type == CheckType.DISK_SPACE:
text = ""
if self.assigned_check.warning_threshold:
text += f" Warning Threshold: {self.assigned_check.warning_threshold}%"
if self.assigned_check.error_threshold:
text += f" Error Threshold: {self.assigned_check.error_threshold}%"
try:
percent_used = [
d["percent"]
for d in self.agent.disks
if d["device"] == self.assigned_check.disk
][0]
percent_free = 100 - percent_used
body = subject + f" - Free: {percent_free}%, {text}"
except:
body = subject + f" - Disk {self.assigned_check.disk} does not exist"
elif self.assigned_check.check_type == CheckType.SCRIPT:
body = subject + f" - Return code: {self.retcode}"
elif self.assigned_check.check_type == CheckType.PING:
body = subject
elif self.assigned_check.check_type in (CheckType.CPU_LOAD, CheckType.MEMORY):
text = ""
if self.assigned_check.warning_threshold:
text += f" Warning Threshold: {self.assigned_check.warning_threshold}%"
if self.assigned_check.error_threshold:
text += f" Error Threshold: {self.assigned_check.error_threshold}%"
avg = int(mean(self.history))
if self.assigned_check.check_type == CheckType.CPU_LOAD:
body = subject + f" - Average CPU utilization: {avg}%, {text}"
elif self.assigned_check.check_type == CheckType.MEMORY:
body = subject + f" - Average memory usage: {avg}%, {text}"
elif self.assigned_check.check_type == CheckType.WINSVC:
body = subject + f" - Status: {self.more_info}"
elif self.assigned_check.check_type == CheckType.EVENT_LOG:
body = subject
CORE.send_sms(body, alert_template=self.agent.alert_template)
def send_resolved_email(self):
CORE = get_core_settings()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
body = f"{self} is now back to normal"
CORE.send_mail(subject, body, alert_template=self.agent.alert_template)
def send_resolved_sms(self):
CORE = get_core_settings()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
CORE.send_sms(subject, alert_template=self.agent.alert_template)
def handle_resolved_check_sms_alert_task(pk: int) -> str:
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
# first time sending text
if not alert.resolved_sms_sent:
check_result = CheckResult.objects.get(
assigned_check=alert.assigned_check, agent=alert.agent
)
sleep(rand_range(100, 1500))
check_result.send_resolved_sms()
alert.resolved_sms_sent = djangotime.now()
alert.save(update_fields=["resolved_sms_sent"])
return "ok" | null |
189,112 | import datetime as dt
from time import sleep
from typing import Optional
from django.utils import timezone as djangotime
from alerts.models import Alert
from checks.models import CheckResult
from tacticalrmm.celery import app
from tacticalrmm.helpers import rand_range
class Alert(models.Model):
def __str__(self) -> str:
def assigned_agent(self) -> "Optional[Agent]":
def site(self) -> "Site":
def client(self) -> "Client":
def resolve(self) -> None:
def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
def parse_script_args(self, args: List[str]) -> List[str]:
class CheckResult(models.Model):
def __str__(self):
def save(self, *args, **kwargs):
def history_info(self):
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
def handle_check(self, data, check: "Check", agent: "Agent"):
def send_email(self):
def send_sms(self):
def send_resolved_email(self):
def send_resolved_sms(self):
def handle_resolved_check_email_alert_task(pk: int) -> str:
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
# first time sending email
if not alert.resolved_email_sent:
check_result = CheckResult.objects.get(
assigned_check=alert.assigned_check, agent=alert.agent
)
sleep(rand_range(100, 1500))
check_result.send_resolved_email()
alert.resolved_email_sent = djangotime.now()
alert.save(update_fields=["resolved_email_sent"])
return "ok" | null |
189,113 | import asyncio
from datetime import datetime as dt
from django.db.models import Prefetch, Q
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from agents.models import Agent
from alerts.models import Alert
from automation.models import Policy
from tacticalrmm.constants import AGENT_DEFER, CheckStatus, CheckType
from tacticalrmm.exceptions import NatsDown
from tacticalrmm.helpers import notify_error
from tacticalrmm.nats_utils import abulk_nats_command
from tacticalrmm.permissions import _has_perm_on_agent
from .models import Check, CheckHistory, CheckResult
from .permissions import BulkRunChecksPerms, ChecksPerms, RunChecksPerms
from .serializers import CheckHistorySerializer, CheckSerializer
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat: "AgentPlat" = models.CharField( # type: ignore
max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
goarch: "GoArch" = models.CharField( # type: ignore
max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(
max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
return self.hostname
def save(self, *args, **kwargs):
# prevent recursion since calling set_alert_template() also calls save()
if not hasattr(self, "_processing_set_alert_template"):
self._processing_set_alert_template = False
if self.pk and not self._processing_set_alert_template:
orig = Agent.objects.get(pk=self.pk)
mon_type_changed = self.monitoring_type != orig.monitoring_type
site_changed = self.site_id != orig.site_id
if mon_type_changed or site_changed:
self._processing_set_alert_template = True
self.set_alert_template()
self._processing_set_alert_template = False
super().save(*args, **kwargs)
def client(self) -> "Client":
return self.site.client
def timezone(self) -> str:
# return the default timezone unless the timezone is explicitly set per agent
if self.time_zone:
return self.time_zone
return get_core_settings().default_time_zone
def is_posix(self) -> bool:
return self.plat in {AgentPlat.LINUX, AgentPlat.DARWIN}
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
elif "32 bit" in self.operating_system or "32bit" in self.operating_system:
return "32"
return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
ver = settings.LATEST_AGENT_VER
if not self.goarch:
DebugLog.warning(
agent=self,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
)
return "noarch"
if pyver.parse(self.version) <= pyver.parse("1.3.0"):
return "not supported"
url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"
if not force:
if self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).exists():
self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).delete()
PendingAction.objects.create(
agent=self,
action_type=PAAction.AGENT_UPDATE,
details={
"url": url,
"version": ver,
"inno": bin,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": ver,
"inno": bin,
},
}
asyncio.run(self.nats_cmd(nats_data, wait=False))
return "created"
def status(self) -> str:
now = djangotime.now()
offline = now - djangotime.timedelta(minutes=self.offline_time)
overdue = now - djangotime.timedelta(minutes=self.overdue_time)
if self.last_seen is not None:
if (self.last_seen < offline) and (self.last_seen > overdue):
return AGENT_STATUS_OFFLINE
elif (self.last_seen < offline) and (self.last_seen < overdue):
return AGENT_STATUS_OVERDUE
else:
return AGENT_STATUS_ONLINE
else:
return AGENT_STATUS_OFFLINE
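# --- Editor's illustrative sketch (not part of the upstream source) ---
# Standalone mirror of the status() cutoffs above, using the model defaults
# (offline_time=4, overdue_time=30) as assumed example values.
from datetime import datetime, timedelta

def _example_agent_status(last_seen, now, offline_minutes=4, overdue_minutes=30):
    offline = now - timedelta(minutes=offline_minutes)
    overdue = now - timedelta(minutes=overdue_minutes)
    if last_seen is None:
        return "offline"
    if last_seen < offline and last_seen > overdue:
        return "offline"
    if last_seen < offline and last_seen < overdue:
        return "overdue"
    return "online"
# _example_agent_status(datetime(2024, 1, 1, 11, 50), datetime(2024, 1, 1, 12, 0)) -> "offline"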
def checks(self) -> Dict[str, Any]:
total, passing, failing, warning, info = 0, 0, 0, 0, 0
for check in self.get_checks_with_policies(exclude_overridden=True):
total += 1
if (
not hasattr(check.check_result, "status")
or isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.PASSING
):
passing += 1
elif (
isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.FAILING
):
alert_severity = (
check.check_result.alert_severity
if check.check_type
in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
)
else check.alert_severity
)
if alert_severity == AlertSeverity.ERROR:
failing += 1
elif alert_severity == AlertSeverity.WARNING:
warning += 1
elif alert_severity == AlertSeverity.INFO:
info += 1
ret = {
"total": total,
"passing": passing,
"failing": failing,
"warning": warning,
"info": info,
"has_failing_checks": failing > 0 or warning > 0,
}
return ret
def pending_actions_count(self) -> int:
ret = cache.get(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}")
if ret is None:
ret = self.pendingactions.filter(status=PAStatus.PENDING).count()
cache.set(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}", ret, 600)
return ret
def cpu_model(self) -> List[str]:
if self.is_posix:
try:
return cast(List[str], self.wmi_detail["cpus"])
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
for cpu in cpus:
name = [x["Name"] for x in cpu if "Name" in x][0]
lp, nc = "", ""
with suppress(Exception):
lp = [
x["NumberOfLogicalProcessors"]
for x in cpu
if "NumberOfCores" in x
][0]
nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
if lp and nc:
cpu_string = f"{name}, {nc}C/{lp}T"
else:
cpu_string = name
ret.append(cpu_string)
return ret
except:
return ["unknown cpu model"]
def graphics(self) -> str:
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
for i in graphics:
caption = [x["Caption"] for x in i if "Caption" in x][0]
if "microsoft remote display adapter" in caption.lower():
mrda.append("yes")
continue
ret.append([x["Caption"] for x in i if "Caption" in x][0])
# only return this if no other graphics cards were found
if not ret and mrda:
return "Microsoft Remote Display Adapter"
return ", ".join(ret)
except:
return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
except:
return "error getting local ips"
for i in ips:
try:
addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
except:
continue
if addr is None:
continue
for ip in addr:
if validators.ipv4(ip):
ret.append(ip)
if len(ret) == 1:
return cast(str, ret[0])
return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
if self.is_posix:
try:
return cast(str, self.wmi_detail["make_model"])
except:
return "error getting make/model"
with suppress(Exception):
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
model = [x["Model"] for x in comp_sys if "Model" in x][0]
if "to be filled" in model.lower():
mobo = self.wmi_detail["base_board"][0]
make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
model = [x["Product"] for x in mobo if "Product" in x][0]
if make.lower() == "lenovo":
sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
if "to be filled" not in sysfam.lower():
model = sysfam
return f"{make} {model}"
with suppress(Exception):
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
return "unknown make/model"
def physical_disks(self) -> Sequence[Disk]:
if self.is_posix:
try:
return cast(List[Disk], self.wmi_detail["disks"])
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
for disk in disks:
interface_type = [
x["InterfaceType"] for x in disk if "InterfaceType" in x
][0]
if interface_type == "USB":
continue
model = [x["Caption"] for x in disk if "Caption" in x][0]
size = [x["Size"] for x in disk if "Size" in x][0]
size_in_gb = round(int(size) / 1_073_741_824)
ret.append(f"{model} {size_in_gb:,}GB {interface_type}")
return ret
except:
return ["unknown disk"]
def serial_number(self) -> str:
if self.is_posix:
try:
return self.wmi_detail["serialnumber"]
except:
return ""
try:
return self.wmi_detail["bios"][0][0]["SerialNumber"]
except:
return ""
def hex_mesh_node_id(self) -> str:
return _b64_to_hex(self.mesh_node_id)
def online_agents(cls, min_version: str = "") -> "List[Agent]":
if min_version:
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if pyver.parse(i.version) >= pyver.parse(min_version)
and i.status == AGENT_STATUS_ONLINE
]
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if i.status == AGENT_STATUS_ONLINE
]
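# --- Editor's illustrative sketch (not part of the upstream source) ---
# The min_version filter in online_agents() above relies on semantic version
# comparison; assuming pyver refers to packaging.version, it behaves like this:
from packaging import version as _example_pyver  # assumption: same library as pyver above
_example_supported = _example_pyver.parse("2.4.0") >= _example_pyver.parse("1.3.0")  # -> True
_example_too_old = _example_pyver.parse("1.2.9") >= _example_pyver.parse("1.3.0")  # -> False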
def is_supported_script(self, platforms: List[str]) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
if exclude_overridden:
checks = (
list(
check
for check in self.agentchecks.all()
if not check.overridden_by_policy
)
+ self.get_checks_from_policies()
)
else:
checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
results = self.taskresults.all() # type: ignore
for task in tasks:
for result in results:
if result.task.id == task.pk:
task.task_result = result
break
return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
results = self.checkresults.all() # type: ignore
for check in checks:
for result in results:
if result.assigned_check.id == check.pk:
check.check_result = result
break
return checks
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
from checks.models import Check
site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
default_policy = getattr(
get_core_settings(), f"{self.monitoring_type}_policy", None
)
# prefetch excluded objects on policies only if the policy is not None
models.prefetch_related_objects(
[
policy
for policy in (self.policy, site_policy, client_policy, default_policy)
if policy
],
"excluded_agents",
"excluded_sites",
"excluded_clients",
models.Prefetch(
"policychecks", queryset=Check.objects.select_related("script")
),
"autotasks",
)
return {
"agent_policy": (
self.policy
if self.policy and not self.policy.is_agent_excluded(self)
else None
),
"site_policy": (
site_policy
if (site_policy and not site_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
else None
),
"client_policy": (
client_policy
if (client_policy and not client_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
else None
),
"default_policy": (
default_policy
if (default_policy and not default_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
and not self.client.block_policy_inheritance
else None
),
}
def check_run_interval(self) -> int:
interval = self.check_interval
# determine if any agent checks have a custom interval and set the lowest interval
for check in self.get_checks_with_policies():
if check.run_interval and check.run_interval < interval:
# don't allow check runs less than 15s
interval = 15 if check.run_interval < 15 else check.run_interval
return interval
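# --- Editor's illustrative sketch (not part of the upstream source) ---
# Standalone mirror of check_run_interval() above: take the lowest custom
# per-check interval, with a 15-second floor. Example values are assumptions.
def _example_run_interval(agent_interval, check_intervals):
    interval = agent_interval
    for run_interval in check_intervals:
        if run_interval and run_interval < interval:
            interval = 15 if run_interval < 15 else run_interval
    return interval
# _example_run_interval(120, [60, 10]) -> 15
# _example_run_interval(120, [60]) -> 60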
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
from scripts.models import Script
script = Script.objects.get(pk=scriptpk)
# always override if set on script model
if script.run_as_user:
run_as_user = True
parsed_args = script.parse_script_args(self, script.shell, args)
parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)
data = {
"func": "runscriptfull" if full else "runscript",
"timeout": timeout,
"script_args": parsed_args,
"payload": {
"code": script.code,
"shell": script.shell,
},
"run_as_user": run_as_user,
"env_vars": parsed_env_vars,
"nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
"deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
}
if history_pk != 0:
data["id"] = history_pk
running_agent = self
if run_on_any:
nats_ping = {"func": "ping"}
# try on self first
r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = self
else:
for agent in Agent.online_agents():
r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = agent
break
if running_agent.pk == self.pk:
return "Unable to find an online agent"
if wait:
return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
else:
asyncio.run(running_agent.nats_cmd(data, wait=False))
return "ok"
# auto-approves updates
def approve_updates(self) -> None:
patch_policy = self.get_patch_policy()
severity_list = []
if patch_policy.critical == "approve":
severity_list.append("Critical")
if patch_policy.important == "approve":
severity_list.append("Important")
if patch_policy.moderate == "approve":
severity_list.append("Moderate")
if patch_policy.low == "approve":
severity_list.append("Low")
if patch_policy.other == "approve":
severity_list.append("")
self.winupdates.filter(severity__in=severity_list, installed=False).exclude(
action="approve"
).update(action="approve")
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self) -> "WinUpdatePolicy":
from winupdate.models import WinUpdatePolicy
# check if any applied policy has a patch policy and if so use it
patch_policy = None
agent_policy = self.winupdatepolicy.first()
if not agent_policy:
agent_policy = WinUpdatePolicy.objects.create(agent=self)
# Get the list of policies applied to the agent and select the
# highest priority one.
policies = self.get_agent_policies()
for _, policy in policies.items():
if policy and policy.active and policy.winupdatepolicy.exists():
patch_policy = policy.winupdatepolicy.first()
break
# if policy still doesn't exist return the agent patch policy
if not patch_policy:
return agent_policy
# patch policy exists. check if any agent settings are set to override patch policy
if agent_policy.critical != "inherit":
patch_policy.critical = agent_policy.critical
if agent_policy.important != "inherit":
patch_policy.important = agent_policy.important
if agent_policy.moderate != "inherit":
patch_policy.moderate = agent_policy.moderate
if agent_policy.low != "inherit":
patch_policy.low = agent_policy.low
if agent_policy.other != "inherit":
patch_policy.other = agent_policy.other
if agent_policy.run_time_frequency != "inherit":
patch_policy.run_time_frequency = agent_policy.run_time_frequency
patch_policy.run_time_hour = agent_policy.run_time_hour
patch_policy.run_time_days = agent_policy.run_time_days
if agent_policy.reboot_after_install != "inherit":
patch_policy.reboot_after_install = agent_policy.reboot_after_install
if not agent_policy.reprocess_failed_inherit:
patch_policy.reprocess_failed = agent_policy.reprocess_failed
patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
patch_policy.email_if_fail = agent_policy.email_if_fail
return patch_policy
def get_approved_update_guids(self) -> list[str]:
return list(
self.winupdates.filter(action="approve", installed=False).values_list( # type: ignore
"guid", flat=True
)
)
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
core = get_core_settings()
policies = self.get_agent_policies()
# loop through all policies applied to agent and return an alert_template if found
processed_policies: List[int] = []
for key, policy in policies.items():
# the global default alert_template overrides a default policy that has an alert template applied
if (
"default" in key
and core.alert_template
and core.alert_template.is_active
and not core.alert_template.is_agent_excluded(self)
):
self.alert_template = core.alert_template
self.save(update_fields=["alert_template"])
return core.alert_template
elif (
policy
and policy.active
and policy.pk not in processed_policies
and policy.alert_template
and policy.alert_template.is_active
and not policy.alert_template.is_agent_excluded(self)
):
self.alert_template = policy.alert_template
self.save(update_fields=["alert_template"])
return policy.alert_template
elif (
"site" in key
and self.site.alert_template
and self.site.alert_template.is_active
and not self.site.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.alert_template
self.save(update_fields=["alert_template"])
return self.site.alert_template
elif (
"client" in key
and self.site.client.alert_template
and self.site.client.alert_template.is_active
and not self.site.client.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.client.alert_template
self.save(update_fields=["alert_template"])
return self.site.client.alert_template
# no alert templates found or agent has been excluded
self.alert_template = None
self.save(update_fields=["alert_template"])
return None
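# --- Editor's illustrative sketch (not part of the upstream source) ---
# Simplified view of the precedence documented above (policy, then site, then
# client, then global); it ignores the is_active/exclusion checks that
# set_alert_template() also applies.
def _example_resolve_alert_template(policy_tmpl, site_tmpl, client_tmpl, global_tmpl):
    for candidate in (policy_tmpl, site_tmpl, client_tmpl, global_tmpl):
        if candidate is not None:
            return candidate
    return None
# _example_resolve_alert_template(None, "site-tmpl", None, "global-tmpl") -> "site-tmpl"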
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_availability_alert(
self, skip_create=not self.should_create_alert(alert_template)
)
def get_checks_from_policies(self) -> "List[Check]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance or self.agentchecks.exists():
cache_key = f"agent_{self.agent_id}_checks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"
cached_checks = cache.get(cache_key)
if isinstance(cached_checks, list):
return cached_checks
else:
# clear agent checks that have overridden_by_policy set
self.agentchecks.update(overridden_by_policy=False) # type: ignore
# get agent checks based on policies
checks = Policy.get_policy_checks(self)
cache.set(cache_key, checks, 600)
return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance:
cache_key = f"agent_{self.agent_id}_tasks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"
cached_tasks = cache.get(cache_key)
if isinstance(cached_tasks, list):
return cached_tasks
else:
# get agent tasks based on policies
tasks = Policy.get_policy_tasks(self)
cache.set(cache_key, tasks, 600)
return tasks
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
opts = setup_nats_options()
try:
nc = await nats.connect(**opts)
except:
return "natsdown"
if wait:
try:
msg = await nc.request(
self.agent_id, msgpack.dumps(data), timeout=timeout
)
except TimeoutError:
ret = "timeout"
else:
try:
ret = msgpack.loads(msg.data)
except Exception as e:
ret = str(e)
logger.error(e)
await nc.close()
return ret
else:
await nc.publish(self.agent_id, msgpack.dumps(data))
await nc.flush()
await nc.close()
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
"""
Return type: tuple(message: str, error: bool)
"""
if mode == "tacagent":
if self.plat == AgentPlat.LINUX:
cmd = "systemctl restart tacticalagent.service"
shell = 3
elif self.plat == AgentPlat.DARWIN:
cmd = "launchctl kickstart -k system/tacticalagent"
shell = 3
else:
cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
shell = 1
asyncio.run(
send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
)
return "ok", False
elif mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
if wait:
r = asyncio.run(self.nats_cmd(data, timeout=20))
if r == "ok":
return "ok", False
else:
return str(r), True
else:
asyncio.run(self.nats_cmd(data, timeout=20, wait=False))
return "ok", False
return "invalid", True
def serialize(agent: "Agent") -> Dict[str, Any]:
# serializes the agent and returns the serialized data
from .serializers import AgentAuditSerializer
return AgentAuditSerializer(agent).data
def delete_superseded_updates(self) -> None:
with suppress(Exception):
pks = [] # list of pks to delete
kbs = list(self.winupdates.values_list("kb", flat=True))
d = Counter(kbs)
dupes = [k for k, v in d.items() if v > 1]
for dupe in dupes:
titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
# extract the version from the title and sort from oldest to newest
# skip if no version info is available, since there is nothing to parse
try:
matches = r"(Version|Versão)"
pattern = r"\(" + matches + r"(.*?)\)"
vers = [
re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
for i in titles
]
sorted_vers = sorted(vers, key=LooseVersion)
except:
continue
# append all but the latest version to our list of pks to delete
for ver in sorted_vers[:-1]:
q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
pks.append(q.first().pk)
pks = list(set(pks))
self.winupdates.filter(pk__in=pks).delete()
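# --- Editor's illustrative sketch (not part of the upstream source) ---
# The regex in delete_superseded_updates() above extracts "(Version x)" or
# "(Versão x)" from an update title; the sample title is a made-up assumption.
import re
_example_title = "2023-01 Cumulative Update for ExampleOS (Version 21H2)"
_example_match = re.search(r"\((Version|Versão)(.*?)\)", _example_title, flags=re.IGNORECASE)
_example_version = _example_match.group(2).strip()  # -> "21H2"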
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
return bool(
self.overdue_dashboard_alert
or self.overdue_email_alert
or self.overdue_text_alert
or (
alert_template
and (
alert_template.agent_always_alert
or alert_template.agent_always_email
or alert_template.agent_always_text
)
)
)
def send_outage_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
(
f"Data has not been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"within the expected time."
),
alert_template=self.alert_template,
)
def send_recovery_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
(
f"Data has been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"after an interruption in data transmission."
),
alert_template=self.alert_template,
)
def send_outage_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
alert_template=self.alert_template,
)
def send_recovery_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
alert_template=self.alert_template,
)
def run_checks(request, agent_id):
agent = get_object_or_404(Agent, agent_id=agent_id)
r = asyncio.run(agent.nats_cmd({"func": "runchecks"}, timeout=15))
if r == "busy":
return notify_error(f"Checks are already running on {agent.hostname}")
elif r == "ok":
return Response(f"Checks will now be run on {agent.hostname}")
return notify_error("Unable to contact the agent") | null |
189,114 | import asyncio
from datetime import datetime as dt
from django.db.models import Prefetch, Q
from django.shortcuts import get_object_or_404
from django.utils import timezone as djangotime
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from agents.models import Agent
from alerts.models import Alert
from automation.models import Policy
from tacticalrmm.constants import AGENT_DEFER, CheckStatus, CheckType
from tacticalrmm.exceptions import NatsDown
from tacticalrmm.helpers import notify_error
from tacticalrmm.nats_utils import abulk_nats_command
from tacticalrmm.permissions import _has_perm_on_agent
from .models import Check, CheckHistory, CheckResult
from .permissions import BulkRunChecksPerms, ChecksPerms, RunChecksPerms
from .serializers import CheckHistorySerializer, CheckSerializer
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat: "AgentPlat" = models.CharField( # type: ignore
max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
goarch: "GoArch" = models.CharField( # type: ignore
max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(
max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
return self.hostname
def save(self, *args, **kwargs):
# prevent recursion since calling set_alert_template() also calls save()
if not hasattr(self, "_processing_set_alert_template"):
self._processing_set_alert_template = False
if self.pk and not self._processing_set_alert_template:
orig = Agent.objects.get(pk=self.pk)
mon_type_changed = self.monitoring_type != orig.monitoring_type
site_changed = self.site_id != orig.site_id
if mon_type_changed or site_changed:
self._processing_set_alert_template = True
self.set_alert_template()
self._processing_set_alert_template = False
super().save(*args, **kwargs)
def client(self) -> "Client":
return self.site.client
def timezone(self) -> str:
# return the default timezone unless the timezone is explicitly set per agent
if self.time_zone:
return self.time_zone
return get_core_settings().default_time_zone
def is_posix(self) -> bool:
return self.plat in {AgentPlat.LINUX, AgentPlat.DARWIN}
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
elif "32 bit" in self.operating_system or "32bit" in self.operating_system:
return "32"
return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
ver = settings.LATEST_AGENT_VER
if not self.goarch:
DebugLog.warning(
agent=self,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
)
return "noarch"
if pyver.parse(self.version) <= pyver.parse("1.3.0"):
return "not supported"
url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"
if not force:
if self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).exists():
self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).delete()
PendingAction.objects.create(
agent=self,
action_type=PAAction.AGENT_UPDATE,
details={
"url": url,
"version": ver,
"inno": bin,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": ver,
"inno": bin,
},
}
asyncio.run(self.nats_cmd(nats_data, wait=False))
return "created"
def status(self) -> str:
now = djangotime.now()
offline = now - djangotime.timedelta(minutes=self.offline_time)
overdue = now - djangotime.timedelta(minutes=self.overdue_time)
if self.last_seen is not None:
if (self.last_seen < offline) and (self.last_seen > overdue):
return AGENT_STATUS_OFFLINE
elif (self.last_seen < offline) and (self.last_seen < overdue):
return AGENT_STATUS_OVERDUE
else:
return AGENT_STATUS_ONLINE
else:
return AGENT_STATUS_OFFLINE
def checks(self) -> Dict[str, Any]:
total, passing, failing, warning, info = 0, 0, 0, 0, 0
for check in self.get_checks_with_policies(exclude_overridden=True):
total += 1
if (
not hasattr(check.check_result, "status")
or isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.PASSING
):
passing += 1
elif (
isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.FAILING
):
alert_severity = (
check.check_result.alert_severity
if check.check_type
in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
)
else check.alert_severity
)
if alert_severity == AlertSeverity.ERROR:
failing += 1
elif alert_severity == AlertSeverity.WARNING:
warning += 1
elif alert_severity == AlertSeverity.INFO:
info += 1
ret = {
"total": total,
"passing": passing,
"failing": failing,
"warning": warning,
"info": info,
"has_failing_checks": failing > 0 or warning > 0,
}
return ret
def pending_actions_count(self) -> int:
ret = cache.get(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}")
if ret is None:
ret = self.pendingactions.filter(status=PAStatus.PENDING).count()
cache.set(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}", ret, 600)
return ret
def cpu_model(self) -> List[str]:
if self.is_posix:
try:
return cast(List[str], self.wmi_detail["cpus"])
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
for cpu in cpus:
name = [x["Name"] for x in cpu if "Name" in x][0]
lp, nc = "", ""
with suppress(Exception):
lp = [
x["NumberOfLogicalProcessors"]
for x in cpu
if "NumberOfCores" in x
][0]
nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
if lp and nc:
cpu_string = f"{name}, {nc}C/{lp}T"
else:
cpu_string = name
ret.append(cpu_string)
return ret
except:
return ["unknown cpu model"]
def graphics(self) -> str:
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
for i in graphics:
caption = [x["Caption"] for x in i if "Caption" in x][0]
if "microsoft remote display adapter" in caption.lower():
mrda.append("yes")
continue
ret.append([x["Caption"] for x in i if "Caption" in x][0])
# only return this if no other graphics cards were found
if not ret and mrda:
return "Microsoft Remote Display Adapter"
return ", ".join(ret)
except:
return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
except:
return "error getting local ips"
for i in ips:
try:
addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
except:
continue
if addr is None:
continue
for ip in addr:
if validators.ipv4(ip):
ret.append(ip)
if len(ret) == 1:
return cast(str, ret[0])
return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
if self.is_posix:
try:
return cast(str, self.wmi_detail["make_model"])
except:
return "error getting make/model"
with suppress(Exception):
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
model = [x["Model"] for x in comp_sys if "Model" in x][0]
if "to be filled" in model.lower():
mobo = self.wmi_detail["base_board"][0]
make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
model = [x["Product"] for x in mobo if "Product" in x][0]
if make.lower() == "lenovo":
sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
if "to be filled" not in sysfam.lower():
model = sysfam
return f"{make} {model}"
with suppress(Exception):
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
return "unknown make/model"
def physical_disks(self) -> Sequence[Disk]:
if self.is_posix:
try:
return cast(List[Disk], self.wmi_detail["disks"])
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
for disk in disks:
interface_type = [
x["InterfaceType"] for x in disk if "InterfaceType" in x
][0]
if interface_type == "USB":
continue
model = [x["Caption"] for x in disk if "Caption" in x][0]
size = [x["Size"] for x in disk if "Size" in x][0]
size_in_gb = round(int(size) / 1_073_741_824)
ret.append(f"{model} {size_in_gb:,}GB {interface_type}")
return ret
except:
return ["unknown disk"]
def serial_number(self) -> str:
if self.is_posix:
try:
return self.wmi_detail["serialnumber"]
except:
return ""
try:
return self.wmi_detail["bios"][0][0]["SerialNumber"]
except:
return ""
def hex_mesh_node_id(self) -> str:
return _b64_to_hex(self.mesh_node_id)
def online_agents(cls, min_version: str = "") -> "List[Agent]":
if min_version:
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if pyver.parse(i.version) >= pyver.parse(min_version)
and i.status == AGENT_STATUS_ONLINE
]
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if i.status == AGENT_STATUS_ONLINE
]
def is_supported_script(self, platforms: List[str]) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
if exclude_overridden:
checks = (
list(
check
for check in self.agentchecks.all()
if not check.overridden_by_policy
)
+ self.get_checks_from_policies()
)
else:
checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
results = self.taskresults.all() # type: ignore
for task in tasks:
for result in results:
if result.task.id == task.pk:
task.task_result = result
break
return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
results = self.checkresults.all() # type: ignore
for check in checks:
for result in results:
if result.assigned_check.id == check.pk:
check.check_result = result
break
return checks
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
from checks.models import Check
site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
default_policy = getattr(
get_core_settings(), f"{self.monitoring_type}_policy", None
)
# prefetch excluded objects on policies only if the policy is not None
models.prefetch_related_objects(
[
policy
for policy in (self.policy, site_policy, client_policy, default_policy)
if policy
],
"excluded_agents",
"excluded_sites",
"excluded_clients",
models.Prefetch(
"policychecks", queryset=Check.objects.select_related("script")
),
"autotasks",
)
return {
"agent_policy": (
self.policy
if self.policy and not self.policy.is_agent_excluded(self)
else None
),
"site_policy": (
site_policy
if (site_policy and not site_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
else None
),
"client_policy": (
client_policy
if (client_policy and not client_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
else None
),
"default_policy": (
default_policy
if (default_policy and not default_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
and not self.client.block_policy_inheritance
else None
),
}
def check_run_interval(self) -> int:
interval = self.check_interval
# determine if any agent checks have a custom interval and set the lowest interval
for check in self.get_checks_with_policies():
if check.run_interval and check.run_interval < interval:
# don't allow check runs less than 15s
interval = 15 if check.run_interval < 15 else check.run_interval
return interval
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
from scripts.models import Script
script = Script.objects.get(pk=scriptpk)
# always override if set on script model
if script.run_as_user:
run_as_user = True
parsed_args = script.parse_script_args(self, script.shell, args)
parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)
data = {
"func": "runscriptfull" if full else "runscript",
"timeout": timeout,
"script_args": parsed_args,
"payload": {
"code": script.code,
"shell": script.shell,
},
"run_as_user": run_as_user,
"env_vars": parsed_env_vars,
"nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
"deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
}
if history_pk != 0:
data["id"] = history_pk
running_agent = self
if run_on_any:
nats_ping = {"func": "ping"}
# try on self first
r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = self
else:
for agent in Agent.online_agents():
r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = agent
break
if running_agent.pk == self.pk:
return "Unable to find an online agent"
if wait:
return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
else:
asyncio.run(running_agent.nats_cmd(data, wait=False))
return "ok"
# auto-approves updates
def approve_updates(self) -> None:
patch_policy = self.get_patch_policy()
severity_list = []
if patch_policy.critical == "approve":
severity_list.append("Critical")
if patch_policy.important == "approve":
severity_list.append("Important")
if patch_policy.moderate == "approve":
severity_list.append("Moderate")
if patch_policy.low == "approve":
severity_list.append("Low")
if patch_policy.other == "approve":
severity_list.append("")
self.winupdates.filter(severity__in=severity_list, installed=False).exclude(
action="approve"
).update(action="approve")
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self) -> "WinUpdatePolicy":
from winupdate.models import WinUpdatePolicy
# check if any applied policy has a patch policy and if so use it
patch_policy = None
agent_policy = self.winupdatepolicy.first()
if not agent_policy:
agent_policy = WinUpdatePolicy.objects.create(agent=self)
# Get the list of policies applied to the agent and select the
# highest priority one.
policies = self.get_agent_policies()
for _, policy in policies.items():
if policy and policy.active and policy.winupdatepolicy.exists():
patch_policy = policy.winupdatepolicy.first()
break
# if policy still doesn't exist return the agent patch policy
if not patch_policy:
return agent_policy
# patch policy exists. check if any agent settings are set to override patch policy
if agent_policy.critical != "inherit":
patch_policy.critical = agent_policy.critical
if agent_policy.important != "inherit":
patch_policy.important = agent_policy.important
if agent_policy.moderate != "inherit":
patch_policy.moderate = agent_policy.moderate
if agent_policy.low != "inherit":
patch_policy.low = agent_policy.low
if agent_policy.other != "inherit":
patch_policy.other = agent_policy.other
if agent_policy.run_time_frequency != "inherit":
patch_policy.run_time_frequency = agent_policy.run_time_frequency
patch_policy.run_time_hour = agent_policy.run_time_hour
patch_policy.run_time_days = agent_policy.run_time_days
if agent_policy.reboot_after_install != "inherit":
patch_policy.reboot_after_install = agent_policy.reboot_after_install
if not agent_policy.reprocess_failed_inherit:
patch_policy.reprocess_failed = agent_policy.reprocess_failed
patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
patch_policy.email_if_fail = agent_policy.email_if_fail
return patch_policy
def get_approved_update_guids(self) -> list[str]:
return list(
self.winupdates.filter(action="approve", installed=False).values_list( # type: ignore
"guid", flat=True
)
)
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
core = get_core_settings()
policies = self.get_agent_policies()
# loop through all policies applied to agent and return an alert_template if found
processed_policies: List[int] = []
for key, policy in policies.items():
# the global default alert_template overrides a default policy that has an alert template applied
if (
"default" in key
and core.alert_template
and core.alert_template.is_active
and not core.alert_template.is_agent_excluded(self)
):
self.alert_template = core.alert_template
self.save(update_fields=["alert_template"])
return core.alert_template
elif (
policy
and policy.active
and policy.pk not in processed_policies
and policy.alert_template
and policy.alert_template.is_active
and not policy.alert_template.is_agent_excluded(self)
):
self.alert_template = policy.alert_template
self.save(update_fields=["alert_template"])
return policy.alert_template
elif (
"site" in key
and self.site.alert_template
and self.site.alert_template.is_active
and not self.site.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.alert_template
self.save(update_fields=["alert_template"])
return self.site.alert_template
elif (
"client" in key
and self.site.client.alert_template
and self.site.client.alert_template.is_active
and not self.site.client.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.client.alert_template
self.save(update_fields=["alert_template"])
return self.site.client.alert_template
# no alert templates found or agent has been excluded
self.alert_template = None
self.save(update_fields=["alert_template"])
return None
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_availability_alert(
self, skip_create=not self.should_create_alert(alert_template)
)
def get_checks_from_policies(self) -> "List[Check]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance or self.agentchecks.exists():
cache_key = f"agent_{self.agent_id}_checks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"
cached_checks = cache.get(cache_key)
if isinstance(cached_checks, list):
return cached_checks
else:
# clear agent checks that have overridden_by_policy set
self.agentchecks.update(overridden_by_policy=False) # type: ignore
# get agent checks based on policies
checks = Policy.get_policy_checks(self)
cache.set(cache_key, checks, 600)
return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance:
cache_key = f"agent_{self.agent_id}_tasks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"
cached_tasks = cache.get(cache_key)
if isinstance(cached_tasks, list):
return cached_tasks
else:
# get agent tasks based on policies
tasks = Policy.get_policy_tasks(self)
cache.set(cache_key, tasks, 600)
return tasks
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
opts = setup_nats_options()
try:
nc = await nats.connect(**opts)
except:
return "natsdown"
if wait:
try:
msg = await nc.request(
self.agent_id, msgpack.dumps(data), timeout=timeout
)
except TimeoutError:
ret = "timeout"
else:
try:
ret = msgpack.loads(msg.data)
except Exception as e:
ret = str(e)
logger.error(e)
await nc.close()
return ret
else:
await nc.publish(self.agent_id, msgpack.dumps(data))
await nc.flush()
await nc.close()
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
"""
Return type: tuple(message: str, error: bool)
"""
if mode == "tacagent":
if self.plat == AgentPlat.LINUX:
cmd = "systemctl restart tacticalagent.service"
shell = 3
elif self.plat == AgentPlat.DARWIN:
cmd = "launchctl kickstart -k system/tacticalagent"
shell = 3
else:
cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
shell = 1
asyncio.run(
send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
)
return "ok", False
elif mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
if wait:
r = asyncio.run(self.nats_cmd(data, timeout=20))
if r == "ok":
return "ok", False
else:
return str(r), True
else:
asyncio.run(self.nats_cmd(data, timeout=20, wait=False))
return "ok", False
return "invalid", True
def serialize(agent: "Agent") -> Dict[str, Any]:
# serializes the agent and returns json
from .serializers import AgentAuditSerializer
return AgentAuditSerializer(agent).data
def delete_superseded_updates(self) -> None:
with suppress(Exception):
pks = [] # list of pks to delete
kbs = list(self.winupdates.values_list("kb", flat=True))
d = Counter(kbs)
dupes = [k for k, v in d.items() if v > 1]
for dupe in dupes:
titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
# extract the version from the title and sort from oldest to newest
# skip if no version info is available, since there is nothing to parse
try:
matches = r"(Version|Versão)"
pattern = r"\(" + matches + r"(.*?)\)"
vers = [
re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
for i in titles
]
sorted_vers = sorted(vers, key=LooseVersion)
except:
continue
# append all but the latest version to our list of pks to delete
for ver in sorted_vers[:-1]:
q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
pks.append(q.first().pk)
pks = list(set(pks))
self.winupdates.filter(pk__in=pks).delete()
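# --- illustrative worked example of the title parsing above, not part of the upstream source ---
# For duplicate KBs the version is extracted from the update title and only the newest one is kept:
title = "Security Intelligence Update for Microsoft Defender Antivirus - KB2267602 (Version 1.381.2404.0)"
re.search(r"\((Version|Versão)(.*?)\)", title, flags=re.IGNORECASE).group(2).strip()  # -> "1.381.2404.0"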
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
return bool(
self.overdue_dashboard_alert
or self.overdue_email_alert
or self.overdue_text_alert
or (
alert_template
and (
alert_template.agent_always_alert
or alert_template.agent_always_email
or alert_template.agent_always_text
)
)
)
def send_outage_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
(
f"Data has not been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"within the expected time."
),
alert_template=self.alert_template,
)
def send_recovery_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
(
f"Data has been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"after an interruption in data transmission."
),
alert_template=self.alert_template,
)
def send_outage_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
alert_template=self.alert_template,
)
def send_recovery_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
alert_template=self.alert_template,
)
def bulk_run_checks(request, target, pk):
q = Q()
match target:
case "client":
q = Q(site__client__id=pk)
case "site":
q = Q(site__id=pk)
agent_ids = list(
Agent.objects.only("agent_id", "site")
.filter(q)
.values_list("agent_id", flat=True)
)
if not agent_ids:
return notify_error("No agents matched query")
payload = {"func": "runchecks"}
items = [(agent_id, payload) for agent_id in agent_ids]
try:
asyncio.run(abulk_nats_command(items=items))
except NatsDown as e:
return notify_error(str(e))
ret = f"Checks will now be run on {len(agent_ids)} agents"
return Response(ret) | null |
189,115 | from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union, cast
from django.db import models
from core.utils import get_core_settings
from tacticalrmm.constants import (
AuditActionType,
AuditObjType,
DebugLogLevel,
DebugLogType,
PAAction,
PAStatus,
)
from tacticalrmm.middleware import get_debug_info, get_username
from tacticalrmm.models import PermissionQuerySet
def get_core_settings() -> "CoreSettings":
from core.models import CORESETTINGS_CACHE_KEY, CoreSettings
coresettings = cache.get(CORESETTINGS_CACHE_KEY)
if coresettings and isinstance(coresettings, CoreSettings):
return coresettings
else:
coresettings = CoreSettings.objects.first()
if not coresettings:
raise CoreSettingsNotFound("CoreSettings not found.")
cache.set(CORESETTINGS_CACHE_KEY, coresettings, 600)
return cast(CoreSettings, coresettings)
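# --- illustrative sketch, not part of the upstream source: the cache-aside pattern above ---
CORE = get_core_settings()  # first call hits the database and primes the cache for 600 seconds
CORE = get_core_settings()  # subsequent calls within the TTL are served from the cache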
def get_debug_level() -> str:
return get_core_settings().agent_debug_level | null |
189,116 | from django.core.exceptions import ObjectDoesNotExist
from django.db import migrations
def update_agent_field(apps, schema_editor):
AuditLog = apps.get_model("logs", "AuditLog")
Agent = apps.get_model("agents", "Agent")
for log in AuditLog.objects.exclude(agent_id=None):
try:
log.agent_id = Agent.objects.get(pk=log.agent_id).agent_id
log.save()
except (ObjectDoesNotExist, ValueError):
pass | null |
189,117 | import datetime as dt
from django.db.models.signals import post_init
from django.dispatch import receiver
from tacticalrmm.constants import PAAction, PAStatus
from tacticalrmm.helpers import date_is_in_past
from .models import PendingAction
class PendingAction(models.Model):
objects = PermissionQuerySet.as_manager()
id = models.BigAutoField(primary_key=True)
agent = models.ForeignKey(
"agents.Agent",
related_name="pendingactions",
on_delete=models.CASCADE,
)
entry_time = models.DateTimeField(auto_now_add=True)
action_type = models.CharField(
max_length=255, choices=PAAction.choices, null=True, blank=True
)
status = models.CharField(
max_length=255,
choices=PAStatus.choices,
default=PAStatus.PENDING,
)
details = models.JSONField(null=True, blank=True)
def __str__(self) -> str:
return f"{self.agent.hostname} - {self.action_type}"
def due(self) -> str:
if self.action_type == PAAction.SCHED_REBOOT:
return cast(str, self.details["time"])
elif self.action_type == PAAction.AGENT_UPDATE:
return "Next update cycle"
elif self.action_type == PAAction.CHOCO_INSTALL:
return "ASAP"
return "On next checkin"
def description(self) -> Optional[str]:
if self.action_type == PAAction.SCHED_REBOOT:
return "Device pending reboot"
elif self.action_type == PAAction.AGENT_UPDATE:
return f"Agent update to {self.details['version']}"
elif self.action_type == PAAction.CHOCO_INSTALL:
return f"{self.details['name']} software install"
elif self.action_type in (
PAAction.RUN_CMD,
PAAction.RUN_SCRIPT,
PAAction.RUN_PATCH_SCAN,
PAAction.RUN_PATCH_INSTALL,
):
return f"{self.action_type}"
return None
def handle_status(sender, instance: PendingAction, **kwargs):
if instance.pk:
# change status to completed once scheduled reboot date/time has expired
if (
instance.action_type == PAAction.SCHED_REBOOT
and instance.status == PAStatus.PENDING
):
reboot_time = dt.datetime.strptime(
instance.details["time"], "%Y-%m-%d %H:%M:%S"
)
if date_is_in_past(
datetime_obj=reboot_time, agent_tz=instance.agent.timezone
):
instance.status = PAStatus.COMPLETED
instance.save(update_fields=["status"]) | null |
189,118 | from tacticalrmm.celery import app
class AutomatedTask(BaseAuditModel):
def __str__(self) -> str:
def save(self, *args, **kwargs) -> None:
def delete(self, *args, **kwargs):
def schedule(self) -> Optional[str]:
def fields_that_trigger_task_update_on_agent(self) -> List[str]:
def serialize(task):
def create_policy_task(
self, policy: "Policy", assigned_check: "Optional[Check]" = None
) -> None:
def generate_nats_task_payload(self) -> Dict[str, Any]:
def create_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
def modify_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
def delete_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
def run_win_task(self, agent: "Optional[Agent]" = None) -> str:
def should_create_alert(self, alert_template=None):
def run_win_policy_autotasks_task(task: int) -> str:
from autotasks.models import AutomatedTask
try:
policy_task = AutomatedTask.objects.get(pk=task)
except AutomatedTask.DoesNotExist:
return "AutomatedTask not found"
if not policy_task.policy:
return "AutomatedTask must be a policy"
# get related agents from policy
for agent in policy_task.policy.related_agents():
policy_task.run_win_task(agent)
return "ok" | null |
189,119 | import uuid
from typing import Dict
from django.contrib.postgres.fields import ArrayField
from django.core.cache import cache
from django.db import models
from agents.models import Agent
from logs.models import BaseAuditModel
from tacticalrmm.constants import AGENT_DEFER, AgentMonType, CustomFieldType, GoArch
from tacticalrmm.models import PermissionQuerySet
def _default_failing_checks_data() -> Dict[str, bool]:
return {"error": False, "warning": False} | null |
189,120 | from django.db import migrations
from tacticalrmm.constants import GoArch
def change_arch(apps, schema_editor):
Deployment = apps.get_model("clients", "Deployment")
for d in Deployment.objects.all():
if d.arch == "64":
d.arch = GoArch.AMD64
else:
d.arch = GoArch.i386
d.save(update_fields=["arch"]) | null |
189,121 | import asyncio
from typing import Dict, Tuple, Union
from django.conf import settings
from django.shortcuts import get_object_or_404
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from agents.models import Agent
from tacticalrmm.helpers import notify_error
from .permissions import WinSvcsPerms
def process_nats_response(data: Union[str, Dict]) -> Tuple[bool, bool, str]:
natserror = isinstance(data, str)
success = (
data["success"]
if isinstance(data, dict) and isinstance(data["success"], bool)
else False
)
errormsg = (
data["errormsg"]
if isinstance(data, dict) and isinstance(data["errormsg"], str)
else "timeout"
)
return success, natserror, errormsg | null |
189,122 | import asyncio
from django.shortcuts import get_object_or_404
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from django.conf import settings
from agents.permissions import RunScriptPerms
from tacticalrmm.constants import ScriptShell, ScriptType
from tacticalrmm.helpers import notify_error
from .models import Script, ScriptSnippet
from .permissions import ScriptsPerms
from .serializers import (
ScriptSerializer,
ScriptSnippetSerializer,
ScriptTableSerializer,
)
from core.utils import clear_entire_cache
class Script(BaseAuditModel):
guid = models.CharField(max_length=64, null=True, blank=True)
name = models.CharField(max_length=255)
description = models.TextField(null=True, blank=True, default="")
filename = models.CharField(max_length=255, null=True, blank=True)
shell = models.CharField(
max_length=100, choices=ScriptShell.choices, default=ScriptShell.POWERSHELL
)
script_type = models.CharField(
max_length=100, choices=ScriptType.choices, default=ScriptType.USER_DEFINED
)
args = ArrayField(
models.TextField(null=True, blank=True),
null=True,
blank=True,
default=list,
)
env_vars = ArrayField(
models.TextField(null=True, blank=True),
null=True,
blank=True,
default=list,
)
syntax = TextField(null=True, blank=True)
favorite = models.BooleanField(default=False)
category = models.CharField(max_length=100, null=True, blank=True)
script_body = models.TextField(blank=True, default="")
script_hash = models.CharField(max_length=100, null=True, blank=True)
code_base64 = models.TextField(blank=True, default="") # deprecated
default_timeout = models.PositiveIntegerField(default=90)
hidden = models.BooleanField(default=False)
supported_platforms = ArrayField(
models.CharField(max_length=20), null=True, blank=True, default=list
)
run_as_user = models.BooleanField(default=False)
def __str__(self):
return self.name
def code_no_snippets(self):
return self.script_body or ""
def code(self):
return self.replace_with_snippets(self.code_no_snippets)
def replace_with_snippets(cls, code):
# check if snippet has been added to script body
matches = re.finditer(r"{{(.*)}}", code)
if matches:
replaced_code = code
for snippet in matches:
snippet_name = snippet.group(1).strip()
if ScriptSnippet.objects.filter(name=snippet_name).exists():
value = ScriptSnippet.objects.get(name=snippet_name).code
else:
value = ""
replaced_code = re.sub(
snippet.group(), value.replace("\\", "\\\\"), replaced_code
)
return replaced_code
return code
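# --- illustrative sketch, not part of the upstream source; "Cleanup Helper" is a hypothetical snippet name ---
body = "Write-Output 'start'\n{{Cleanup Helper}}\nWrite-Output 'done'"
expanded = Script.replace_with_snippets(body)  # the marker is replaced with the snippet's code, or "" if no such snippet exists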
def hash_script_body(self):
from django.conf import settings
msg = self.code.encode(errors="ignore")
return hmac.new(settings.SECRET_KEY.encode(), msg, hashlib.sha256).hexdigest()
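# --- illustrative sketch, not part of the upstream source: the HMAC above can be compared against the
# stored hash to detect that a script body was modified outside the application ---
unchanged = hmac.compare_digest(script.script_hash or "", script.hash_script_body())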
def load_community_scripts(cls):
import json
import os
from django.conf import settings
# load community uploaded scripts into the database
# skip ones that already exist, only updating name / desc in case it changes
# for install script
scripts_dir = os.path.join(settings.SCRIPTS_DIR, "scripts")
with open(os.path.join(settings.SCRIPTS_DIR, "community_scripts.json")) as f:
info = json.load(f)
# used to remove scripts from DB that are removed from the json file and file system
community_scripts_processed = [] # list of script guids
for script in info:
if os.path.exists(os.path.join(scripts_dir, script["filename"])):
s = cls.objects.filter(
script_type=ScriptType.BUILT_IN, guid=script["guid"]
)
category = (
script["category"] if "category" in script.keys() else "Community"
)
default_timeout = (
int(script["default_timeout"])
if "default_timeout" in script.keys()
else 90
)
args = script["args"] if "args" in script.keys() else []
syntax = script["syntax"] if "syntax" in script.keys() else ""
supported_platforms = (
script["supported_platforms"]
if "supported_platforms" in script.keys()
else []
)
# if community script exists update it
if s.exists():
i: Script = s.get()
i.name = script["name"]
i.description = script["description"]
i.category = category
i.shell = script["shell"]
i.default_timeout = default_timeout
i.args = args
i.syntax = syntax
i.filename = script["filename"]
i.supported_platforms = supported_platforms
with open(os.path.join(scripts_dir, script["filename"]), "rb") as f:
i.script_body = f.read().decode("utf-8")
# i.hash_script_body()
i.save()
community_scripts_processed.append(i.guid)
# doesn't exist in database so create it
else:
print(f"Adding new community script: {script['name']}")
with open(os.path.join(scripts_dir, script["filename"]), "rb") as f:
script_body = f.read().decode("utf-8")
new_script: Script = cls(
script_body=script_body,
guid=script["guid"],
name=script["name"],
description=script["description"],
shell=script["shell"],
script_type=ScriptType.BUILT_IN,
category=category,
default_timeout=default_timeout,
args=args,
filename=script["filename"],
syntax=syntax,
supported_platforms=supported_platforms,
)
# new_script.hash_script_body() # also saves script
new_script.save()
community_scripts_processed.append(new_script.guid)
# check for community scripts that were deleted from json and scripts folder
count, _ = (
Script.objects.filter(script_type=ScriptType.BUILT_IN)
.exclude(guid__in=community_scripts_processed)
.delete()
)
if count:
print(
f"Removing {count} community scripts that was removed from source repo"
)
def serialize(script):
# serializes the script and returns json
from .serializers import ScriptSerializer
return ScriptSerializer(script).data
# TODO refactor common functionality of parse functions
def parse_script_args(cls, agent, shell: str, args: List[str] = []) -> list:
if not args:
return []
temp_args = []
# pattern to match for injection
pattern = re.compile(".*\\{\\{(.*)\\}\\}.*")
for arg in args:
if match := pattern.match(arg):
# only get the match between the () in regex
string = match.group(1)
value = replace_arg_db_values(
string=string,
instance=agent,
shell=shell,
quotes=shell != ScriptShell.CMD,
)
if value:
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
except re.error:
temp_args.append(
re.sub("\\{\\{.*\\}\\}", re.escape(value), arg)
)
else:
# pass parameter unaltered
temp_args.append(arg)
else:
temp_args.append(arg)
return temp_args
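# --- illustrative sketch, not part of the upstream source; assumes `agent` is a saved Agent instance ---
args = ["-Hostname {{agent.hostname}}", "-Verbose"]
parsed = Script.parse_script_args(agent, ScriptShell.POWERSHELL, args)
# the {{...}} placeholder is resolved against the agent (quoted for every shell except CMD),
# e.g. ["-Hostname 'DESKTOP-123'", "-Verbose"]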
# TODO refactor common functionality of parse functions
def parse_script_env_vars(cls, agent, shell: str, env_vars: list[str] = []) -> list:
if not env_vars:
return []
temp_env_vars = []
pattern = re.compile(".*\\{\\{(.*)\\}\\}.*")
for env_var in env_vars:
# must be in format KEY=VALUE
try:
env_key = env_var.split("=")[0]
env_val = env_var.split("=")[1]
except:
continue
if match := pattern.match(env_val):
string = match.group(1)
value = replace_arg_db_values(
string=string,
instance=agent,
shell=shell,
quotes=False,
)
if value:
try:
new_val = re.sub("\\{\\{.*\\}\\}", value, env_val)
except re.error:
new_val = re.sub("\\{\\{.*\\}\\}", re.escape(value), env_val)
temp_env_vars.append(f"{env_key}={new_val}")
else:
# pass parameter unaltered
temp_env_vars.append(env_var)
return temp_env_vars
def download(request, pk):
script = get_object_or_404(Script, pk=pk)
with_snippets = request.GET.get("with_snippets", True)
if with_snippets == "false":
with_snippets = False
match script.shell:
case ScriptShell.POWERSHELL:
ext = ".ps1"
case ScriptShell.CMD:
ext = ".bat"
case ScriptShell.PYTHON:
ext = ".py"
case ScriptShell.SHELL:
ext = ".sh"
case ScriptShell.NUSHELL:
ext = ".nu"
case ScriptShell.DENO:
ext = ".ts"
case _:
ext = ""
return Response(
{
"filename": f"{script.name}{ext}",
"code": script.code if with_snippets else script.code_no_snippets,
}
) | null |
189,123 | import asyncio
import datetime as dt
import time
from contextlib import suppress
from zoneinfo import ZoneInfo
from django.utils import timezone as djangotime
from packaging import version as pyver
from agents.models import Agent
from logs.models import DebugLog
from tacticalrmm.celery import app
from tacticalrmm.constants import AGENT_STATUS_ONLINE, DebugLogType
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat: "AgentPlat" = models.CharField( # type: ignore
max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
goarch: "GoArch" = models.CharField( # type: ignore
max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(
max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
return self.hostname
def save(self, *args, **kwargs):
# prevent recursion since calling set_alert_template() also calls save()
if not hasattr(self, "_processing_set_alert_template"):
self._processing_set_alert_template = False
if self.pk and not self._processing_set_alert_template:
orig = Agent.objects.get(pk=self.pk)
mon_type_changed = self.monitoring_type != orig.monitoring_type
site_changed = self.site_id != orig.site_id
if mon_type_changed or site_changed:
self._processing_set_alert_template = True
self.set_alert_template()
self._processing_set_alert_template = False
super().save(*args, **kwargs)
def client(self) -> "Client":
return self.site.client
def timezone(self) -> str:
# return the default timezone unless the timezone is explicitly set per agent
if self.time_zone:
return self.time_zone
return get_core_settings().default_time_zone
def is_posix(self) -> bool:
return self.plat in {AgentPlat.LINUX, AgentPlat.DARWIN}
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
elif "32 bit" in self.operating_system or "32bit" in self.operating_system:
return "32"
return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
ver = settings.LATEST_AGENT_VER
if not self.goarch:
DebugLog.warning(
agent=self,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
)
return "noarch"
if pyver.parse(self.version) <= pyver.parse("1.3.0"):
return "not supported"
url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"
if not force:
if self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).exists():
self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).delete()
PendingAction.objects.create(
agent=self,
action_type=PAAction.AGENT_UPDATE,
details={
"url": url,
"version": ver,
"inno": bin,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": ver,
"inno": bin,
},
}
asyncio.run(self.nats_cmd(nats_data, wait=False))
return "created"
def status(self) -> str:
now = djangotime.now()
offline = now - djangotime.timedelta(minutes=self.offline_time)
overdue = now - djangotime.timedelta(minutes=self.overdue_time)
if self.last_seen is not None:
if (self.last_seen < offline) and (self.last_seen > overdue):
return AGENT_STATUS_OFFLINE
elif (self.last_seen < offline) and (self.last_seen < overdue):
return AGENT_STATUS_OVERDUE
else:
return AGENT_STATUS_ONLINE
else:
return AGENT_STATUS_OFFLINE
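# --- illustrative worked example, not part of the upstream source: with the defaults offline_time=4
# and overdue_time=30, an agent last seen 10 minutes ago is "offline" (older than the offline window
# but newer than the overdue window), while one last seen 45 minutes ago is "overdue" ---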
def checks(self) -> Dict[str, Any]:
total, passing, failing, warning, info = 0, 0, 0, 0, 0
for check in self.get_checks_with_policies(exclude_overridden=True):
total += 1
if (
not hasattr(check.check_result, "status")
or isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.PASSING
):
passing += 1
elif (
isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.FAILING
):
alert_severity = (
check.check_result.alert_severity
if check.check_type
in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
)
else check.alert_severity
)
if alert_severity == AlertSeverity.ERROR:
failing += 1
elif alert_severity == AlertSeverity.WARNING:
warning += 1
elif alert_severity == AlertSeverity.INFO:
info += 1
ret = {
"total": total,
"passing": passing,
"failing": failing,
"warning": warning,
"info": info,
"has_failing_checks": failing > 0 or warning > 0,
}
return ret
def pending_actions_count(self) -> int:
ret = cache.get(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}")
if ret is None:
ret = self.pendingactions.filter(status=PAStatus.PENDING).count()
cache.set(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}", ret, 600)
return ret
def cpu_model(self) -> List[str]:
if self.is_posix:
try:
return cast(List[str], self.wmi_detail["cpus"])
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
for cpu in cpus:
name = [x["Name"] for x in cpu if "Name" in x][0]
lp, nc = "", ""
with suppress(Exception):
lp = [
x["NumberOfLogicalProcessors"]
for x in cpu
if "NumberOfCores" in x
][0]
nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
if lp and nc:
cpu_string = f"{name}, {nc}C/{lp}T"
else:
cpu_string = name
ret.append(cpu_string)
return ret
except:
return ["unknown cpu model"]
def graphics(self) -> str:
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
for i in graphics:
caption = [x["Caption"] for x in i if "Caption" in x][0]
if "microsoft remote display adapter" in caption.lower():
mrda.append("yes")
continue
ret.append([x["Caption"] for x in i if "Caption" in x][0])
# only return this if no other graphics cards
if not ret and mrda:
return "Microsoft Remote Display Adapter"
return ", ".join(ret)
except:
return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
except:
return "error getting local ips"
for i in ips:
try:
addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
except:
continue
if addr is None:
continue
for ip in addr:
if validators.ipv4(ip):
ret.append(ip)
if len(ret) == 1:
return cast(str, ret[0])
return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
if self.is_posix:
try:
return cast(str, self.wmi_detail["make_model"])
except:
return "error getting make/model"
with suppress(Exception):
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
model = [x["Model"] for x in comp_sys if "Model" in x][0]
if "to be filled" in model.lower():
mobo = self.wmi_detail["base_board"][0]
make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
model = [x["Product"] for x in mobo if "Product" in x][0]
if make.lower() == "lenovo":
sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
if "to be filled" not in sysfam.lower():
model = sysfam
return f"{make} {model}"
with suppress(Exception):
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
return "unknown make/model"
def physical_disks(self) -> Sequence[Disk]:
if self.is_posix:
try:
return cast(List[Disk], self.wmi_detail["disks"])
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
for disk in disks:
interface_type = [
x["InterfaceType"] for x in disk if "InterfaceType" in x
][0]
if interface_type == "USB":
continue
model = [x["Caption"] for x in disk if "Caption" in x][0]
size = [x["Size"] for x in disk if "Size" in x][0]
size_in_gb = round(int(size) / 1_073_741_824)
ret.append(f"{model} {size_in_gb:,}GB {interface_type}")
return ret
except:
return ["unknown disk"]
def serial_number(self) -> str:
if self.is_posix:
try:
return self.wmi_detail["serialnumber"]
except:
return ""
try:
return self.wmi_detail["bios"][0][0]["SerialNumber"]
except:
return ""
def hex_mesh_node_id(self) -> str:
return _b64_to_hex(self.mesh_node_id)
def online_agents(cls, min_version: str = "") -> "List[Agent]":
if min_version:
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if pyver.parse(i.version) >= pyver.parse(min_version)
and i.status == AGENT_STATUS_ONLINE
]
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if i.status == AGENT_STATUS_ONLINE
]
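# --- illustrative usage sketch, not part of the upstream source ---
targets = Agent.online_agents(min_version="2.0.0")  # only agents that are online and running >= 2.0.0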
def is_supported_script(self, platforms: List[str]) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
if exclude_overridden:
checks = (
list(
check
for check in self.agentchecks.all()
if not check.overridden_by_policy
)
+ self.get_checks_from_policies()
)
else:
checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
results = self.taskresults.all() # type: ignore
for task in tasks:
for result in results:
if result.task.id == task.pk:
task.task_result = result
break
return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
results = self.checkresults.all() # type: ignore
for check in checks:
for result in results:
if result.assigned_check.id == check.pk:
check.check_result = result
break
return checks
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
from checks.models import Check
site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
default_policy = getattr(
get_core_settings(), f"{self.monitoring_type}_policy", None
)
# prefetch excluded objects on policies only if the policy is not None
models.prefetch_related_objects(
[
policy
for policy in (self.policy, site_policy, client_policy, default_policy)
if policy
],
"excluded_agents",
"excluded_sites",
"excluded_clients",
models.Prefetch(
"policychecks", queryset=Check.objects.select_related("script")
),
"autotasks",
)
return {
"agent_policy": (
self.policy
if self.policy and not self.policy.is_agent_excluded(self)
else None
),
"site_policy": (
site_policy
if (site_policy and not site_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
else None
),
"client_policy": (
client_policy
if (client_policy and not client_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
else None
),
"default_policy": (
default_policy
if (default_policy and not default_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
and not self.client.block_policy_inheritance
else None
),
}
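# --- illustrative sketch, not part of the upstream source: the dict above is ordered agent > site >
# client > default, so the first non-None entry is the highest-priority applicable policy ---
policies = agent.get_agent_policies()
effective = next((p for p in policies.values() if p is not None), None)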
def check_run_interval(self) -> int:
interval = self.check_interval
# determine if any agent checks have a custom interval and set the lowest interval
for check in self.get_checks_with_policies():
if check.run_interval and check.run_interval < interval:
# don't allow check runs less than 15s
interval = 15 if check.run_interval < 15 else check.run_interval
return interval
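# --- illustrative worked example, not part of the upstream source: with check_interval=120 and checks
# whose run_interval values are 0, 60 and 10, the effective interval is 15 (60 undercuts 120, then 10
# is clamped to the 15 second floor) ---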
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
from scripts.models import Script
script = Script.objects.get(pk=scriptpk)
# always override if set on script model
if script.run_as_user:
run_as_user = True
parsed_args = script.parse_script_args(self, script.shell, args)
parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)
data = {
"func": "runscriptfull" if full else "runscript",
"timeout": timeout,
"script_args": parsed_args,
"payload": {
"code": script.code,
"shell": script.shell,
},
"run_as_user": run_as_user,
"env_vars": parsed_env_vars,
"nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
"deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
}
if history_pk != 0:
data["id"] = history_pk
running_agent = self
if run_on_any:
nats_ping = {"func": "ping"}
# try on self first
r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = self
else:
for agent in Agent.online_agents():
r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = agent
break
if running_agent.pk == self.pk:
return "Unable to find an online agent"
if wait:
return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
else:
asyncio.run(running_agent.nats_cmd(data, wait=False))
return "ok"
# auto approves updates
def approve_updates(self) -> None:
patch_policy = self.get_patch_policy()
severity_list = []
if patch_policy.critical == "approve":
severity_list.append("Critical")
if patch_policy.important == "approve":
severity_list.append("Important")
if patch_policy.moderate == "approve":
severity_list.append("Moderate")
if patch_policy.low == "approve":
severity_list.append("Low")
if patch_policy.other == "approve":
severity_list.append("")
self.winupdates.filter(severity__in=severity_list, installed=False).exclude(
action="approve"
).update(action="approve")
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self) -> "WinUpdatePolicy":
from winupdate.models import WinUpdatePolicy
# check if site has a patch policy and if so use it
patch_policy = None
agent_policy = self.winupdatepolicy.first()
if not agent_policy:
agent_policy = WinUpdatePolicy.objects.create(agent=self)
# Get the list of policies applied to the agent and select the
# highest priority one.
policies = self.get_agent_policies()
for _, policy in policies.items():
if policy and policy.active and policy.winupdatepolicy.exists():
patch_policy = policy.winupdatepolicy.first()
break
# if policy still doesn't exist return the agent patch policy
if not patch_policy:
return agent_policy
# patch policy exists. check if any agent settings are set to override patch policy
if agent_policy.critical != "inherit":
patch_policy.critical = agent_policy.critical
if agent_policy.important != "inherit":
patch_policy.important = agent_policy.important
if agent_policy.moderate != "inherit":
patch_policy.moderate = agent_policy.moderate
if agent_policy.low != "inherit":
patch_policy.low = agent_policy.low
if agent_policy.other != "inherit":
patch_policy.other = agent_policy.other
if agent_policy.run_time_frequency != "inherit":
patch_policy.run_time_frequency = agent_policy.run_time_frequency
patch_policy.run_time_hour = agent_policy.run_time_hour
patch_policy.run_time_days = agent_policy.run_time_days
if agent_policy.reboot_after_install != "inherit":
patch_policy.reboot_after_install = agent_policy.reboot_after_install
if not agent_policy.reprocess_failed_inherit:
patch_policy.reprocess_failed = agent_policy.reprocess_failed
patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
patch_policy.email_if_fail = agent_policy.email_if_fail
return patch_policy
def get_approved_update_guids(self) -> list[str]:
return list(
self.winupdates.filter(action="approve", installed=False).values_list( # type: ignore
"guid", flat=True
)
)
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
core = get_core_settings()
policies = self.get_agent_policies()
# loop through all policies applied to agent and return an alert_template if found
processed_policies: List[int] = []
for key, policy in policies.items():
# the global alert template takes precedence over an alert template applied to the default policy
if (
"default" in key
and core.alert_template
and core.alert_template.is_active
and not core.alert_template.is_agent_excluded(self)
):
self.alert_template = core.alert_template
self.save(update_fields=["alert_template"])
return core.alert_template
elif (
policy
and policy.active
and policy.pk not in processed_policies
and policy.alert_template
and policy.alert_template.is_active
and not policy.alert_template.is_agent_excluded(self)
):
self.alert_template = policy.alert_template
self.save(update_fields=["alert_template"])
return policy.alert_template
elif (
"site" in key
and self.site.alert_template
and self.site.alert_template.is_active
and not self.site.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.alert_template
self.save(update_fields=["alert_template"])
return self.site.alert_template
elif (
"client" in key
and self.site.client.alert_template
and self.site.client.alert_template.is_active
and not self.site.client.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.client.alert_template
self.save(update_fields=["alert_template"])
return self.site.client.alert_template
# no alert templates found or agent has been excluded
self.alert_template = None
self.save(update_fields=["alert_template"])
return None
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_availability_alert(
self, skip_create=not self.should_create_alert(alert_template)
)
def get_checks_from_policies(self) -> "List[Check]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance or self.agentchecks.exists():
cache_key = f"agent_{self.agent_id}_checks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"
cached_checks = cache.get(cache_key)
if isinstance(cached_checks, list):
return cached_checks
else:
# clear agent checks that have overridden_by_policy set
self.agentchecks.update(overridden_by_policy=False) # type: ignore
# get agent checks based on policies
checks = Policy.get_policy_checks(self)
cache.set(cache_key, checks, 600)
return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance:
cache_key = f"agent_{self.agent_id}_tasks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"
cached_tasks = cache.get(cache_key)
if isinstance(cached_tasks, list):
return cached_tasks
else:
# get agent tasks based on policies
tasks = Policy.get_policy_tasks(self)
cache.set(cache_key, tasks, 600)
return tasks
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
opts = setup_nats_options()
try:
nc = await nats.connect(**opts)
except:
return "natsdown"
if wait:
try:
msg = await nc.request(
self.agent_id, msgpack.dumps(data), timeout=timeout
)
except TimeoutError:
ret = "timeout"
else:
try:
ret = msgpack.loads(msg.data)
except Exception as e:
ret = str(e)
logger.error(e)
await nc.close()
return ret
else:
await nc.publish(self.agent_id, msgpack.dumps(data))
await nc.flush()
await nc.close()
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
"""
Return type: tuple(message: str, error: bool)
"""
if mode == "tacagent":
if self.plat == AgentPlat.LINUX:
cmd = "systemctl restart tacticalagent.service"
shell = 3
elif self.plat == AgentPlat.DARWIN:
cmd = "launchctl kickstart -k system/tacticalagent"
shell = 3
else:
cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
shell = 1
asyncio.run(
send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
)
return "ok", False
elif mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
if wait:
r = asyncio.run(self.nats_cmd(data, timeout=20))
if r == "ok":
return "ok", False
else:
return str(r), True
else:
asyncio.run(self.nats_cmd(data, timeout=20, wait=False))
return "ok", False
return "invalid", True
def serialize(agent: "Agent") -> Dict[str, Any]:
# serializes the agent and returns json
from .serializers import AgentAuditSerializer
return AgentAuditSerializer(agent).data
def delete_superseded_updates(self) -> None:
with suppress(Exception):
pks = [] # list of pks to delete
kbs = list(self.winupdates.values_list("kb", flat=True))
d = Counter(kbs)
dupes = [k for k, v in d.items() if v > 1]
for dupe in dupes:
titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
# extract the version from the title and sort from oldest to newest
# skip if no version info is available, since there is nothing to parse
try:
matches = r"(Version|Versão)"
pattern = r"\(" + matches + r"(.*?)\)"
vers = [
re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
for i in titles
]
sorted_vers = sorted(vers, key=LooseVersion)
except:
continue
# append all but the latest version to our list of pks to delete
for ver in sorted_vers[:-1]:
q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
pks.append(q.first().pk)
pks = list(set(pks))
self.winupdates.filter(pk__in=pks).delete()
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
return bool(
self.overdue_dashboard_alert
or self.overdue_email_alert
or self.overdue_text_alert
or (
alert_template
and (
alert_template.agent_always_alert
or alert_template.agent_always_email
or alert_template.agent_always_text
)
)
)
def send_outage_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
(
f"Data has not been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"within the expected time."
),
alert_template=self.alert_template,
)
def send_recovery_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
(
f"Data has been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"after an interruption in data transmission."
),
alert_template=self.alert_template,
)
def send_outage_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
alert_template=self.alert_template,
)
def send_recovery_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
alert_template=self.alert_template,
)
def auto_approve_updates_task() -> None:
# scheduled task that checks and approves updates daily
agents = Agent.objects.only(
"pk", "agent_id", "version", "last_seen", "overdue_time", "offline_time"
)
for agent in agents:
agent.delete_superseded_updates()
try:
agent.approve_updates()
except:
continue
online = [
i
for i in agents
if i.status == AGENT_STATUS_ONLINE
and pyver.parse(i.version) >= pyver.parse("1.3.0")
]
chunks = (online[i : i + 40] for i in range(0, len(online), 40))
for chunk in chunks:
for agent in chunk:
asyncio.run(agent.nats_cmd({"func": "getwinupdates"}, wait=False))
time.sleep(1) | null |
189,124 | import asyncio
import datetime as dt
import time
from contextlib import suppress
from zoneinfo import ZoneInfo
from django.utils import timezone as djangotime
from packaging import version as pyver
from agents.models import Agent
from logs.models import DebugLog
from tacticalrmm.celery import app
from tacticalrmm.constants import AGENT_STATUS_ONLINE, DebugLogType
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat: "AgentPlat" = models.CharField( # type: ignore
max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
goarch: "GoArch" = models.CharField( # type: ignore
max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(
max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
return self.hostname
def save(self, *args, **kwargs):
# prevent recursion since calling set_alert_template() also calls save()
if not hasattr(self, "_processing_set_alert_template"):
self._processing_set_alert_template = False
if self.pk and not self._processing_set_alert_template:
orig = Agent.objects.get(pk=self.pk)
mon_type_changed = self.monitoring_type != orig.monitoring_type
site_changed = self.site_id != orig.site_id
if mon_type_changed or site_changed:
self._processing_set_alert_template = True
self.set_alert_template()
self._processing_set_alert_template = False
super().save(*args, **kwargs)
def client(self) -> "Client":
return self.site.client
def timezone(self) -> str:
# return the default timezone unless the timezone is explicitly set per agent
if self.time_zone:
return self.time_zone
return get_core_settings().default_time_zone
def is_posix(self) -> bool:
return self.plat in {AgentPlat.LINUX, AgentPlat.DARWIN}
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
elif "32 bit" in self.operating_system or "32bit" in self.operating_system:
return "32"
return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
ver = settings.LATEST_AGENT_VER
if not self.goarch:
DebugLog.warning(
agent=self,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
)
return "noarch"
if pyver.parse(self.version) <= pyver.parse("1.3.0"):
return "not supported"
url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"
if not force:
if self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).exists():
self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).delete()
PendingAction.objects.create(
agent=self,
action_type=PAAction.AGENT_UPDATE,
details={
"url": url,
"version": ver,
"inno": bin,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": ver,
"inno": bin,
},
}
asyncio.run(self.nats_cmd(nats_data, wait=False))
return "created"
def status(self) -> str:
now = djangotime.now()
offline = now - djangotime.timedelta(minutes=self.offline_time)
overdue = now - djangotime.timedelta(minutes=self.overdue_time)
if self.last_seen is not None:
if (self.last_seen < offline) and (self.last_seen > overdue):
return AGENT_STATUS_OFFLINE
elif (self.last_seen < offline) and (self.last_seen < overdue):
return AGENT_STATUS_OVERDUE
else:
return AGENT_STATUS_ONLINE
else:
return AGENT_STATUS_OFFLINE
def checks(self) -> Dict[str, Any]:
total, passing, failing, warning, info = 0, 0, 0, 0, 0
for check in self.get_checks_with_policies(exclude_overridden=True):
total += 1
if (
not hasattr(check.check_result, "status")
or isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.PASSING
):
passing += 1
elif (
isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.FAILING
):
alert_severity = (
check.check_result.alert_severity
if check.check_type
in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
)
else check.alert_severity
)
if alert_severity == AlertSeverity.ERROR:
failing += 1
elif alert_severity == AlertSeverity.WARNING:
warning += 1
elif alert_severity == AlertSeverity.INFO:
info += 1
ret = {
"total": total,
"passing": passing,
"failing": failing,
"warning": warning,
"info": info,
"has_failing_checks": failing > 0 or warning > 0,
}
return ret
def pending_actions_count(self) -> int:
ret = cache.get(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}")
if ret is None:
ret = self.pendingactions.filter(status=PAStatus.PENDING).count()
cache.set(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}", ret, 600)
return ret
def cpu_model(self) -> List[str]:
if self.is_posix:
try:
return cast(List[str], self.wmi_detail["cpus"])
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
for cpu in cpus:
name = [x["Name"] for x in cpu if "Name" in x][0]
lp, nc = "", ""
with suppress(Exception):
lp = [
x["NumberOfLogicalProcessors"]
for x in cpu
if "NumberOfCores" in x
][0]
nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
if lp and nc:
cpu_string = f"{name}, {nc}C/{lp}T"
else:
cpu_string = name
ret.append(cpu_string)
return ret
except:
return ["unknown cpu model"]
def graphics(self) -> str:
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
for i in graphics:
caption = [x["Caption"] for x in i if "Caption" in x][0]
if "microsoft remote display adapter" in caption.lower():
mrda.append("yes")
continue
ret.append([x["Caption"] for x in i if "Caption" in x][0])
# only return this if no other graphics cards
if not ret and mrda:
return "Microsoft Remote Display Adapter"
return ", ".join(ret)
except:
return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
except:
return "error getting local ips"
for i in ips:
try:
addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
except:
continue
if addr is None:
continue
for ip in addr:
if validators.ipv4(ip):
ret.append(ip)
if len(ret) == 1:
return cast(str, ret[0])
return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
if self.is_posix:
try:
return cast(str, self.wmi_detail["make_model"])
except:
return "error getting make/model"
with suppress(Exception):
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
model = [x["Model"] for x in comp_sys if "Model" in x][0]
if "to be filled" in model.lower():
mobo = self.wmi_detail["base_board"][0]
make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
model = [x["Product"] for x in mobo if "Product" in x][0]
if make.lower() == "lenovo":
sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
if "to be filled" not in sysfam.lower():
model = sysfam
return f"{make} {model}"
with suppress(Exception):
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
return "unknown make/model"
def physical_disks(self) -> Sequence[Disk]:
if self.is_posix:
try:
return cast(List[Disk], self.wmi_detail["disks"])
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
for disk in disks:
interface_type = [
x["InterfaceType"] for x in disk if "InterfaceType" in x
][0]
if interface_type == "USB":
continue
model = [x["Caption"] for x in disk if "Caption" in x][0]
size = [x["Size"] for x in disk if "Size" in x][0]
size_in_gb = round(int(size) / 1_073_741_824)
ret.append(f"{model} {size_in_gb:,}GB {interface_type}")
return ret
except:
return ["unknown disk"]
def serial_number(self) -> str:
if self.is_posix:
try:
return self.wmi_detail["serialnumber"]
except:
return ""
try:
return self.wmi_detail["bios"][0][0]["SerialNumber"]
except:
return ""
def hex_mesh_node_id(self) -> str:
return _b64_to_hex(self.mesh_node_id)
def online_agents(cls, min_version: str = "") -> "List[Agent]":
if min_version:
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if pyver.parse(i.version) >= pyver.parse(min_version)
and i.status == AGENT_STATUS_ONLINE
]
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if i.status == AGENT_STATUS_ONLINE
]
def is_supported_script(self, platforms: List[str]) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
if exclude_overridden:
checks = (
list(
check
for check in self.agentchecks.all()
if not check.overridden_by_policy
)
+ self.get_checks_from_policies()
)
else:
checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
results = self.taskresults.all() # type: ignore
for task in tasks:
for result in results:
if result.task.id == task.pk:
task.task_result = result
break
return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
results = self.checkresults.all() # type: ignore
for check in checks:
for result in results:
if result.assigned_check.id == check.pk:
check.check_result = result
break
return checks
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
from checks.models import Check
site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
default_policy = getattr(
get_core_settings(), f"{self.monitoring_type}_policy", None
)
# prefetch excluded objects on policies only if the policy is not None
models.prefetch_related_objects(
[
policy
for policy in (self.policy, site_policy, client_policy, default_policy)
if policy
],
"excluded_agents",
"excluded_sites",
"excluded_clients",
models.Prefetch(
"policychecks", queryset=Check.objects.select_related("script")
),
"autotasks",
)
return {
"agent_policy": (
self.policy
if self.policy and not self.policy.is_agent_excluded(self)
else None
),
"site_policy": (
site_policy
if (site_policy and not site_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
else None
),
"client_policy": (
client_policy
if (client_policy and not client_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
else None
),
"default_policy": (
default_policy
if (default_policy and not default_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
and not self.client.block_policy_inheritance
else None
),
}
def check_run_interval(self) -> int:
interval = self.check_interval
# determine if any agent checks have a custom interval and set the lowest interval
for check in self.get_checks_with_policies():
if check.run_interval and check.run_interval < interval:
# don't allow check runs less than 15s
interval = 15 if check.run_interval < 15 else check.run_interval
return interval
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
from scripts.models import Script
script = Script.objects.get(pk=scriptpk)
# always override if set on script model
if script.run_as_user:
run_as_user = True
parsed_args = script.parse_script_args(self, script.shell, args)
parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)
data = {
"func": "runscriptfull" if full else "runscript",
"timeout": timeout,
"script_args": parsed_args,
"payload": {
"code": script.code,
"shell": script.shell,
},
"run_as_user": run_as_user,
"env_vars": parsed_env_vars,
"nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
"deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
}
if history_pk != 0:
data["id"] = history_pk
running_agent = self
if run_on_any:
nats_ping = {"func": "ping"}
# try on self first
r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = self
else:
for agent in Agent.online_agents():
r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = agent
break
if running_agent.pk == self.pk:
return "Unable to find an online agent"
if wait:
return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
else:
asyncio.run(running_agent.nats_cmd(data, wait=False))
return "ok"
# auto approves updates
def approve_updates(self) -> None:
patch_policy = self.get_patch_policy()
severity_list = []
if patch_policy.critical == "approve":
severity_list.append("Critical")
if patch_policy.important == "approve":
severity_list.append("Important")
if patch_policy.moderate == "approve":
severity_list.append("Moderate")
if patch_policy.low == "approve":
severity_list.append("Low")
if patch_policy.other == "approve":
severity_list.append("")
self.winupdates.filter(severity__in=severity_list, installed=False).exclude(
action="approve"
).update(action="approve")
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self) -> "WinUpdatePolicy":
from winupdate.models import WinUpdatePolicy
# check whether any of the applied policies defines a patch policy and, if so, use it
patch_policy = None
agent_policy = self.winupdatepolicy.first()
if not agent_policy:
agent_policy = WinUpdatePolicy.objects.create(agent=self)
# Get the list of policies applied to the agent and select the
# highest priority one.
policies = self.get_agent_policies()
for _, policy in policies.items():
if policy and policy.active and policy.winupdatepolicy.exists():
patch_policy = policy.winupdatepolicy.first()
break
# if policy still doesn't exist return the agent patch policy
if not patch_policy:
return agent_policy
# patch policy exists. check if any agent settings are set to override patch policy
if agent_policy.critical != "inherit":
patch_policy.critical = agent_policy.critical
if agent_policy.important != "inherit":
patch_policy.important = agent_policy.important
if agent_policy.moderate != "inherit":
patch_policy.moderate = agent_policy.moderate
if agent_policy.low != "inherit":
patch_policy.low = agent_policy.low
if agent_policy.other != "inherit":
patch_policy.other = agent_policy.other
if agent_policy.run_time_frequency != "inherit":
patch_policy.run_time_frequency = agent_policy.run_time_frequency
patch_policy.run_time_hour = agent_policy.run_time_hour
patch_policy.run_time_days = agent_policy.run_time_days
if agent_policy.reboot_after_install != "inherit":
patch_policy.reboot_after_install = agent_policy.reboot_after_install
if not agent_policy.reprocess_failed_inherit:
patch_policy.reprocess_failed = agent_policy.reprocess_failed
patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
patch_policy.email_if_fail = agent_policy.email_if_fail
return patch_policy
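# Worked example of the merge above (hypothetical values): if the winning policy's
# critical setting differs but agent_policy.critical == "approve" and every other
# agent field is left at "inherit", the returned patch_policy keeps all of the
# policy's settings except critical, which is overridden to "approve".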
def get_approved_update_guids(self) -> list[str]:
return list(
self.winupdates.filter(action="approve", installed=False).values_list( # type: ignore
"guid", flat=True
)
)
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
core = get_core_settings()
policies = self.get_agent_policies()
# loop through all policies applied to agent and return an alert_template if found
processed_policies: List[int] = []
for key, policy in policies.items():
# default alert_template will override a default policy with alert template applied
if (
"default" in key
and core.alert_template
and core.alert_template.is_active
and not core.alert_template.is_agent_excluded(self)
):
self.alert_template = core.alert_template
self.save(update_fields=["alert_template"])
return core.alert_template
elif (
policy
and policy.active
and policy.pk not in processed_policies
and policy.alert_template
and policy.alert_template.is_active
and not policy.alert_template.is_agent_excluded(self)
):
self.alert_template = policy.alert_template
self.save(update_fields=["alert_template"])
return policy.alert_template
elif (
"site" in key
and self.site.alert_template
and self.site.alert_template.is_active
and not self.site.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.alert_template
self.save(update_fields=["alert_template"])
return self.site.alert_template
elif (
"client" in key
and self.site.client.alert_template
and self.site.client.alert_template.is_active
and not self.site.client.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.client.alert_template
self.save(update_fields=["alert_template"])
return self.site.client.alert_template
# no alert templates found or agent has been excluded
self.alert_template = None
self.save(update_fields=["alert_template"])
return None
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_availability_alert(
self, skip_create=not self.should_create_alert(alert_template)
)
def get_checks_from_policies(self) -> "List[Check]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance or self.agentchecks.exists():
cache_key = f"agent_{self.agent_id}_checks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"
cached_checks = cache.get(cache_key)
if isinstance(cached_checks, list):
return cached_checks
else:
# clear agent checks that have overridden_by_policy set
self.agentchecks.update(overridden_by_policy=False) # type: ignore
# get agent checks based on policies
checks = Policy.get_policy_checks(self)
cache.set(cache_key, checks, 600)
return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance:
cache_key = f"agent_{self.agent_id}_tasks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"
cached_tasks = cache.get(cache_key)
if isinstance(cached_tasks, list):
return cached_tasks
else:
# get agent tasks based on policies
tasks = Policy.get_policy_tasks(self)
cache.set(cache_key, tasks, 600)
return tasks
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
opts = setup_nats_options()
try:
nc = await nats.connect(**opts)
except:
return "natsdown"
if wait:
try:
msg = await nc.request(
self.agent_id, msgpack.dumps(data), timeout=timeout
)
except TimeoutError:
ret = "timeout"
else:
try:
ret = msgpack.loads(msg.data)
except Exception as e:
ret = str(e)
logger.error(e)
await nc.close()
return ret
else:
await nc.publish(self.agent_id, msgpack.dumps(data))
await nc.flush()
await nc.close()
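# Illustrative round trip (mirrors the ping/pong used by run_script above):
# r = asyncio.run(agent.nats_cmd({"func": "ping"}, timeout=1))
# r is "pong" when the agent answers, "timeout" when it does not reply in time,
# and "natsdown" when the NATS connection itself cannot be established.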
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
"""
Return type: tuple(message: str, error: bool)
"""
if mode == "tacagent":
if self.plat == AgentPlat.LINUX:
cmd = "systemctl restart tacticalagent.service"
shell = 3
elif self.plat == AgentPlat.DARWIN:
cmd = "launchctl kickstart -k system/tacticalagent"
shell = 3
else:
cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
shell = 1
asyncio.run(
send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
)
return "ok", False
elif mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
if wait:
r = asyncio.run(self.nats_cmd(data, timeout=20))
if r == "ok":
return "ok", False
else:
return str(r), True
else:
asyncio.run(self.nats_cmd(data, timeout=20, wait=False))
return "ok", False
return "invalid", True
def serialize(agent: "Agent") -> Dict[str, Any]:
# serializes the agent and returns json
from .serializers import AgentAuditSerializer
return AgentAuditSerializer(agent).data
def delete_superseded_updates(self) -> None:
with suppress(Exception):
pks = [] # list of pks to delete
kbs = list(self.winupdates.values_list("kb", flat=True))
d = Counter(kbs)
dupes = [k for k, v in d.items() if v > 1]
for dupe in dupes:
titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
# extract the version from the title and sort from oldest to newest
# skip if no version info is available, since there is nothing to parse
try:
matches = r"(Version|Versão)"
pattern = r"\(" + matches + r"(.*?)\)"
vers = [
re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
for i in titles
]
sorted_vers = sorted(vers, key=LooseVersion)
except:
continue
# append all but the latest version to our list of pks to delete
for ver in sorted_vers[:-1]:
q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
pks.append(q.first().pk)
pks = list(set(pks))
self.winupdates.filter(pk__in=pks).delete()
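# Worked example of the version extraction above (title is hypothetical):
# re.search(r"\((Version|Versão)(.*?)\)", "Update for Windows (Version 22H2)",
# flags=re.IGNORECASE).group(2).strip() == "22H2"
# so for updates sharing a KB, only the row with the highest LooseVersion survives.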
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
return bool(
self.overdue_dashboard_alert
or self.overdue_email_alert
or self.overdue_text_alert
or (
alert_template
and (
alert_template.agent_always_alert
or alert_template.agent_always_email
or alert_template.agent_always_text
)
)
)
def send_outage_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
(
f"Data has not been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"within the expected time."
),
alert_template=self.alert_template,
)
def send_recovery_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
(
f"Data has been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"after an interruption in data transmission."
),
alert_template=self.alert_template,
)
def send_outage_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
alert_template=self.alert_template,
)
def send_recovery_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
alert_template=self.alert_template,
)
class DebugLog(models.Model):
objects = PermissionQuerySet.as_manager()
id = models.BigAutoField(primary_key=True)
entry_time = models.DateTimeField(auto_now_add=True)
agent = models.ForeignKey(
"agents.Agent",
related_name="debuglogs",
on_delete=models.CASCADE,
null=True,
blank=True,
)
log_level = models.CharField(
max_length=50, choices=DebugLogLevel.choices, default=DebugLogLevel.INFO
)
log_type = models.CharField(
max_length=50, choices=DebugLogType.choices, default=DebugLogType.SYSTEM_ISSUES
)
message = models.TextField(null=True, blank=True)
def info(
cls,
message: str,
agent: "Optional[Agent]" = None,
log_type: str = DebugLogType.SYSTEM_ISSUES,
) -> None:
if get_debug_level() == DebugLogLevel.INFO:
cls.objects.create(
log_level=DebugLogLevel.INFO,
agent=agent,
log_type=log_type,
message=message,
)
def warning(
cls,
message: str,
agent: "Optional[Agent]" = None,
log_type: str = DebugLogType.SYSTEM_ISSUES,
) -> None:
if get_debug_level() in (DebugLogLevel.INFO, DebugLogLevel.WARN):
cls.objects.create(
log_level=DebugLogLevel.WARN,
agent=agent,
log_type=log_type,
message=message,
)
def error(
cls,
message: str,
agent: "Optional[Agent]" = None,
log_type: str = DebugLogType.SYSTEM_ISSUES,
) -> None:
if get_debug_level() in (
DebugLogLevel.INFO,
DebugLogLevel.WARN,
DebugLogLevel.ERROR,
):
cls.objects.create(
log_level=DebugLogLevel.ERROR,
agent=agent,
log_type=log_type,
message=message,
)
def critical(
cls,
message: str,
agent: "Optional[Agent]" = None,
log_type: str = DebugLogType.SYSTEM_ISSUES,
) -> None:
if get_debug_level() in (
DebugLogLevel.INFO,
DebugLogLevel.WARN,
DebugLogLevel.ERROR,
DebugLogLevel.CRITICAL,
):
cls.objects.create(
log_level=DebugLogLevel.CRITICAL,
agent=agent,
log_type=log_type,
message=message,
)
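# Typical call, mirroring the usage in check_agent_update_schedule_task below:
# DebugLog.info(agent=agent, log_type=DebugLogType.WIN_UPDATES,
# message=f"Installing windows updates on {agent.hostname}")
# an entry is only written when the configured debug level includes the severity
# of the helper being invoked.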
def check_agent_update_schedule_task() -> None:
# scheduled task that installs updates on agents if enabled
for agent in Agent.online_agents(min_version="1.3.0"):
agent.delete_superseded_updates()
install = False
patch_policy = agent.get_patch_policy()
# check if auto approval is enabled
if (
patch_policy.critical == "approve"
or patch_policy.important == "approve"
or patch_policy.moderate == "approve"
or patch_policy.low == "approve"
or patch_policy.other == "approve"
):
# get current time in agent local time
timezone = ZoneInfo(agent.timezone)
agent_localtime_now = dt.datetime.now(timezone)
weekday = agent_localtime_now.weekday()
hour = agent_localtime_now.hour
day = agent_localtime_now.day
if agent.patches_last_installed:
# get agent last installed time in local time zone
last_installed = agent.patches_last_installed.astimezone(timezone)
# check if patches were already run for this cycle and exit if so
if last_installed.strftime("%d/%m/%Y") == agent_localtime_now.strftime(
"%d/%m/%Y"
):
continue
# check if schedule is set to daily/weekly and if now is the time to run
if (
patch_policy.run_time_frequency == "daily"
and weekday in patch_policy.run_time_days
and patch_policy.run_time_hour == hour
):
install = True
elif patch_policy.run_time_frequency == "monthly":
if patch_policy.run_time_day > 28:
months_with_30_days = [4, 6, 9, 11]  # April, June, September, November
current_month = agent_localtime_now.month
if current_month == 2:
patch_policy.run_time_day = 28
elif current_month in months_with_30_days:
patch_policy.run_time_day = 30
# check if patches were scheduled to run today and now
if (
day == patch_policy.run_time_day
and patch_policy.run_time_hour == hour
):
install = True
if install:
# initiate update on agent asynchronously and don't worry about ret code
DebugLog.info(
agent=agent,
log_type=DebugLogType.WIN_UPDATES,
message=f"Installing windows updates on {agent.hostname}",
)
nats_data = {
"func": "installwinupdates",
"guids": agent.get_approved_update_guids(),
}
asyncio.run(agent.nats_cmd(nats_data, wait=False))
agent.patches_last_installed = djangotime.now()
agent.save(update_fields=["patches_last_installed"]) | null |
189,125 | from django.db import migrations
import datetime as dt
from django.utils.timezone import make_aware
from tacticalrmm.constants import TaskType
def migrate_script_data(apps, schema_editor):
AutomatedTask = apps.get_model("autotasks", "AutomatedTask")
# convert autotask to the new format
for task in AutomatedTask.objects.all():
try:
edited = False
# convert scheduled task_type
if task.task_type == TaskType.SCHEDULED:
task.task_type = TaskType.DAILY
task.run_time_date = make_aware(dt.datetime.strptime(task.run_time_minute, "%H:%M"))
task.daily_interval = 1
edited = True
# convert actions
if not task.actions:
if not task.script:
task.delete()
continue
task.actions = [
{
"type": "script",
"script": task.script.pk,
"script_args": task.script_args,
"timeout": task.timeout,
"name": task.script.name,
}
]
edited = True
if edited:
task.save()
except:
continue | null |
189,126 | from django.db import migrations
import random
import string
from django.db.models import Count
from autotasks.models import generate_task_name
from tacticalrmm.constants import TaskSyncStatus
def generate_task_name() -> str:
chars = string.ascii_letters
return "TacticalRMM_" + "".join(random.choice(chars) for i in range(35))
def check_for_win_task_name_duplicates(apps, schema_editor):
AutomatedTask = apps.get_model("autotasks", "AutomatedTask")
TaskResult = apps.get_model("autotasks", "TaskResult")
duplicate_tasks = (
AutomatedTask.objects.values("win_task_name")
.annotate(records=Count("win_task_name"))
.filter(records__gt=1)
)
for task in duplicate_tasks:
dups = list(AutomatedTask.objects.filter(win_task_name=task["win_task_name"]))
for x in range(task["records"] - 1):
dups[x].win_task_name = generate_task_name()
dups[x].save(update_fields=["win_task_name"])
# update task_result sync status
TaskResult.objects.filter(task=dups[x]).update(
sync_status=TaskSyncStatus.NOT_SYNCED
) | null |
189,127 | from django.db import migrations, transaction
from django.db.utils import IntegrityError
def migrate_task_results(apps, schema_editor):
AutomatedTask = apps.get_model("autotasks", "AutomatedTask")
TaskResult = apps.get_model("autotasks", "TaskResult")
for task in AutomatedTask.objects.exclude(agent=None):
try:
with transaction.atomic():
if task.managed_by_policy:
TaskResult.objects.create(
task_id=task.parent_task,
agent_id=task.agent_id,
retcode=task.retcode,
stdout=task.stdout,
stderr=task.stderr,
execution_time=task.execution_time,
last_run=task.last_run,
status=task.status,
sync_status=task.sync_status,
)
else:
TaskResult.objects.create(
task_id=task.id,
agent_id=task.agent.id,
retcode=task.retcode,
stdout=task.stdout,
stderr=task.stderr,
execution_time=task.execution_time,
last_run=task.last_run,
status=task.status,
sync_status=task.sync_status,
)
except IntegrityError:
continue | null |
189,128 | from django.db import migrations
def migrate_env_vars(apps, schema_editor):
AutomatedTask = apps.get_model("autotasks", "AutomatedTask")
for task in AutomatedTask.objects.iterator(chunk_size=30):
try:
tmp = []
if isinstance(task.actions, list) and task.actions:
for t in task.actions:
if isinstance(t, dict):
if t["type"] == "script":
try:
t["env_vars"]
except KeyError:
t["env_vars"] = []
tmp.append(t)
if tmp:
task.actions = tmp
task.save(update_fields=["actions"])
except Exception as e:
print(f"ERROR: {e}") | null |
189,129 | from django.db import migrations
from tacticalrmm.utils import get_bit_days
DAYS_OF_WEEK = {
0: "Monday",
1: "Tuesday",
2: "Wednesday",
3: "Thursday",
4: "Friday",
5: "Saturday",
6: "Sunday",
}
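# Sketch of the conversion performed below (the exact bit values come from
# get_bit_days in tacticalrmm.utils, which is not shown here):
# run_time_days=[0, 2, 4] -> ["Monday", "Wednesday", "Friday"] -> a single integer
# bitmask stored on run_time_bit_weekdays.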
def migrate_days(apps, schema_editor):
AutomatedTask = apps.get_model("autotasks", "AutomatedTask")
for task in AutomatedTask.objects.exclude(run_time_days__isnull=True).exclude(
run_time_days=[]
):
run_days = [DAYS_OF_WEEK.get(day) for day in task.run_time_days]
task.run_time_bit_weekdays = get_bit_days(run_days)
task.save(update_fields=["run_time_bit_weekdays"]) | null |
189,130 | import asyncio
import datetime as dt
from collections import namedtuple
from contextlib import suppress
from time import sleep
from typing import TYPE_CHECKING, Optional, Union
import msgpack
import nats
from django.utils import timezone as djangotime
from nats.errors import TimeoutError
from agents.models import Agent
from alerts.models import Alert
from autotasks.models import AutomatedTask, TaskResult
from tacticalrmm.celery import app
from tacticalrmm.constants import AGENT_STATUS_ONLINE, ORPHANED_WIN_TASK_LOCK
from tacticalrmm.helpers import rand_range, setup_nats_options
from tacticalrmm.utils import redis_lock
class Agent(BaseAuditModel):
def __str__(self) -> str:
def save(self, *args, **kwargs):
def client(self) -> "Client":
def timezone(self) -> str:
def is_posix(self) -> bool:
def arch(self) -> Optional[str]:
def do_update(self, *, token: str = "", force: bool = False) -> str:
def status(self) -> str:
def checks(self) -> Dict[str, Any]:
def pending_actions_count(self) -> int:
def cpu_model(self) -> List[str]:
def graphics(self) -> str:
def local_ips(self) -> str:
def make_model(self) -> str:
def physical_disks(self) -> Sequence[Disk]:
def serial_number(self) -> str:
def hex_mesh_node_id(self) -> str:
def online_agents(cls, min_version: str = "") -> "List[Agent]":
def is_supported_script(self, platforms: List[str]) -> bool:
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
def check_run_interval(self) -> int:
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
def approve_updates(self) -> None:
def get_patch_policy(self) -> "WinUpdatePolicy":
def get_approved_update_guids(self) -> list[str]:
def set_alert_template(self) -> "Optional[AlertTemplate]":
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
def get_checks_from_policies(self) -> "List[Check]":
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
def serialize(agent: "Agent") -> Dict[str, Any]:
def delete_superseded_updates(self) -> None:
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
def send_outage_email(self) -> None:
def send_recovery_email(self) -> None:
def send_outage_sms(self) -> None:
def send_recovery_sms(self) -> None:
class AutomatedTask(BaseAuditModel):
def __str__(self) -> str:
def save(self, *args, **kwargs) -> None:
def delete(self, *args, **kwargs):
def schedule(self) -> Optional[str]:
def fields_that_trigger_task_update_on_agent(self) -> List[str]:
def serialize(task):
def create_policy_task(
self, policy: "Policy", assigned_check: "Optional[Check]" = None
) -> None:
def generate_nats_task_payload(self) -> Dict[str, Any]:
def create_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
def modify_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
def delete_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
def run_win_task(self, agent: "Optional[Agent]" = None) -> str:
def should_create_alert(self, alert_template=None):
def create_win_task_schedule(pk: int, agent_id: Optional[str] = None) -> str:
with suppress(
AutomatedTask.DoesNotExist,
Agent.DoesNotExist,
):
task = AutomatedTask.objects.get(pk=pk)
if agent_id:
task.create_task_on_agent(Agent.objects.get(agent_id=agent_id))
else:
task.create_task_on_agent()
return "ok" | null |
189,131 | import asyncio
import datetime as dt
from collections import namedtuple
from contextlib import suppress
from time import sleep
from typing import TYPE_CHECKING, Optional, Union
import msgpack
import nats
from django.utils import timezone as djangotime
from nats.errors import TimeoutError
from agents.models import Agent
from alerts.models import Alert
from autotasks.models import AutomatedTask, TaskResult
from tacticalrmm.celery import app
from tacticalrmm.constants import AGENT_STATUS_ONLINE, ORPHANED_WIN_TASK_LOCK
from tacticalrmm.helpers import rand_range, setup_nats_options
from tacticalrmm.utils import redis_lock
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat: "AgentPlat" = models.CharField( # type: ignore
max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
goarch: "GoArch" = models.CharField( # type: ignore
max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(
max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
return self.hostname
def save(self, *args, **kwargs):
# prevent recursion since calling set_alert_template() also calls save()
if not hasattr(self, "_processing_set_alert_template"):
self._processing_set_alert_template = False
if self.pk and not self._processing_set_alert_template:
orig = Agent.objects.get(pk=self.pk)
mon_type_changed = self.monitoring_type != orig.monitoring_type
site_changed = self.site_id != orig.site_id
if mon_type_changed or site_changed:
self._processing_set_alert_template = True
self.set_alert_template()
self._processing_set_alert_template = False
super().save(*args, **kwargs)
def client(self) -> "Client":
return self.site.client
def timezone(self) -> str:
# return the default timezone unless the timezone is explicitly set per agent
if self.time_zone:
return self.time_zone
return get_core_settings().default_time_zone
def is_posix(self) -> bool:
return self.plat in {AgentPlat.LINUX, AgentPlat.DARWIN}
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
elif "32 bit" in self.operating_system or "32bit" in self.operating_system:
return "32"
return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
ver = settings.LATEST_AGENT_VER
if not self.goarch:
DebugLog.warning(
agent=self,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
)
return "noarch"
if pyver.parse(self.version) <= pyver.parse("1.3.0"):
return "not supported"
url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"
if not force:
if self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).exists():
self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).delete()
PendingAction.objects.create(
agent=self,
action_type=PAAction.AGENT_UPDATE,
details={
"url": url,
"version": ver,
"inno": bin,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": ver,
"inno": bin,
},
}
asyncio.run(self.nats_cmd(nats_data, wait=False))
return "created"
def status(self) -> str:
now = djangotime.now()
offline = now - djangotime.timedelta(minutes=self.offline_time)
overdue = now - djangotime.timedelta(minutes=self.overdue_time)
if self.last_seen is not None:
if (self.last_seen < offline) and (self.last_seen > overdue):
return AGENT_STATUS_OFFLINE
elif (self.last_seen < offline) and (self.last_seen < overdue):
return AGENT_STATUS_OVERDUE
else:
return AGENT_STATUS_ONLINE
else:
return AGENT_STATUS_OFFLINE
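# Worked example with the field defaults declared above (offline_time=4,
# overdue_time=30): last_seen 2 minutes ago -> AGENT_STATUS_ONLINE, 10 minutes
# ago -> AGENT_STATUS_OFFLINE, 45 minutes ago -> AGENT_STATUS_OVERDUE, and a
# missing last_seen is reported as offline.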
def checks(self) -> Dict[str, Any]:
total, passing, failing, warning, info = 0, 0, 0, 0, 0
for check in self.get_checks_with_policies(exclude_overridden=True):
total += 1
if (
not hasattr(check.check_result, "status")
or isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.PASSING
):
passing += 1
elif (
isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.FAILING
):
alert_severity = (
check.check_result.alert_severity
if check.check_type
in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
)
else check.alert_severity
)
if alert_severity == AlertSeverity.ERROR:
failing += 1
elif alert_severity == AlertSeverity.WARNING:
warning += 1
elif alert_severity == AlertSeverity.INFO:
info += 1
ret = {
"total": total,
"passing": passing,
"failing": failing,
"warning": warning,
"info": info,
"has_failing_checks": failing > 0 or warning > 0,
}
return ret
def pending_actions_count(self) -> int:
ret = cache.get(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}")
if ret is None:
ret = self.pendingactions.filter(status=PAStatus.PENDING).count()
cache.set(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}", ret, 600)
return ret
def cpu_model(self) -> List[str]:
if self.is_posix:
try:
return cast(List[str], self.wmi_detail["cpus"])
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
for cpu in cpus:
name = [x["Name"] for x in cpu if "Name" in x][0]
lp, nc = "", ""
with suppress(Exception):
lp = [
x["NumberOfLogicalProcessors"]
for x in cpu
if "NumberOfCores" in x
][0]
nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
if lp and nc:
cpu_string = f"{name}, {nc}C/{lp}T"
else:
cpu_string = name
ret.append(cpu_string)
return ret
except:
return ["unknown cpu model"]
def graphics(self) -> str:
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
for i in graphics:
caption = [x["Caption"] for x in i if "Caption" in x][0]
if "microsoft remote display adapter" in caption.lower():
mrda.append("yes")
continue
ret.append([x["Caption"] for x in i if "Caption" in x][0])
# only return this if no other graphics cards
if not ret and mrda:
return "Microsoft Remote Display Adapter"
return ", ".join(ret)
except:
return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
except:
return "error getting local ips"
for i in ips:
try:
addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
except:
continue
if addr is None:
continue
for ip in addr:
if validators.ipv4(ip):
ret.append(ip)
if len(ret) == 1:
return cast(str, ret[0])
return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
if self.is_posix:
try:
return cast(str, self.wmi_detail["make_model"])
except:
return "error getting make/model"
with suppress(Exception):
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
model = [x["Model"] for x in comp_sys if "Model" in x][0]
if "to be filled" in model.lower():
mobo = self.wmi_detail["base_board"][0]
make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
model = [x["Product"] for x in mobo if "Product" in x][0]
if make.lower() == "lenovo":
sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
if "to be filled" not in sysfam.lower():
model = sysfam
return f"{make} {model}"
with suppress(Exception):
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
return "unknown make/model"
def physical_disks(self) -> Sequence[Disk]:
if self.is_posix:
try:
return cast(List[Disk], self.wmi_detail["disks"])
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
for disk in disks:
interface_type = [
x["InterfaceType"] for x in disk if "InterfaceType" in x
][0]
if interface_type == "USB":
continue
model = [x["Caption"] for x in disk if "Caption" in x][0]
size = [x["Size"] for x in disk if "Size" in x][0]
size_in_gb = round(int(size) / 1_073_741_824)
ret.append(f"{model} {size_in_gb:,}GB {interface_type}")
return ret
except:
return ["unknown disk"]
def serial_number(self) -> str:
if self.is_posix:
try:
return self.wmi_detail["serialnumber"]
except:
return ""
try:
return self.wmi_detail["bios"][0][0]["SerialNumber"]
except:
return ""
def hex_mesh_node_id(self) -> str:
return _b64_to_hex(self.mesh_node_id)
def online_agents(cls, min_version: str = "") -> "List[Agent]":
if min_version:
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if pyver.parse(i.version) >= pyver.parse(min_version)
and i.status == AGENT_STATUS_ONLINE
]
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if i.status == AGENT_STATUS_ONLINE
]
def is_supported_script(self, platforms: List[str]) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
if exclude_overridden:
checks = (
list(
check
for check in self.agentchecks.all()
if not check.overridden_by_policy
)
+ self.get_checks_from_policies()
)
else:
checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
results = self.taskresults.all() # type: ignore
for task in tasks:
for result in results:
if result.task.id == task.pk:
task.task_result = result
break
return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
results = self.checkresults.all() # type: ignore
for check in checks:
for result in results:
if result.assigned_check.id == check.pk:
check.check_result = result
break
return checks
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
from checks.models import Check
site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
default_policy = getattr(
get_core_settings(), f"{self.monitoring_type}_policy", None
)
# prefetch excluded objects on policies, but only when the policy is not None
models.prefetch_related_objects(
[
policy
for policy in (self.policy, site_policy, client_policy, default_policy)
if policy
],
"excluded_agents",
"excluded_sites",
"excluded_clients",
models.Prefetch(
"policychecks", queryset=Check.objects.select_related("script")
),
"autotasks",
)
return {
"agent_policy": (
self.policy
if self.policy and not self.policy.is_agent_excluded(self)
else None
),
"site_policy": (
site_policy
if (site_policy and not site_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
else None
),
"client_policy": (
client_policy
if (client_policy and not client_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
else None
),
"default_policy": (
default_policy
if (default_policy and not default_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
and not self.client.block_policy_inheritance
else None
),
}
def check_run_interval(self) -> int:
interval = self.check_interval
# determine if any agent checks have a custom interval and set the lowest interval
for check in self.get_checks_with_policies():
if check.run_interval and check.run_interval < interval:
# don't allow check runs less than 15s
interval = 15 if check.run_interval < 15 else check.run_interval
return interval
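# Worked example: with check_interval=120 and one check whose run_interval is 30,
# this returns 30; a check asking for run_interval=5 is clamped to the 15 second
# floor enforced above.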
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
from scripts.models import Script
script = Script.objects.get(pk=scriptpk)
# always override if set on script model
if script.run_as_user:
run_as_user = True
parsed_args = script.parse_script_args(self, script.shell, args)
parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)
data = {
"func": "runscriptfull" if full else "runscript",
"timeout": timeout,
"script_args": parsed_args,
"payload": {
"code": script.code,
"shell": script.shell,
},
"run_as_user": run_as_user,
"env_vars": parsed_env_vars,
"nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
"deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
}
if history_pk != 0:
data["id"] = history_pk
running_agent = self
if run_on_any:
nats_ping = {"func": "ping"}
# try on self first
r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = self
else:
for agent in Agent.online_agents():
r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = agent
break
if running_agent.pk == self.pk:
return "Unable to find an online agent"
if wait:
return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
else:
asyncio.run(running_agent.nats_cmd(data, wait=False))
return "ok"
# auto approves updates
def approve_updates(self) -> None:
patch_policy = self.get_patch_policy()
severity_list = []
if patch_policy.critical == "approve":
severity_list.append("Critical")
if patch_policy.important == "approve":
severity_list.append("Important")
if patch_policy.moderate == "approve":
severity_list.append("Moderate")
if patch_policy.low == "approve":
severity_list.append("Low")
if patch_policy.other == "approve":
severity_list.append("")
self.winupdates.filter(severity__in=severity_list, installed=False).exclude(
action="approve"
).update(action="approve")
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self) -> "WinUpdatePolicy":
from winupdate.models import WinUpdatePolicy
# check whether any of the applied policies defines a patch policy and, if so, use it
patch_policy = None
agent_policy = self.winupdatepolicy.first()
if not agent_policy:
agent_policy = WinUpdatePolicy.objects.create(agent=self)
# Get the list of policies applied to the agent and select the
# highest priority one.
policies = self.get_agent_policies()
for _, policy in policies.items():
if policy and policy.active and policy.winupdatepolicy.exists():
patch_policy = policy.winupdatepolicy.first()
break
# if policy still doesn't exist return the agent patch policy
if not patch_policy:
return agent_policy
# patch policy exists. check if any agent settings are set to override patch policy
if agent_policy.critical != "inherit":
patch_policy.critical = agent_policy.critical
if agent_policy.important != "inherit":
patch_policy.important = agent_policy.important
if agent_policy.moderate != "inherit":
patch_policy.moderate = agent_policy.moderate
if agent_policy.low != "inherit":
patch_policy.low = agent_policy.low
if agent_policy.other != "inherit":
patch_policy.other = agent_policy.other
if agent_policy.run_time_frequency != "inherit":
patch_policy.run_time_frequency = agent_policy.run_time_frequency
patch_policy.run_time_hour = agent_policy.run_time_hour
patch_policy.run_time_days = agent_policy.run_time_days
if agent_policy.reboot_after_install != "inherit":
patch_policy.reboot_after_install = agent_policy.reboot_after_install
if not agent_policy.reprocess_failed_inherit:
patch_policy.reprocess_failed = agent_policy.reprocess_failed
patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
patch_policy.email_if_fail = agent_policy.email_if_fail
return patch_policy
def get_approved_update_guids(self) -> list[str]:
return list(
self.winupdates.filter(action="approve", installed=False).values_list( # type: ignore
"guid", flat=True
)
)
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
core = get_core_settings()
policies = self.get_agent_policies()
# loop through all policies applied to agent and return an alert_template if found
processed_policies: List[int] = []
for key, policy in policies.items():
# default alert_template will override a default policy with alert template applied
if (
"default" in key
and core.alert_template
and core.alert_template.is_active
and not core.alert_template.is_agent_excluded(self)
):
self.alert_template = core.alert_template
self.save(update_fields=["alert_template"])
return core.alert_template
elif (
policy
and policy.active
and policy.pk not in processed_policies
and policy.alert_template
and policy.alert_template.is_active
and not policy.alert_template.is_agent_excluded(self)
):
self.alert_template = policy.alert_template
self.save(update_fields=["alert_template"])
return policy.alert_template
elif (
"site" in key
and self.site.alert_template
and self.site.alert_template.is_active
and not self.site.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.alert_template
self.save(update_fields=["alert_template"])
return self.site.alert_template
elif (
"client" in key
and self.site.client.alert_template
and self.site.client.alert_template.is_active
and not self.site.client.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.client.alert_template
self.save(update_fields=["alert_template"])
return self.site.client.alert_template
# no alert templates found or agent has been excluded
self.alert_template = None
self.save(update_fields=["alert_template"])
return None
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_availability_alert(
self, skip_create=not self.should_create_alert(alert_template)
)
def get_checks_from_policies(self) -> "List[Check]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance or self.agentchecks.exists():
cache_key = f"agent_{self.agent_id}_checks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"
cached_checks = cache.get(cache_key)
if isinstance(cached_checks, list):
return cached_checks
else:
# clear agent checks that have overridden_by_policy set
self.agentchecks.update(overridden_by_policy=False) # type: ignore
# get agent checks based on policies
checks = Policy.get_policy_checks(self)
cache.set(cache_key, checks, 600)
return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance:
cache_key = f"agent_{self.agent_id}_tasks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"
cached_tasks = cache.get(cache_key)
if isinstance(cached_tasks, list):
return cached_tasks
else:
# get agent tasks based on policies
tasks = Policy.get_policy_tasks(self)
cache.set(cache_key, tasks, 600)
return tasks
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
opts = setup_nats_options()
try:
nc = await nats.connect(**opts)
except:
return "natsdown"
if wait:
try:
msg = await nc.request(
self.agent_id, msgpack.dumps(data), timeout=timeout
)
except TimeoutError:
ret = "timeout"
else:
try:
ret = msgpack.loads(msg.data)
except Exception as e:
ret = str(e)
logger.error(e)
await nc.close()
return ret
else:
await nc.publish(self.agent_id, msgpack.dumps(data))
await nc.flush()
await nc.close()
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
"""
Return type: tuple(message: str, error: bool)
"""
if mode == "tacagent":
if self.plat == AgentPlat.LINUX:
cmd = "systemctl restart tacticalagent.service"
shell = 3
elif self.plat == AgentPlat.DARWIN:
cmd = "launchctl kickstart -k system/tacticalagent"
shell = 3
else:
cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
shell = 1
asyncio.run(
send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
)
return "ok", False
elif mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
if wait:
r = asyncio.run(self.nats_cmd(data, timeout=20))
if r == "ok":
return "ok", False
else:
return str(r), True
else:
asyncio.run(self.nats_cmd(data, timeout=20, wait=False))
return "ok", False
return "invalid", True
def serialize(agent: "Agent") -> Dict[str, Any]:
# serializes the agent and returns json
from .serializers import AgentAuditSerializer
return AgentAuditSerializer(agent).data
def delete_superseded_updates(self) -> None:
with suppress(Exception):
pks = [] # list of pks to delete
kbs = list(self.winupdates.values_list("kb", flat=True))
d = Counter(kbs)
dupes = [k for k, v in d.items() if v > 1]
for dupe in dupes:
titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
# extract the version from the title and sort from oldest to newest
# skip if no version info is available, since there is nothing to parse
try:
matches = r"(Version|Versão)"
pattern = r"\(" + matches + r"(.*?)\)"
vers = [
re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
for i in titles
]
sorted_vers = sorted(vers, key=LooseVersion)
except:
continue
# append all but the latest version to our list of pks to delete
for ver in sorted_vers[:-1]:
q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
pks.append(q.first().pk)
pks = list(set(pks))
self.winupdates.filter(pk__in=pks).delete()
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
return bool(
self.overdue_dashboard_alert
or self.overdue_email_alert
or self.overdue_text_alert
or (
alert_template
and (
alert_template.agent_always_alert
or alert_template.agent_always_email
or alert_template.agent_always_text
)
)
)
def send_outage_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
(
f"Data has not been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"within the expected time."
),
alert_template=self.alert_template,
)
def send_recovery_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
(
f"Data has been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"after an interruption in data transmission."
),
alert_template=self.alert_template,
)
def send_outage_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
alert_template=self.alert_template,
)
def send_recovery_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
alert_template=self.alert_template,
)
class AutomatedTask(BaseAuditModel):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="autotasks",
on_delete=models.CASCADE,
null=True,
blank=True,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="autotasks",
null=True,
blank=True,
on_delete=models.CASCADE,
)
custom_field = models.ForeignKey(
"core.CustomField",
related_name="autotasks",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
# format -> [{"type": "script", "script": 1, "name": "Script Name", "timeout": 90, "script_args": [], "env_vars": []}, {"type": "cmd", "command": "whoami", "timeout": 90}]
actions = JSONField(default=list)
assigned_check = models.ForeignKey(
"checks.Check",
null=True,
blank=True,
related_name="assignedtasks",
on_delete=models.SET_NULL,
)
name = models.CharField(max_length=255)
collector_all_output = models.BooleanField(default=False)
enabled = models.BooleanField(default=True)
continue_on_error = models.BooleanField(default=True)
alert_severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_alert = models.BooleanField(default=False)
text_alert = models.BooleanField(default=False)
dashboard_alert = models.BooleanField(default=False)
# options sent to agent for task creation
# general task settings
task_type = models.CharField(
max_length=100, choices=TaskType.choices, default=TaskType.MANUAL
)
win_task_name = models.CharField(
max_length=255, unique=True, blank=True, default=generate_task_name
)
run_time_date = DateTimeField(null=True, blank=True)
expire_date = DateTimeField(null=True, blank=True)
# daily
daily_interval = models.PositiveSmallIntegerField(
blank=True, null=True, validators=[MinValueValidator(1), MaxValueValidator(255)]
)
# weekly
run_time_bit_weekdays = models.IntegerField(null=True, blank=True)
weekly_interval = models.PositiveSmallIntegerField(
blank=True, null=True, validators=[MinValueValidator(1), MaxValueValidator(52)]
)
run_time_minute = models.CharField(
max_length=5, null=True, blank=True
) # deprecated
# monthly
monthly_days_of_month = models.PositiveBigIntegerField(blank=True, null=True)
monthly_months_of_year = models.PositiveIntegerField(blank=True, null=True)
# monthly days of week
monthly_weeks_of_month = models.PositiveSmallIntegerField(blank=True, null=True)
# additional task settings
task_repetition_duration = models.CharField(max_length=10, null=True, blank=True)
task_repetition_interval = models.CharField(max_length=10, null=True, blank=True)
stop_task_at_duration_end = models.BooleanField(blank=True, default=False)
random_task_delay = models.CharField(max_length=10, null=True, blank=True)
remove_if_not_scheduled = models.BooleanField(default=False)
run_asap_after_missed = models.BooleanField(default=False) # added in agent v1.4.7
task_instance_policy = models.PositiveSmallIntegerField(blank=True, default=1)
# deprecated
managed_by_policy = models.BooleanField(default=False)
# non-database property
task_result: "Union[TaskResult, Dict[None, None]]" = {}
def __str__(self) -> str:
return self.name
def save(self, *args, **kwargs) -> None:
# if task is a policy task clear cache on everything
if self.policy:
cache.delete_many_pattern("site_*_tasks")
cache.delete_many_pattern("agent_*_tasks")
# get old task if exists
old_task = AutomatedTask.objects.get(pk=self.pk) if self.pk else None
super().save(old_model=old_task, *args, **kwargs)
# check if fields were updated that require a sync to the agent and set status to notsynced
if old_task:
for field in self.fields_that_trigger_task_update_on_agent:
if getattr(self, field) != getattr(old_task, field):
if self.policy:
TaskResult.objects.exclude(
sync_status=TaskSyncStatus.INITIAL
).filter(task__policy_id=self.policy.id).update(
sync_status=TaskSyncStatus.NOT_SYNCED
)
else:
TaskResult.objects.filter(agent=self.agent, task=self).update(
sync_status=TaskSyncStatus.NOT_SYNCED
)
def delete(self, *args, **kwargs):
# if task is a policy task clear cache on everything
if self.policy:
cache.delete_many_pattern("site_*_tasks")
cache.delete_many_pattern("agent_*_tasks")
super().delete(*args, **kwargs)
def schedule(self) -> Optional[str]:
if self.task_type == TaskType.MANUAL:
return "Manual"
elif self.task_type == TaskType.CHECK_FAILURE:
return "Every time check fails"
elif self.task_type == TaskType.RUN_ONCE:
return f'Run once on {self.run_time_date.strftime("%m/%d/%Y %I:%M%p")}'
elif self.task_type == TaskType.DAILY:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
if self.daily_interval == 1:
return f"Daily at {run_time_nice}"
else:
return f"Every {self.daily_interval} days at {run_time_nice}"
elif self.task_type == TaskType.WEEKLY:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
days = bitdays_to_string(self.run_time_bit_weekdays)
if self.weekly_interval != 1:
return f"{days} at {run_time_nice} every {self.weekly_interval} weeks"
else:
return f"{days} at {run_time_nice}"
elif self.task_type == TaskType.MONTHLY:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
months = bitmonths_to_string(self.monthly_months_of_year)
days = bitmonthdays_to_string(self.monthly_days_of_month)
return f"Runs on {months} on days {days} at {run_time_nice}"
elif self.task_type == TaskType.MONTHLY_DOW:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
months = bitmonths_to_string(self.monthly_months_of_year)
weeks = bitweeks_to_string(self.monthly_weeks_of_month)
days = bitdays_to_string(self.run_time_bit_weekdays)
return f"Runs on {months} on {weeks} on {days} at {run_time_nice}"
elif self.task_type == TaskType.ONBOARDING:
return "Onboarding: Runs once on task creation."
return None
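# Sample strings produced above (times are hypothetical):
# TaskType.DAILY with daily_interval=1 -> "Daily at 03:30AM"
# TaskType.DAILY with daily_interval=3 -> "Every 3 days at 03:30AM"
# TaskType.CHECK_FAILURE -> "Every time check fails"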
def fields_that_trigger_task_update_on_agent(self) -> List[str]:
return FIELDS_TRIGGER_TASK_UPDATE_AGENT
def serialize(task):
# serializes the task and returns json
from .serializers import TaskAuditSerializer
return TaskAuditSerializer(task).data
def create_policy_task(
self, policy: "Policy", assigned_check: "Optional[Check]" = None
) -> None:
# Copies certain properties on this task (self) to a new task and sets it to the supplied Policy
task = AutomatedTask.objects.create(
policy=policy,
assigned_check=assigned_check,
)
for field in POLICY_TASK_FIELDS_TO_COPY:
setattr(task, field, getattr(self, field))
task.save()
# agent version >= 1.8.0
def generate_nats_task_payload(self) -> Dict[str, Any]:
task = {
"pk": self.pk,
"type": "rmm",
"name": self.win_task_name,
"overwrite_task": True,
"enabled": self.enabled,
"trigger": (
self.task_type
if self.task_type != TaskType.CHECK_FAILURE
else TaskType.MANUAL
),
"multiple_instances": self.task_instance_policy or 0,
"delete_expired_task_after": (
self.remove_if_not_scheduled if self.expire_date else False
),
"start_when_available": (
self.run_asap_after_missed
if self.task_type != TaskType.RUN_ONCE
else True
),
}
if self.task_type in (
TaskType.DAILY,
TaskType.WEEKLY,
TaskType.MONTHLY,
TaskType.MONTHLY_DOW,
TaskType.RUN_ONCE,
):
if not self.run_time_date:
self.run_time_date = djangotime.now()
task["start_year"] = self.run_time_date.year
task["start_month"] = self.run_time_date.month
task["start_day"] = self.run_time_date.day
task["start_hour"] = self.run_time_date.hour
task["start_min"] = self.run_time_date.minute
if self.expire_date:
task["expire_year"] = self.expire_date.year
task["expire_month"] = self.expire_date.month
task["expire_day"] = self.expire_date.day
task["expire_hour"] = self.expire_date.hour
task["expire_min"] = self.expire_date.minute
if self.random_task_delay:
task["random_delay"] = convert_to_iso_duration(self.random_task_delay)
if self.task_repetition_interval and self.task_repetition_duration:
task["repetition_interval"] = convert_to_iso_duration(
self.task_repetition_interval
)
task["repetition_duration"] = convert_to_iso_duration(
self.task_repetition_duration
)
task["stop_at_duration_end"] = self.stop_task_at_duration_end
if self.task_type == TaskType.DAILY:
task["day_interval"] = self.daily_interval
elif self.task_type == TaskType.WEEKLY:
task["week_interval"] = self.weekly_interval
task["days_of_week"] = self.run_time_bit_weekdays
elif self.task_type == TaskType.MONTHLY:
# check if "last day is configured"
if self.monthly_days_of_month >= 0x80000000:
task["days_of_month"] = self.monthly_days_of_month - 0x80000000
task["run_on_last_day_of_month"] = True
else:
task["days_of_month"] = self.monthly_days_of_month
task["run_on_last_day_of_month"] = False
task["months_of_year"] = self.monthly_months_of_year
elif self.task_type == TaskType.MONTHLY_DOW:
task["days_of_week"] = self.run_time_bit_weekdays
task["months_of_year"] = self.monthly_months_of_year
task["weeks_of_month"] = self.monthly_weeks_of_month
return task
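# Worked example of the monthly "last day" flag handled above: with
# monthly_days_of_month = 0x80000004 the payload carries days_of_month = 4 and
# run_on_last_day_of_month = True, because the 0x80000000 bit is stripped off
# and reported separately.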
def create_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
nats_data = {
"func": "schedtask",
"schedtaskpayload": self.generate_nats_task_payload(),
}
logger.debug(nats_data)
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=10))
if r != "ok":
task_result.sync_status = TaskSyncStatus.INITIAL
task_result.save(update_fields=["sync_status"])
logger.error(
f"Unable to create scheduled task {self.name} on {task_result.agent.hostname}: {r}"
)
return "timeout"
else:
task_result.sync_status = TaskSyncStatus.SYNCED
task_result.save(update_fields=["sync_status"])
logger.info(
f"{task_result.agent.hostname} task {self.name} was successfully created."
)
return "ok"
def modify_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
nats_data = {
"func": "schedtask",
"schedtaskpayload": self.generate_nats_task_payload(),
}
logger.debug(nats_data)
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=10))
if r != "ok":
task_result.sync_status = TaskSyncStatus.NOT_SYNCED
task_result.save(update_fields=["sync_status"])
logger.error(
f"Unable to modify scheduled task {self.name} on {task_result.agent.hostname}: {r}"
)
return "timeout"
else:
task_result.sync_status = TaskSyncStatus.SYNCED
task_result.save(update_fields=["sync_status"])
logger.info(
f"{task_result.agent.hostname} task {self.name} was successfully modified."
)
return "ok"
def delete_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
nats_data = {
"func": "delschedtask",
"schedtaskpayload": {"name": self.win_task_name},
}
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=10))
if r != "ok" and "The system cannot find the file specified" not in r:
task_result.sync_status = TaskSyncStatus.PENDING_DELETION
with suppress(DatabaseError):
task_result.save(update_fields=["sync_status"])
logger.error(
f"Unable to delete task {self.name} on {task_result.agent.hostname}: {r}"
)
return "timeout"
else:
self.delete()
logger.info(f"{task_result.agent.hostname} task {self.name} was deleted.")
return "ok"
def run_win_task(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
asyncio.run(
task_result.agent.nats_cmd(
{"func": "runtask", "taskpk": self.pk}, wait=False
)
)
return "ok"
def should_create_alert(self, alert_template=None):
return (
self.dashboard_alert
or self.email_alert
or self.text_alert
or (
alert_template
and (
alert_template.task_always_alert
or alert_template.task_always_email
or alert_template.task_always_text
)
)
)
def modify_win_task(pk: int, agent_id: Optional[str] = None) -> str:
with suppress(
AutomatedTask.DoesNotExist,
Agent.DoesNotExist,
):
task = AutomatedTask.objects.get(pk=pk)
if agent_id:
task.modify_task_on_agent(Agent.objects.get(agent_id=agent_id))
else:
task.modify_task_on_agent()
return "ok" | null |
import asyncio
import datetime as dt
from collections import namedtuple
from contextlib import suppress
from time import sleep
from typing import TYPE_CHECKING, Optional, Union
import msgpack
import nats
from django.utils import timezone as djangotime
from nats.errors import TimeoutError
from agents.models import Agent
from alerts.models import Alert
from autotasks.models import AutomatedTask, TaskResult
from tacticalrmm.celery import app
from tacticalrmm.constants import AGENT_STATUS_ONLINE, ORPHANED_WIN_TASK_LOCK
from tacticalrmm.helpers import rand_range, setup_nats_options
from tacticalrmm.utils import redis_lock
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat: "AgentPlat" = models.CharField( # type: ignore
max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
goarch: "GoArch" = models.CharField( # type: ignore
max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(
max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
return self.hostname
def save(self, *args, **kwargs):
# prevent recursion since calling set_alert_template() also calls save()
if not hasattr(self, "_processing_set_alert_template"):
self._processing_set_alert_template = False
if self.pk and not self._processing_set_alert_template:
orig = Agent.objects.get(pk=self.pk)
mon_type_changed = self.monitoring_type != orig.monitoring_type
site_changed = self.site_id != orig.site_id
if mon_type_changed or site_changed:
self._processing_set_alert_template = True
self.set_alert_template()
self._processing_set_alert_template = False
super().save(*args, **kwargs)
    @property
    def client(self) -> "Client":
return self.site.client
def timezone(self) -> str:
        # return the default timezone unless the timezone is explicitly set per agent
if self.time_zone:
return self.time_zone
return get_core_settings().default_time_zone
    @property
    def is_posix(self) -> bool:
return self.plat in {AgentPlat.LINUX, AgentPlat.DARWIN}
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
elif "32 bit" in self.operating_system or "32bit" in self.operating_system:
return "32"
return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
ver = settings.LATEST_AGENT_VER
if not self.goarch:
DebugLog.warning(
agent=self,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
)
return "noarch"
if pyver.parse(self.version) <= pyver.parse("1.3.0"):
return "not supported"
url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"
if not force:
if self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).exists():
self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).delete()
PendingAction.objects.create(
agent=self,
action_type=PAAction.AGENT_UPDATE,
details={
"url": url,
"version": ver,
"inno": bin,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": ver,
"inno": bin,
},
}
asyncio.run(self.nats_cmd(nats_data, wait=False))
return "created"
    @property
    def status(self) -> str:
now = djangotime.now()
offline = now - djangotime.timedelta(minutes=self.offline_time)
overdue = now - djangotime.timedelta(minutes=self.overdue_time)
if self.last_seen is not None:
if (self.last_seen < offline) and (self.last_seen > overdue):
return AGENT_STATUS_OFFLINE
elif (self.last_seen < offline) and (self.last_seen < overdue):
return AGENT_STATUS_OVERDUE
else:
return AGENT_STATUS_ONLINE
else:
return AGENT_STATUS_OFFLINE
def checks(self) -> Dict[str, Any]:
total, passing, failing, warning, info = 0, 0, 0, 0, 0
for check in self.get_checks_with_policies(exclude_overridden=True):
total += 1
if (
not hasattr(check.check_result, "status")
or isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.PASSING
):
passing += 1
elif (
isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.FAILING
):
alert_severity = (
check.check_result.alert_severity
if check.check_type
in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
)
else check.alert_severity
)
if alert_severity == AlertSeverity.ERROR:
failing += 1
elif alert_severity == AlertSeverity.WARNING:
warning += 1
elif alert_severity == AlertSeverity.INFO:
info += 1
ret = {
"total": total,
"passing": passing,
"failing": failing,
"warning": warning,
"info": info,
"has_failing_checks": failing > 0 or warning > 0,
}
return ret
def pending_actions_count(self) -> int:
ret = cache.get(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}")
if ret is None:
ret = self.pendingactions.filter(status=PAStatus.PENDING).count()
cache.set(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}", ret, 600)
return ret
def cpu_model(self) -> List[str]:
if self.is_posix:
try:
return cast(List[str], self.wmi_detail["cpus"])
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
for cpu in cpus:
name = [x["Name"] for x in cpu if "Name" in x][0]
lp, nc = "", ""
with suppress(Exception):
lp = [
x["NumberOfLogicalProcessors"]
for x in cpu
if "NumberOfCores" in x
][0]
nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
if lp and nc:
cpu_string = f"{name}, {nc}C/{lp}T"
else:
cpu_string = name
ret.append(cpu_string)
return ret
except:
return ["unknown cpu model"]
def graphics(self) -> str:
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
for i in graphics:
caption = [x["Caption"] for x in i if "Caption" in x][0]
if "microsoft remote display adapter" in caption.lower():
mrda.append("yes")
continue
ret.append([x["Caption"] for x in i if "Caption" in x][0])
# only return this if no other graphics cards
if not ret and mrda:
return "Microsoft Remote Display Adapter"
return ", ".join(ret)
except:
return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
except:
return "error getting local ips"
for i in ips:
try:
addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
except:
continue
if addr is None:
continue
for ip in addr:
if validators.ipv4(ip):
ret.append(ip)
if len(ret) == 1:
return cast(str, ret[0])
return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
if self.is_posix:
try:
return cast(str, self.wmi_detail["make_model"])
except:
return "error getting make/model"
with suppress(Exception):
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
model = [x["Model"] for x in comp_sys if "Model" in x][0]
if "to be filled" in model.lower():
mobo = self.wmi_detail["base_board"][0]
make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
model = [x["Product"] for x in mobo if "Product" in x][0]
if make.lower() == "lenovo":
sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
if "to be filled" not in sysfam.lower():
model = sysfam
return f"{make} {model}"
with suppress(Exception):
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
return "unknown make/model"
def physical_disks(self) -> Sequence[Disk]:
if self.is_posix:
try:
return cast(List[Disk], self.wmi_detail["disks"])
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
for disk in disks:
interface_type = [
x["InterfaceType"] for x in disk if "InterfaceType" in x
][0]
if interface_type == "USB":
continue
model = [x["Caption"] for x in disk if "Caption" in x][0]
size = [x["Size"] for x in disk if "Size" in x][0]
size_in_gb = round(int(size) / 1_073_741_824)
ret.append(f"{model} {size_in_gb:,}GB {interface_type}")
return ret
except:
return ["unknown disk"]
def serial_number(self) -> str:
if self.is_posix:
try:
return self.wmi_detail["serialnumber"]
except:
return ""
try:
return self.wmi_detail["bios"][0][0]["SerialNumber"]
except:
return ""
def hex_mesh_node_id(self) -> str:
return _b64_to_hex(self.mesh_node_id)
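    # All agents whose status is online, optionally limited to those running at least
    # min_version (versions compared with pyver.parse).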
    @classmethod
    def online_agents(cls, min_version: str = "") -> "List[Agent]":
if min_version:
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if pyver.parse(i.version) >= pyver.parse(min_version)
and i.status == AGENT_STATUS_ONLINE
]
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if i.status == AGENT_STATUS_ONLINE
]
def is_supported_script(self, platforms: List[str]) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
if exclude_overridden:
checks = (
list(
check
for check in self.agentchecks.all()
if not check.overridden_by_policy
)
+ self.get_checks_from_policies()
)
else:
checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
results = self.taskresults.all() # type: ignore
for task in tasks:
for result in results:
if result.task.id == task.pk:
task.task_result = result
break
return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
results = self.checkresults.all() # type: ignore
for check in checks:
for result in results:
if result.assigned_check.id == check.pk:
check.check_result = result
break
return checks
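    # Resolves the four policy levels that can apply to this agent (agent, site, client,
    # default) for its monitoring type, returning None for a level when that policy is
    # missing, excludes this agent, or when inheritance is blocked at the agent, site,
    # or client level.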
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
from checks.models import Check
site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
default_policy = getattr(
get_core_settings(), f"{self.monitoring_type}_policy", None
)
        # prefetch excluded objects on policies, but only for policies that are not None
models.prefetch_related_objects(
[
policy
for policy in (self.policy, site_policy, client_policy, default_policy)
if policy
],
"excluded_agents",
"excluded_sites",
"excluded_clients",
models.Prefetch(
"policychecks", queryset=Check.objects.select_related("script")
),
"autotasks",
)
return {
"agent_policy": (
self.policy
if self.policy and not self.policy.is_agent_excluded(self)
else None
),
"site_policy": (
site_policy
if (site_policy and not site_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
else None
),
"client_policy": (
client_policy
if (client_policy and not client_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
else None
),
"default_policy": (
default_policy
if (default_policy and not default_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
and not self.client.block_policy_inheritance
else None
),
}
def check_run_interval(self) -> int:
interval = self.check_interval
# determine if any agent checks have a custom interval and set the lowest interval
for check in self.get_checks_with_policies():
if check.run_interval and check.run_interval < interval:
# don't allow check runs less than 15s
interval = 15 if check.run_interval < 15 else check.run_interval
return interval
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
from scripts.models import Script
script = Script.objects.get(pk=scriptpk)
# always override if set on script model
if script.run_as_user:
run_as_user = True
parsed_args = script.parse_script_args(self, script.shell, args)
parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)
data = {
"func": "runscriptfull" if full else "runscript",
"timeout": timeout,
"script_args": parsed_args,
"payload": {
"code": script.code,
"shell": script.shell,
},
"run_as_user": run_as_user,
"env_vars": parsed_env_vars,
"nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
"deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
}
if history_pk != 0:
data["id"] = history_pk
running_agent = self
if run_on_any:
nats_ping = {"func": "ping"}
# try on self first
r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = self
else:
for agent in Agent.online_agents():
r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = agent
break
if running_agent.pk == self.pk:
return "Unable to find an online agent"
if wait:
return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
else:
asyncio.run(running_agent.nats_cmd(data, wait=False))
return "ok"
# auto approves updates
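    # Updates with a blank severity string fall under the "other" category, which is why
    # an empty string is appended to the severity filter when "other" is set to approve.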
def approve_updates(self) -> None:
patch_policy = self.get_patch_policy()
severity_list = []
if patch_policy.critical == "approve":
severity_list.append("Critical")
if patch_policy.important == "approve":
severity_list.append("Important")
if patch_policy.moderate == "approve":
severity_list.append("Moderate")
if patch_policy.low == "approve":
severity_list.append("Low")
if patch_policy.other == "approve":
severity_list.append("")
self.winupdates.filter(severity__in=severity_list, installed=False).exclude(
action="approve"
).update(action="approve")
# returns agent policy merged with a client or site specific policy
def get_patch_policy(self) -> "WinUpdatePolicy":
from winupdate.models import WinUpdatePolicy
# check if site has a patch policy and if so use it
patch_policy = None
agent_policy = self.winupdatepolicy.first()
if not agent_policy:
agent_policy = WinUpdatePolicy.objects.create(agent=self)
# Get the list of policies applied to the agent and select the
# highest priority one.
policies = self.get_agent_policies()
for _, policy in policies.items():
if policy and policy.active and policy.winupdatepolicy.exists():
patch_policy = policy.winupdatepolicy.first()
break
# if policy still doesn't exist return the agent patch policy
if not patch_policy:
return agent_policy
# patch policy exists. check if any agent settings are set to override patch policy
if agent_policy.critical != "inherit":
patch_policy.critical = agent_policy.critical
if agent_policy.important != "inherit":
patch_policy.important = agent_policy.important
if agent_policy.moderate != "inherit":
patch_policy.moderate = agent_policy.moderate
if agent_policy.low != "inherit":
patch_policy.low = agent_policy.low
if agent_policy.other != "inherit":
patch_policy.other = agent_policy.other
if agent_policy.run_time_frequency != "inherit":
patch_policy.run_time_frequency = agent_policy.run_time_frequency
patch_policy.run_time_hour = agent_policy.run_time_hour
patch_policy.run_time_days = agent_policy.run_time_days
if agent_policy.reboot_after_install != "inherit":
patch_policy.reboot_after_install = agent_policy.reboot_after_install
if not agent_policy.reprocess_failed_inherit:
patch_policy.reprocess_failed = agent_policy.reprocess_failed
patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
patch_policy.email_if_fail = agent_policy.email_if_fail
return patch_policy
def get_approved_update_guids(self) -> list[str]:
return list(
self.winupdates.filter(action="approve", installed=False).values_list( # type: ignore
"guid", flat=True
)
)
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
core = get_core_settings()
policies = self.get_agent_policies()
# loop through all policies applied to agent and return an alert_template if found
processed_policies: List[int] = []
for key, policy in policies.items():
# default alert_template will override a default policy with alert template applied
if (
"default" in key
and core.alert_template
and core.alert_template.is_active
and not core.alert_template.is_agent_excluded(self)
):
self.alert_template = core.alert_template
self.save(update_fields=["alert_template"])
return core.alert_template
elif (
policy
and policy.active
and policy.pk not in processed_policies
and policy.alert_template
and policy.alert_template.is_active
and not policy.alert_template.is_agent_excluded(self)
):
self.alert_template = policy.alert_template
self.save(update_fields=["alert_template"])
return policy.alert_template
elif (
"site" in key
and self.site.alert_template
and self.site.alert_template.is_active
and not self.site.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.alert_template
self.save(update_fields=["alert_template"])
return self.site.alert_template
elif (
"client" in key
and self.site.client.alert_template
and self.site.client.alert_template.is_active
and not self.site.client.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.client.alert_template
self.save(update_fields=["alert_template"])
return self.site.client.alert_template
# no alert templates found or agent has been excluded
self.alert_template = None
self.save(update_fields=["alert_template"])
return None
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_availability_alert(
self, skip_create=not self.should_create_alert(alert_template)
)
def get_checks_from_policies(self) -> "List[Check]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance or self.agentchecks.exists():
cache_key = f"agent_{self.agent_id}_checks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"
cached_checks = cache.get(cache_key)
if isinstance(cached_checks, list):
return cached_checks
else:
# clear agent checks that have overridden_by_policy set
self.agentchecks.update(overridden_by_policy=False) # type: ignore
# get agent checks based on policies
checks = Policy.get_policy_checks(self)
cache.set(cache_key, checks, 600)
return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance:
cache_key = f"agent_{self.agent_id}_tasks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"
cached_tasks = cache.get(cache_key)
if isinstance(cached_tasks, list):
return cached_tasks
else:
# get agent tasks based on policies
tasks = Policy.get_policy_tasks(self)
cache.set(cache_key, tasks, 600)
return tasks
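    # Sends a msgpack-encoded command to this agent on its NATS subject (the agent_id).
    # With wait=True this is a request/reply: it returns the decoded response, "timeout"
    # if the agent does not answer within the timeout, or "natsdown" if the NATS
    # connection cannot be established; with wait=False it just publishes and returns None.
    # Illustrative usage: asyncio.run(agent.nats_cmd({"func": "ping"}, timeout=2)) == "pong"
    # when the agent is reachable.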
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
opts = setup_nats_options()
try:
nc = await nats.connect(**opts)
except:
return "natsdown"
if wait:
try:
msg = await nc.request(
self.agent_id, msgpack.dumps(data), timeout=timeout
)
except TimeoutError:
ret = "timeout"
else:
try:
ret = msgpack.loads(msg.data)
except Exception as e:
ret = str(e)
logger.error(e)
await nc.close()
return ret
else:
await nc.publish(self.agent_id, msgpack.dumps(data))
await nc.flush()
await nc.close()
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
"""
Return type: tuple(message: str, error: bool)
"""
if mode == "tacagent":
if self.plat == AgentPlat.LINUX:
cmd = "systemctl restart tacticalagent.service"
shell = 3
elif self.plat == AgentPlat.DARWIN:
cmd = "launchctl kickstart -k system/tacticalagent"
shell = 3
else:
cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
shell = 1
asyncio.run(
send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
)
return "ok", False
elif mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
if wait:
r = asyncio.run(self.nats_cmd(data, timeout=20))
if r == "ok":
return "ok", False
else:
return str(r), True
else:
asyncio.run(self.nats_cmd(data, timeout=20, wait=False))
return "ok", False
return "invalid", True
def serialize(agent: "Agent") -> Dict[str, Any]:
# serializes the agent and returns json
from .serializers import AgentAuditSerializer
return AgentAuditSerializer(agent).data
def delete_superseded_updates(self) -> None:
with suppress(Exception):
pks = [] # list of pks to delete
kbs = list(self.winupdates.values_list("kb", flat=True))
d = Counter(kbs)
dupes = [k for k, v in d.items() if v > 1]
for dupe in dupes:
titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
# extract the version from the title and sort from oldest to newest
                # skip if no version info is available, since there is nothing to parse
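                # e.g. a title containing "(Version 21H2)" yields "21H2" (illustrative example)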
try:
matches = r"(Version|Versão)"
pattern = r"\(" + matches + r"(.*?)\)"
vers = [
re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
for i in titles
]
sorted_vers = sorted(vers, key=LooseVersion)
except:
continue
# append all but the latest version to our list of pks to delete
for ver in sorted_vers[:-1]:
q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
pks.append(q.first().pk)
pks = list(set(pks))
self.winupdates.filter(pk__in=pks).delete()
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
return bool(
self.overdue_dashboard_alert
or self.overdue_email_alert
or self.overdue_text_alert
or (
alert_template
and (
alert_template.agent_always_alert
or alert_template.agent_always_email
or alert_template.agent_always_text
)
)
)
def send_outage_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
(
f"Data has not been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"within the expected time."
),
alert_template=self.alert_template,
)
def send_recovery_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
(
f"Data has been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"after an interruption in data transmission."
),
alert_template=self.alert_template,
)
def send_outage_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
alert_template=self.alert_template,
)
def send_recovery_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
alert_template=self.alert_template,
)
class AutomatedTask(BaseAuditModel):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="autotasks",
on_delete=models.CASCADE,
null=True,
blank=True,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="autotasks",
null=True,
blank=True,
on_delete=models.CASCADE,
)
custom_field = models.ForeignKey(
"core.CustomField",
related_name="autotasks",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
# format -> [{"type": "script", "script": 1, "name": "Script Name", "timeout": 90, "script_args": [], "env_vars": []}, {"type": "cmd", "command": "whoami", "timeout": 90}]
actions = JSONField(default=list)
assigned_check = models.ForeignKey(
"checks.Check",
null=True,
blank=True,
related_name="assignedtasks",
on_delete=models.SET_NULL,
)
name = models.CharField(max_length=255)
collector_all_output = models.BooleanField(default=False)
enabled = models.BooleanField(default=True)
continue_on_error = models.BooleanField(default=True)
alert_severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_alert = models.BooleanField(default=False)
text_alert = models.BooleanField(default=False)
dashboard_alert = models.BooleanField(default=False)
# options sent to agent for task creation
# general task settings
task_type = models.CharField(
max_length=100, choices=TaskType.choices, default=TaskType.MANUAL
)
win_task_name = models.CharField(
max_length=255, unique=True, blank=True, default=generate_task_name
    )
run_time_date = DateTimeField(null=True, blank=True)
expire_date = DateTimeField(null=True, blank=True)
# daily
daily_interval = models.PositiveSmallIntegerField(
blank=True, null=True, validators=[MinValueValidator(1), MaxValueValidator(255)]
)
# weekly
run_time_bit_weekdays = models.IntegerField(null=True, blank=True)
weekly_interval = models.PositiveSmallIntegerField(
blank=True, null=True, validators=[MinValueValidator(1), MaxValueValidator(52)]
)
run_time_minute = models.CharField(
max_length=5, null=True, blank=True
) # deprecated
# monthly
monthly_days_of_month = models.PositiveBigIntegerField(blank=True, null=True)
monthly_months_of_year = models.PositiveIntegerField(blank=True, null=True)
# monthly days of week
monthly_weeks_of_month = models.PositiveSmallIntegerField(blank=True, null=True)
# additional task settings
task_repetition_duration = models.CharField(max_length=10, null=True, blank=True)
task_repetition_interval = models.CharField(max_length=10, null=True, blank=True)
stop_task_at_duration_end = models.BooleanField(blank=True, default=False)
random_task_delay = models.CharField(max_length=10, null=True, blank=True)
remove_if_not_scheduled = models.BooleanField(default=False)
run_asap_after_missed = models.BooleanField(default=False) # added in agent v1.4.7
task_instance_policy = models.PositiveSmallIntegerField(blank=True, default=1)
# deprecated
managed_by_policy = models.BooleanField(default=False)
# non-database property
task_result: "Union[TaskResult, Dict[None, None]]" = {}
def __str__(self) -> str:
return self.name
def save(self, *args, **kwargs) -> None:
# if task is a policy task clear cache on everything
if self.policy:
cache.delete_many_pattern("site_*_tasks")
cache.delete_many_pattern("agent_*_tasks")
# get old task if exists
old_task = AutomatedTask.objects.get(pk=self.pk) if self.pk else None
super().save(old_model=old_task, *args, **kwargs)
# check if fields were updated that require a sync to the agent and set status to notsynced
if old_task:
for field in self.fields_that_trigger_task_update_on_agent:
if getattr(self, field) != getattr(old_task, field):
if self.policy:
TaskResult.objects.exclude(
sync_status=TaskSyncStatus.INITIAL
).filter(task__policy_id=self.policy.id).update(
sync_status=TaskSyncStatus.NOT_SYNCED
)
else:
TaskResult.objects.filter(agent=self.agent, task=self).update(
sync_status=TaskSyncStatus.NOT_SYNCED
)
def delete(self, *args, **kwargs):
# if task is a policy task clear cache on everything
if self.policy:
cache.delete_many_pattern("site_*_tasks")
cache.delete_many_pattern("agent_*_tasks")
super().delete(*args, **kwargs)
def schedule(self) -> Optional[str]:
if self.task_type == TaskType.MANUAL:
return "Manual"
elif self.task_type == TaskType.CHECK_FAILURE:
return "Every time check fails"
elif self.task_type == TaskType.RUN_ONCE:
return f'Run once on {self.run_time_date.strftime("%m/%d/%Y %I:%M%p")}'
elif self.task_type == TaskType.DAILY:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
if self.daily_interval == 1:
return f"Daily at {run_time_nice}"
else:
return f"Every {self.daily_interval} days at {run_time_nice}"
elif self.task_type == TaskType.WEEKLY:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
days = bitdays_to_string(self.run_time_bit_weekdays)
            if self.weekly_interval == 1:
                return f"{days} at {run_time_nice}"
            else:
                return f"{days} at {run_time_nice} every {self.weekly_interval} weeks"
elif self.task_type == TaskType.MONTHLY:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
months = bitmonths_to_string(self.monthly_months_of_year)
days = bitmonthdays_to_string(self.monthly_days_of_month)
return f"Runs on {months} on days {days} at {run_time_nice}"
elif self.task_type == TaskType.MONTHLY_DOW:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
months = bitmonths_to_string(self.monthly_months_of_year)
weeks = bitweeks_to_string(self.monthly_weeks_of_month)
days = bitdays_to_string(self.run_time_bit_weekdays)
return f"Runs on {months} on {weeks} on {days} at {run_time_nice}"
elif self.task_type == TaskType.ONBOARDING:
return "Onboarding: Runs once on task creation."
return None
    @property
    def fields_that_trigger_task_update_on_agent(self) -> List[str]:
return FIELDS_TRIGGER_TASK_UPDATE_AGENT
    @staticmethod
    def serialize(task):
# serializes the task and returns json
from .serializers import TaskAuditSerializer
return TaskAuditSerializer(task).data
def create_policy_task(
self, policy: "Policy", assigned_check: "Optional[Check]" = None
) -> None:
# Copies certain properties on this task (self) to a new task and sets it to the supplied Policy
task = AutomatedTask.objects.create(
policy=policy,
assigned_check=assigned_check,
)
for field in POLICY_TASK_FIELDS_TO_COPY:
setattr(task, field, getattr(self, field))
task.save()
# agent version >= 1.8.0
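    # Builds the "schedtask" payload sent to the agent's task scheduler. Check-failure
    # tasks are sent with a manual trigger (the server fires them when the check fails),
    # and run-once tasks always set start_when_available so a missed start time still runs.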
def generate_nats_task_payload(self) -> Dict[str, Any]:
task = {
"pk": self.pk,
"type": "rmm",
"name": self.win_task_name,
"overwrite_task": True,
"enabled": self.enabled,
"trigger": (
self.task_type
if self.task_type != TaskType.CHECK_FAILURE
else TaskType.MANUAL
),
"multiple_instances": self.task_instance_policy or 0,
"delete_expired_task_after": (
self.remove_if_not_scheduled if self.expire_date else False
),
"start_when_available": (
self.run_asap_after_missed
if self.task_type != TaskType.RUN_ONCE
else True
),
}
if self.task_type in (
TaskType.DAILY,
TaskType.WEEKLY,
TaskType.MONTHLY,
TaskType.MONTHLY_DOW,
TaskType.RUN_ONCE,
):
if not self.run_time_date:
self.run_time_date = djangotime.now()
task["start_year"] = self.run_time_date.year
task["start_month"] = self.run_time_date.month
task["start_day"] = self.run_time_date.day
task["start_hour"] = self.run_time_date.hour
task["start_min"] = self.run_time_date.minute
if self.expire_date:
task["expire_year"] = self.expire_date.year
task["expire_month"] = self.expire_date.month
task["expire_day"] = self.expire_date.day
task["expire_hour"] = self.expire_date.hour
task["expire_min"] = self.expire_date.minute
if self.random_task_delay:
task["random_delay"] = convert_to_iso_duration(self.random_task_delay)
if self.task_repetition_interval and self.task_repetition_duration:
task["repetition_interval"] = convert_to_iso_duration(
self.task_repetition_interval
)
task["repetition_duration"] = convert_to_iso_duration(
self.task_repetition_duration
)
task["stop_at_duration_end"] = self.stop_task_at_duration_end
if self.task_type == TaskType.DAILY:
task["day_interval"] = self.daily_interval
elif self.task_type == TaskType.WEEKLY:
task["week_interval"] = self.weekly_interval
task["days_of_week"] = self.run_time_bit_weekdays
elif self.task_type == TaskType.MONTHLY:
# check if "last day is configured"
if self.monthly_days_of_month >= 0x80000000:
task["days_of_month"] = self.monthly_days_of_month - 0x80000000
task["run_on_last_day_of_month"] = True
else:
task["days_of_month"] = self.monthly_days_of_month
task["run_on_last_day_of_month"] = False
task["months_of_year"] = self.monthly_months_of_year
elif self.task_type == TaskType.MONTHLY_DOW:
task["days_of_week"] = self.run_time_bit_weekdays
task["months_of_year"] = self.monthly_months_of_year
task["weeks_of_month"] = self.monthly_weeks_of_month
return task
def create_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
nats_data = {
"func": "schedtask",
"schedtaskpayload": self.generate_nats_task_payload(),
}
logger.debug(nats_data)
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=10))
if r != "ok":
task_result.sync_status = TaskSyncStatus.INITIAL
task_result.save(update_fields=["sync_status"])
logger.error(
f"Unable to create scheduled task {self.name} on {task_result.agent.hostname}: {r}"
)
return "timeout"
else:
task_result.sync_status = TaskSyncStatus.SYNCED
task_result.save(update_fields=["sync_status"])
logger.info(
f"{task_result.agent.hostname} task {self.name} was successfully created."
)
return "ok"
def modify_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
nats_data = {
"func": "schedtask",
"schedtaskpayload": self.generate_nats_task_payload(),
}
logger.debug(nats_data)
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=10))
if r != "ok":
task_result.sync_status = TaskSyncStatus.NOT_SYNCED
task_result.save(update_fields=["sync_status"])
logger.error(
f"Unable to modify scheduled task {self.name} on {task_result.agent.hostname}: {r}"
)
return "timeout"
else:
task_result.sync_status = TaskSyncStatus.SYNCED
task_result.save(update_fields=["sync_status"])
logger.info(
f"{task_result.agent.hostname} task {self.name} was successfully modified."
)
return "ok"
def delete_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
nats_data = {
"func": "delschedtask",
"schedtaskpayload": {"name": self.win_task_name},
}
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=10))
if r != "ok" and "The system cannot find the file specified" not in r:
task_result.sync_status = TaskSyncStatus.PENDING_DELETION
with suppress(DatabaseError):
task_result.save(update_fields=["sync_status"])
logger.error(
f"Unable to delete task {self.name} on {task_result.agent.hostname}: {r}"
)
return "timeout"
else:
self.delete()
logger.info(f"{task_result.agent.hostname} task {self.name} was deleted.")
return "ok"
def run_win_task(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
asyncio.run(
task_result.agent.nats_cmd(
{"func": "runtask", "taskpk": self.pk}, wait=False
)
)
return "ok"
def should_create_alert(self, alert_template=None):
return (
self.dashboard_alert
or self.email_alert
or self.text_alert
or (
alert_template
and (
alert_template.task_always_alert
or alert_template.task_always_email
or alert_template.task_always_text
)
)
)
def delete_win_task_schedule(pk: int, agent_id: Optional[str] = None) -> str:
with suppress(
AutomatedTask.DoesNotExist,
Agent.DoesNotExist,
):
task = AutomatedTask.objects.get(pk=pk)
if agent_id:
task.delete_task_on_agent(Agent.objects.get(agent_id=agent_id))
else:
task.delete_task_on_agent()
return "ok" | null |
import asyncio
import datetime as dt
from collections import namedtuple
from contextlib import suppress
from time import sleep
from typing import TYPE_CHECKING, Optional, Union
import msgpack
import nats
from django.utils import timezone as djangotime
from nats.errors import TimeoutError
from agents.models import Agent
from alerts.models import Alert
from autotasks.models import AutomatedTask, TaskResult
from tacticalrmm.celery import app
from tacticalrmm.constants import AGENT_STATUS_ONLINE, ORPHANED_WIN_TASK_LOCK
from tacticalrmm.helpers import rand_range, setup_nats_options
from tacticalrmm.utils import redis_lock
class Agent(BaseAuditModel):
class Meta:
indexes = [
models.Index(fields=["monitoring_type"]),
]
objects = PermissionQuerySet.as_manager()
version = models.CharField(default="0.1.0", max_length=255)
operating_system = models.CharField(null=True, blank=True, max_length=255)
plat: "AgentPlat" = models.CharField( # type: ignore
max_length=255, choices=AgentPlat.choices, default=AgentPlat.WINDOWS
)
goarch: "GoArch" = models.CharField( # type: ignore
max_length=255, choices=GoArch.choices, null=True, blank=True
)
hostname = models.CharField(max_length=255)
agent_id = models.CharField(max_length=200, unique=True)
last_seen = models.DateTimeField(null=True, blank=True)
services = models.JSONField(null=True, blank=True)
public_ip = models.CharField(null=True, max_length=255)
total_ram = models.IntegerField(null=True, blank=True)
disks = models.JSONField(null=True, blank=True)
boot_time = models.FloatField(null=True, blank=True)
logged_in_username = models.CharField(null=True, blank=True, max_length=255)
last_logged_in_user = models.CharField(null=True, blank=True, max_length=255)
monitoring_type = models.CharField(
max_length=30, choices=AgentMonType.choices, default=AgentMonType.SERVER
)
description = models.CharField(null=True, blank=True, max_length=255)
mesh_node_id = models.CharField(null=True, blank=True, max_length=255)
overdue_email_alert = models.BooleanField(default=False)
overdue_text_alert = models.BooleanField(default=False)
overdue_dashboard_alert = models.BooleanField(default=False)
offline_time = models.PositiveIntegerField(default=4)
overdue_time = models.PositiveIntegerField(default=30)
check_interval = models.PositiveIntegerField(default=120)
needs_reboot = models.BooleanField(default=False)
choco_installed = models.BooleanField(default=False)
wmi_detail = models.JSONField(null=True, blank=True)
patches_last_installed = models.DateTimeField(null=True, blank=True)
time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, null=True, blank=True
)
maintenance_mode = models.BooleanField(default=False)
block_policy_inheritance = models.BooleanField(default=False)
alert_template = models.ForeignKey(
"alerts.AlertTemplate",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
site = models.ForeignKey(
"clients.Site",
related_name="agents",
on_delete=models.RESTRICT,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="agents",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
def __str__(self) -> str:
return self.hostname
def save(self, *args, **kwargs):
# prevent recursion since calling set_alert_template() also calls save()
if not hasattr(self, "_processing_set_alert_template"):
self._processing_set_alert_template = False
if self.pk and not self._processing_set_alert_template:
orig = Agent.objects.get(pk=self.pk)
mon_type_changed = self.monitoring_type != orig.monitoring_type
site_changed = self.site_id != orig.site_id
if mon_type_changed or site_changed:
self._processing_set_alert_template = True
self.set_alert_template()
self._processing_set_alert_template = False
super().save(*args, **kwargs)
    @property
    def client(self) -> "Client":
return self.site.client
def timezone(self) -> str:
        # return the default timezone unless the timezone is explicitly set per agent
if self.time_zone:
return self.time_zone
return get_core_settings().default_time_zone
    @property
    def is_posix(self) -> bool:
return self.plat in {AgentPlat.LINUX, AgentPlat.DARWIN}
# DEPRECATED, use goarch instead
def arch(self) -> Optional[str]:
if self.is_posix:
return self.goarch
if self.operating_system is not None:
if "64 bit" in self.operating_system or "64bit" in self.operating_system:
return "64"
elif "32 bit" in self.operating_system or "32bit" in self.operating_system:
return "32"
return None
def do_update(self, *, token: str = "", force: bool = False) -> str:
ver = settings.LATEST_AGENT_VER
if not self.goarch:
DebugLog.warning(
agent=self,
log_type=DebugLogType.AGENT_ISSUES,
message=f"Unable to determine arch on {self.hostname}({self.agent_id}). Skipping agent update.",
)
return "noarch"
if pyver.parse(self.version) <= pyver.parse("1.3.0"):
return "not supported"
url = get_agent_url(goarch=self.goarch, plat=self.plat, token=token)
bin = f"tacticalagent-v{ver}-{self.plat}-{self.goarch}.exe"
if not force:
if self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).exists():
self.pendingactions.filter( # type: ignore
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).delete()
PendingAction.objects.create(
agent=self,
action_type=PAAction.AGENT_UPDATE,
details={
"url": url,
"version": ver,
"inno": bin,
},
)
nats_data = {
"func": "agentupdate",
"payload": {
"url": url,
"version": ver,
"inno": bin,
},
}
asyncio.run(self.nats_cmd(nats_data, wait=False))
return "created"
    @property
    def status(self) -> str:
now = djangotime.now()
offline = now - djangotime.timedelta(minutes=self.offline_time)
overdue = now - djangotime.timedelta(minutes=self.overdue_time)
if self.last_seen is not None:
if (self.last_seen < offline) and (self.last_seen > overdue):
return AGENT_STATUS_OFFLINE
elif (self.last_seen < offline) and (self.last_seen < overdue):
return AGENT_STATUS_OVERDUE
else:
return AGENT_STATUS_ONLINE
else:
return AGENT_STATUS_OFFLINE
def checks(self) -> Dict[str, Any]:
total, passing, failing, warning, info = 0, 0, 0, 0, 0
for check in self.get_checks_with_policies(exclude_overridden=True):
total += 1
if (
not hasattr(check.check_result, "status")
or isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.PASSING
):
passing += 1
elif (
isinstance(check.check_result, CheckResult)
and check.check_result.status == CheckStatus.FAILING
):
alert_severity = (
check.check_result.alert_severity
if check.check_type
in (
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
)
else check.alert_severity
)
if alert_severity == AlertSeverity.ERROR:
failing += 1
elif alert_severity == AlertSeverity.WARNING:
warning += 1
elif alert_severity == AlertSeverity.INFO:
info += 1
ret = {
"total": total,
"passing": passing,
"failing": failing,
"warning": warning,
"info": info,
"has_failing_checks": failing > 0 or warning > 0,
}
return ret
def pending_actions_count(self) -> int:
ret = cache.get(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}")
if ret is None:
ret = self.pendingactions.filter(status=PAStatus.PENDING).count()
cache.set(f"{AGENT_TBL_PEND_ACTION_CNT_CACHE_PREFIX}{self.pk}", ret, 600)
return ret
def cpu_model(self) -> List[str]:
if self.is_posix:
try:
return cast(List[str], self.wmi_detail["cpus"])
except:
return ["unknown cpu model"]
ret = []
try:
cpus = self.wmi_detail["cpu"]
for cpu in cpus:
name = [x["Name"] for x in cpu if "Name" in x][0]
lp, nc = "", ""
with suppress(Exception):
lp = [
x["NumberOfLogicalProcessors"]
for x in cpu
if "NumberOfCores" in x
][0]
nc = [x["NumberOfCores"] for x in cpu if "NumberOfCores" in x][0]
if lp and nc:
cpu_string = f"{name}, {nc}C/{lp}T"
else:
cpu_string = name
ret.append(cpu_string)
return ret
except:
return ["unknown cpu model"]
def graphics(self) -> str:
if self.is_posix:
try:
if not self.wmi_detail["gpus"]:
return "No graphics cards"
return ", ".join(self.wmi_detail["gpus"])
except:
return "Error getting graphics cards"
ret, mrda = [], []
try:
graphics = self.wmi_detail["graphics"]
for i in graphics:
caption = [x["Caption"] for x in i if "Caption" in x][0]
if "microsoft remote display adapter" in caption.lower():
mrda.append("yes")
continue
ret.append([x["Caption"] for x in i if "Caption" in x][0])
# only return this if no other graphics cards
if not ret and mrda:
return "Microsoft Remote Display Adapter"
return ", ".join(ret)
except:
return "Graphics info requires agent v1.4.14"
def local_ips(self) -> str:
if self.is_posix:
try:
return ", ".join(self.wmi_detail["local_ips"])
except:
return "error getting local ips"
ret = []
try:
ips = self.wmi_detail["network_config"]
except:
return "error getting local ips"
for i in ips:
try:
addr = [x["IPAddress"] for x in i if "IPAddress" in x][0]
except:
continue
if addr is None:
continue
for ip in addr:
if validators.ipv4(ip):
ret.append(ip)
if len(ret) == 1:
return cast(str, ret[0])
return ", ".join(ret) if ret else "error getting local ips"
def make_model(self) -> str:
if self.is_posix:
try:
return cast(str, self.wmi_detail["make_model"])
except:
return "error getting make/model"
with suppress(Exception):
comp_sys = self.wmi_detail["comp_sys"][0]
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
make = [x["Vendor"] for x in comp_sys_prod if "Vendor" in x][0]
model = [x["Model"] for x in comp_sys if "Model" in x][0]
if "to be filled" in model.lower():
mobo = self.wmi_detail["base_board"][0]
make = [x["Manufacturer"] for x in mobo if "Manufacturer" in x][0]
model = [x["Product"] for x in mobo if "Product" in x][0]
if make.lower() == "lenovo":
sysfam = [x["SystemFamily"] for x in comp_sys if "SystemFamily" in x][0]
if "to be filled" not in sysfam.lower():
model = sysfam
return f"{make} {model}"
with suppress(Exception):
comp_sys_prod = self.wmi_detail["comp_sys_prod"][0]
return cast(str, [x["Version"] for x in comp_sys_prod if "Version" in x][0])
return "unknown make/model"
def physical_disks(self) -> Sequence[Disk]:
if self.is_posix:
try:
return cast(List[Disk], self.wmi_detail["disks"])
except:
return ["unknown disk"]
try:
disks = self.wmi_detail["disk"]
ret = []
for disk in disks:
interface_type = [
x["InterfaceType"] for x in disk if "InterfaceType" in x
][0]
if interface_type == "USB":
continue
model = [x["Caption"] for x in disk if "Caption" in x][0]
size = [x["Size"] for x in disk if "Size" in x][0]
size_in_gb = round(int(size) / 1_073_741_824)
ret.append(f"{model} {size_in_gb:,}GB {interface_type}")
return ret
except:
return ["unknown disk"]
def serial_number(self) -> str:
if self.is_posix:
try:
return self.wmi_detail["serialnumber"]
except:
return ""
try:
return self.wmi_detail["bios"][0][0]["SerialNumber"]
except:
return ""
def hex_mesh_node_id(self) -> str:
return _b64_to_hex(self.mesh_node_id)
    @classmethod
    def online_agents(cls, min_version: str = "") -> "List[Agent]":
if min_version:
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if pyver.parse(i.version) >= pyver.parse(min_version)
and i.status == AGENT_STATUS_ONLINE
]
return [
i
for i in cls.objects.only(*ONLINE_AGENTS)
if i.status == AGENT_STATUS_ONLINE
]
def is_supported_script(self, platforms: List[str]) -> bool:
return self.plat.lower() in platforms if platforms else True
def get_checks_with_policies(
self, exclude_overridden: bool = False
) -> "List[Check]":
if exclude_overridden:
checks = (
list(
check
for check in self.agentchecks.all()
if not check.overridden_by_policy
)
+ self.get_checks_from_policies()
)
else:
checks = list(self.agentchecks.all()) + self.get_checks_from_policies()
return self.add_check_results(checks)
def get_tasks_with_policies(self) -> "List[AutomatedTask]":
tasks = list(self.autotasks.all()) + self.get_tasks_from_policies()
return self.add_task_results(tasks)
def add_task_results(self, tasks: "List[AutomatedTask]") -> "List[AutomatedTask]":
results = self.taskresults.all() # type: ignore
for task in tasks:
for result in results:
if result.task.id == task.pk:
task.task_result = result
break
return tasks
def add_check_results(self, checks: "List[Check]") -> "List[Check]":
results = self.checkresults.all() # type: ignore
for check in checks:
for result in results:
if result.assigned_check.id == check.pk:
check.check_result = result
break
return checks
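    # Resolves the four policy levels that can apply to this agent (agent, site, client,
    # default) for its monitoring type, returning None for a level when that policy is
    # missing, excludes this agent, or when inheritance is blocked at the agent, site,
    # or client level.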
def get_agent_policies(self) -> "Dict[str, Optional[Policy]]":
from checks.models import Check
site_policy = getattr(self.site, f"{self.monitoring_type}_policy", None)
client_policy = getattr(self.client, f"{self.monitoring_type}_policy", None)
default_policy = getattr(
get_core_settings(), f"{self.monitoring_type}_policy", None
)
        # prefetch excluded objects on policies, but only for policies that are not None
models.prefetch_related_objects(
[
policy
for policy in (self.policy, site_policy, client_policy, default_policy)
if policy
],
"excluded_agents",
"excluded_sites",
"excluded_clients",
models.Prefetch(
"policychecks", queryset=Check.objects.select_related("script")
),
"autotasks",
)
return {
"agent_policy": (
self.policy
if self.policy and not self.policy.is_agent_excluded(self)
else None
),
"site_policy": (
site_policy
if (site_policy and not site_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
else None
),
"client_policy": (
client_policy
if (client_policy and not client_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
else None
),
"default_policy": (
default_policy
if (default_policy and not default_policy.is_agent_excluded(self))
and not self.block_policy_inheritance
and not self.site.block_policy_inheritance
and not self.client.block_policy_inheritance
else None
),
}
def check_run_interval(self) -> int:
interval = self.check_interval
# determine if any agent checks have a custom interval and set the lowest interval
for check in self.get_checks_with_policies():
if check.run_interval and check.run_interval < interval:
# don't allow check runs less than 15s
interval = 15 if check.run_interval < 15 else check.run_interval
return interval
def run_script(
self,
scriptpk: int,
args: List[str] = [],
timeout: int = 120,
full: bool = False,
wait: bool = False,
run_on_any: bool = False,
history_pk: int = 0,
run_as_user: bool = False,
env_vars: list[str] = [],
) -> Any:
from scripts.models import Script
script = Script.objects.get(pk=scriptpk)
# always override if set on script model
if script.run_as_user:
run_as_user = True
parsed_args = script.parse_script_args(self, script.shell, args)
parsed_env_vars = script.parse_script_env_vars(self, script.shell, env_vars)
data = {
"func": "runscriptfull" if full else "runscript",
"timeout": timeout,
"script_args": parsed_args,
"payload": {
"code": script.code,
"shell": script.shell,
},
"run_as_user": run_as_user,
"env_vars": parsed_env_vars,
"nushell_enable_config": settings.NUSHELL_ENABLE_CONFIG,
"deno_default_permissions": settings.DENO_DEFAULT_PERMISSIONS,
}
if history_pk != 0:
data["id"] = history_pk
running_agent = self
if run_on_any:
nats_ping = {"func": "ping"}
# try on self first
r = asyncio.run(self.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = self
else:
for agent in Agent.online_agents():
r = asyncio.run(agent.nats_cmd(nats_ping, timeout=1))
if r == "pong":
running_agent = agent
break
if running_agent.pk == self.pk:
return "Unable to find an online agent"
if wait:
return asyncio.run(running_agent.nats_cmd(data, timeout=timeout, wait=True))
else:
asyncio.run(running_agent.nats_cmd(data, wait=False))
return "ok"
# auto approves updates
def approve_updates(self) -> None:
patch_policy = self.get_patch_policy()
severity_list = []
if patch_policy.critical == "approve":
severity_list.append("Critical")
if patch_policy.important == "approve":
severity_list.append("Important")
if patch_policy.moderate == "approve":
severity_list.append("Moderate")
if patch_policy.low == "approve":
severity_list.append("Low")
if patch_policy.other == "approve":
severity_list.append("")
self.winupdates.filter(severity__in=severity_list, installed=False).exclude(
action="approve"
).update(action="approve")
    # returns the highest-priority policy patch policy merged with any agent-level overrides (or the agent's own patch policy if none applies)
def get_patch_policy(self) -> "WinUpdatePolicy":
from winupdate.models import WinUpdatePolicy
# check if site has a patch policy and if so use it
patch_policy = None
agent_policy = self.winupdatepolicy.first()
if not agent_policy:
agent_policy = WinUpdatePolicy.objects.create(agent=self)
# Get the list of policies applied to the agent and select the
# highest priority one.
policies = self.get_agent_policies()
for _, policy in policies.items():
if policy and policy.active and policy.winupdatepolicy.exists():
patch_policy = policy.winupdatepolicy.first()
break
# if policy still doesn't exist return the agent patch policy
if not patch_policy:
return agent_policy
# patch policy exists. check if any agent settings are set to override patch policy
if agent_policy.critical != "inherit":
patch_policy.critical = agent_policy.critical
if agent_policy.important != "inherit":
patch_policy.important = agent_policy.important
if agent_policy.moderate != "inherit":
patch_policy.moderate = agent_policy.moderate
if agent_policy.low != "inherit":
patch_policy.low = agent_policy.low
if agent_policy.other != "inherit":
patch_policy.other = agent_policy.other
if agent_policy.run_time_frequency != "inherit":
patch_policy.run_time_frequency = agent_policy.run_time_frequency
patch_policy.run_time_hour = agent_policy.run_time_hour
patch_policy.run_time_days = agent_policy.run_time_days
if agent_policy.reboot_after_install != "inherit":
patch_policy.reboot_after_install = agent_policy.reboot_after_install
if not agent_policy.reprocess_failed_inherit:
patch_policy.reprocess_failed = agent_policy.reprocess_failed
patch_policy.reprocess_failed_times = agent_policy.reprocess_failed_times
patch_policy.email_if_fail = agent_policy.email_if_fail
return patch_policy
def get_approved_update_guids(self) -> list[str]:
return list(
self.winupdates.filter(action="approve", installed=False).values_list( # type: ignore
"guid", flat=True
)
)
# sets alert template assigned in the following order: policy, site, client, global
# sets None if nothing is found
def set_alert_template(self) -> "Optional[AlertTemplate]":
core = get_core_settings()
policies = self.get_agent_policies()
# loop through all policies applied to agent and return an alert_template if found
processed_policies: List[int] = []
for key, policy in policies.items():
# default alert_template will override a default policy with alert template applied
if (
"default" in key
and core.alert_template
and core.alert_template.is_active
and not core.alert_template.is_agent_excluded(self)
):
self.alert_template = core.alert_template
self.save(update_fields=["alert_template"])
return core.alert_template
elif (
policy
and policy.active
and policy.pk not in processed_policies
and policy.alert_template
and policy.alert_template.is_active
and not policy.alert_template.is_agent_excluded(self)
):
self.alert_template = policy.alert_template
self.save(update_fields=["alert_template"])
return policy.alert_template
elif (
"site" in key
and self.site.alert_template
and self.site.alert_template.is_active
and not self.site.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.alert_template
self.save(update_fields=["alert_template"])
return self.site.alert_template
elif (
"client" in key
and self.site.client.alert_template
and self.site.client.alert_template.is_active
and not self.site.client.alert_template.is_agent_excluded(self)
):
self.alert_template = self.site.client.alert_template
self.save(update_fields=["alert_template"])
return self.site.client.alert_template
# no alert templates found or agent has been excluded
self.alert_template = None
self.save(update_fields=["alert_template"])
return None
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_availability_alert(
self, skip_create=not self.should_create_alert(alert_template)
)
def get_checks_from_policies(self) -> "List[Check]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance or self.agentchecks.exists():
cache_key = f"agent_{self.agent_id}_checks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_checks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_checks"
cached_checks = cache.get(cache_key)
if isinstance(cached_checks, list):
return cached_checks
else:
# clear agent checks that have overridden_by_policy set
self.agentchecks.update(overridden_by_policy=False) # type: ignore
# get agent checks based on policies
checks = Policy.get_policy_checks(self)
cache.set(cache_key, checks, 600)
return checks
def get_tasks_from_policies(self) -> "List[AutomatedTask]":
from automation.models import Policy
# check if agent is blocking inheritance
if self.block_policy_inheritance:
cache_key = f"agent_{self.agent_id}_tasks"
elif self.policy:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_policy_{self.policy_id}_tasks"
else:
cache_key = f"site_{self.monitoring_type}_{self.plat}_{self.site_id}_tasks"
cached_tasks = cache.get(cache_key)
if isinstance(cached_tasks, list):
return cached_tasks
else:
# get agent tasks based on policies
tasks = Policy.get_policy_tasks(self)
cache.set(cache_key, tasks, 600)
return tasks
async def nats_cmd(
self, data: Dict[Any, Any], timeout: int = 30, wait: bool = True
) -> Any:
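        """
        Send a msgpack-encoded payload to this agent's NATS subject. With
        wait=True the decoded reply is returned, "natsdown" when the
        connection fails, or "timeout" when no reply arrives in time; with
        wait=False the message is published fire-and-forget.
        """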
opts = setup_nats_options()
try:
nc = await nats.connect(**opts)
        except Exception:
return "natsdown"
if wait:
try:
msg = await nc.request(
self.agent_id, msgpack.dumps(data), timeout=timeout
)
except TimeoutError:
ret = "timeout"
else:
try:
ret = msgpack.loads(msg.data)
except Exception as e:
ret = str(e)
logger.error(e)
await nc.close()
return ret
else:
await nc.publish(self.agent_id, msgpack.dumps(data))
await nc.flush()
await nc.close()
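    # Illustrative usage sketch (hypothetical values): synchronous callers in
    # this codebase wrap the coroutine, e.g.
    #   r = asyncio.run(agent.nats_cmd({"func": "ping"}, timeout=2))
    #   online = r == "pong"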
def recover(self, mode: str, mesh_uri: str, wait: bool = True) -> tuple[str, bool]:
"""
Return type: tuple(message: str, error: bool)
"""
if mode == "tacagent":
if self.plat == AgentPlat.LINUX:
cmd = "systemctl restart tacticalagent.service"
shell = 3
elif self.plat == AgentPlat.DARWIN:
cmd = "launchctl kickstart -k system/tacticalagent"
shell = 3
else:
cmd = "net stop tacticalrmm & taskkill /F /IM tacticalrmm.exe & net start tacticalrmm"
shell = 1
asyncio.run(
send_command_with_mesh(cmd, mesh_uri, self.mesh_node_id, shell, 0)
)
return "ok", False
elif mode == "mesh":
data = {"func": "recover", "payload": {"mode": mode}}
if wait:
r = asyncio.run(self.nats_cmd(data, timeout=20))
if r == "ok":
return "ok", False
else:
return str(r), True
else:
asyncio.run(self.nats_cmd(data, timeout=20, wait=False))
return "ok", False
return "invalid", True
def serialize(agent: "Agent") -> Dict[str, Any]:
# serializes the agent and returns json
from .serializers import AgentAuditSerializer
return AgentAuditSerializer(agent).data
def delete_superseded_updates(self) -> None:
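        """
        Remove superseded Windows updates that share a KB number: the version
        string is parsed out of each title, versions are sorted, and every
        entry except the newest one is deleted. Updates whose titles have no
        parseable version are left untouched.
        """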
with suppress(Exception):
pks = [] # list of pks to delete
kbs = list(self.winupdates.values_list("kb", flat=True))
d = Counter(kbs)
dupes = [k for k, v in d.items() if v > 1]
for dupe in dupes:
titles = self.winupdates.filter(kb=dupe).values_list("title", flat=True)
# extract the version from the title and sort from oldest to newest
                # skip if no version info is available, since there is nothing to parse
try:
matches = r"(Version|Versão)"
pattern = r"\(" + matches + r"(.*?)\)"
vers = [
re.search(pattern, i, flags=re.IGNORECASE).group(2).strip()
for i in titles
]
sorted_vers = sorted(vers, key=LooseVersion)
                except Exception:
continue
# append all but the latest version to our list of pks to delete
for ver in sorted_vers[:-1]:
q = self.winupdates.filter(kb=dupe).filter(title__contains=ver)
pks.append(q.first().pk)
pks = list(set(pks))
self.winupdates.filter(pk__in=pks).delete()
def should_create_alert(
self, alert_template: "Optional[AlertTemplate]" = None
) -> bool:
return bool(
self.overdue_dashboard_alert
or self.overdue_email_alert
or self.overdue_text_alert
or (
alert_template
and (
alert_template.agent_always_alert
or alert_template.agent_always_email
or alert_template.agent_always_text
)
)
)
def send_outage_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
(
f"Data has not been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"within the expected time."
),
alert_template=self.alert_template,
)
def send_recovery_email(self) -> None:
CORE = get_core_settings()
CORE.send_mail(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
(
f"Data has been received from client {self.client.name}, "
f"site {self.site.name}, "
f"agent {self.hostname} "
"after an interruption in data transmission."
),
alert_template=self.alert_template,
)
def send_outage_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data overdue",
alert_template=self.alert_template,
)
def send_recovery_sms(self) -> None:
CORE = get_core_settings()
CORE.send_sms(
f"{self.client.name}, {self.site.name}, {self.hostname} - data received",
alert_template=self.alert_template,
)
class AutomatedTask(BaseAuditModel):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="autotasks",
on_delete=models.CASCADE,
null=True,
blank=True,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="autotasks",
null=True,
blank=True,
on_delete=models.CASCADE,
)
custom_field = models.ForeignKey(
"core.CustomField",
related_name="autotasks",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
# format -> [{"type": "script", "script": 1, "name": "Script Name", "timeout": 90, "script_args": [], "env_vars": []}, {"type": "cmd", "command": "whoami", "timeout": 90}]
actions = JSONField(default=list)
assigned_check = models.ForeignKey(
"checks.Check",
null=True,
blank=True,
related_name="assignedtasks",
on_delete=models.SET_NULL,
)
name = models.CharField(max_length=255)
collector_all_output = models.BooleanField(default=False)
enabled = models.BooleanField(default=True)
continue_on_error = models.BooleanField(default=True)
alert_severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_alert = models.BooleanField(default=False)
text_alert = models.BooleanField(default=False)
dashboard_alert = models.BooleanField(default=False)
# options sent to agent for task creation
# general task settings
task_type = models.CharField(
max_length=100, choices=TaskType.choices, default=TaskType.MANUAL
)
win_task_name = models.CharField(
max_length=255, unique=True, blank=True, default=generate_task_name
    )
run_time_date = DateTimeField(null=True, blank=True)
expire_date = DateTimeField(null=True, blank=True)
# daily
daily_interval = models.PositiveSmallIntegerField(
blank=True, null=True, validators=[MinValueValidator(1), MaxValueValidator(255)]
)
# weekly
run_time_bit_weekdays = models.IntegerField(null=True, blank=True)
weekly_interval = models.PositiveSmallIntegerField(
blank=True, null=True, validators=[MinValueValidator(1), MaxValueValidator(52)]
)
run_time_minute = models.CharField(
max_length=5, null=True, blank=True
) # deprecated
# monthly
monthly_days_of_month = models.PositiveBigIntegerField(blank=True, null=True)
monthly_months_of_year = models.PositiveIntegerField(blank=True, null=True)
# monthly days of week
monthly_weeks_of_month = models.PositiveSmallIntegerField(blank=True, null=True)
# additional task settings
task_repetition_duration = models.CharField(max_length=10, null=True, blank=True)
task_repetition_interval = models.CharField(max_length=10, null=True, blank=True)
stop_task_at_duration_end = models.BooleanField(blank=True, default=False)
random_task_delay = models.CharField(max_length=10, null=True, blank=True)
remove_if_not_scheduled = models.BooleanField(default=False)
run_asap_after_missed = models.BooleanField(default=False) # added in agent v1.4.7
task_instance_policy = models.PositiveSmallIntegerField(blank=True, default=1)
# deprecated
managed_by_policy = models.BooleanField(default=False)
# non-database property
task_result: "Union[TaskResult, Dict[None, None]]" = {}
def __str__(self) -> str:
return self.name
def save(self, *args, **kwargs) -> None:
# if task is a policy task clear cache on everything
if self.policy:
cache.delete_many_pattern("site_*_tasks")
cache.delete_many_pattern("agent_*_tasks")
# get old task if exists
old_task = AutomatedTask.objects.get(pk=self.pk) if self.pk else None
super().save(old_model=old_task, *args, **kwargs)
# check if fields were updated that require a sync to the agent and set status to notsynced
if old_task:
for field in self.fields_that_trigger_task_update_on_agent:
if getattr(self, field) != getattr(old_task, field):
if self.policy:
TaskResult.objects.exclude(
sync_status=TaskSyncStatus.INITIAL
).filter(task__policy_id=self.policy.id).update(
sync_status=TaskSyncStatus.NOT_SYNCED
)
else:
TaskResult.objects.filter(agent=self.agent, task=self).update(
sync_status=TaskSyncStatus.NOT_SYNCED
)
def delete(self, *args, **kwargs):
# if task is a policy task clear cache on everything
if self.policy:
cache.delete_many_pattern("site_*_tasks")
cache.delete_many_pattern("agent_*_tasks")
super().delete(*args, **kwargs)
def schedule(self) -> Optional[str]:
if self.task_type == TaskType.MANUAL:
return "Manual"
elif self.task_type == TaskType.CHECK_FAILURE:
return "Every time check fails"
elif self.task_type == TaskType.RUN_ONCE:
return f'Run once on {self.run_time_date.strftime("%m/%d/%Y %I:%M%p")}'
elif self.task_type == TaskType.DAILY:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
if self.daily_interval == 1:
return f"Daily at {run_time_nice}"
else:
return f"Every {self.daily_interval} days at {run_time_nice}"
elif self.task_type == TaskType.WEEKLY:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
days = bitdays_to_string(self.run_time_bit_weekdays)
            if self.weekly_interval != 1:
                return f"{days} at {run_time_nice} every {self.weekly_interval} weeks"
            else:
                return f"{days} at {run_time_nice}"
elif self.task_type == TaskType.MONTHLY:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
months = bitmonths_to_string(self.monthly_months_of_year)
days = bitmonthdays_to_string(self.monthly_days_of_month)
return f"Runs on {months} on days {days} at {run_time_nice}"
elif self.task_type == TaskType.MONTHLY_DOW:
run_time_nice = self.run_time_date.strftime("%I:%M%p")
months = bitmonths_to_string(self.monthly_months_of_year)
weeks = bitweeks_to_string(self.monthly_weeks_of_month)
days = bitdays_to_string(self.run_time_bit_weekdays)
return f"Runs on {months} on {weeks} on {days} at {run_time_nice}"
elif self.task_type == TaskType.ONBOARDING:
return "Onboarding: Runs once on task creation."
return None
def fields_that_trigger_task_update_on_agent(self) -> List[str]:
return FIELDS_TRIGGER_TASK_UPDATE_AGENT
def serialize(task):
# serializes the task and returns json
from .serializers import TaskAuditSerializer
return TaskAuditSerializer(task).data
def create_policy_task(
self, policy: "Policy", assigned_check: "Optional[Check]" = None
) -> None:
# Copies certain properties on this task (self) to a new task and sets it to the supplied Policy
task = AutomatedTask.objects.create(
policy=policy,
assigned_check=assigned_check,
)
for field in POLICY_TASK_FIELDS_TO_COPY:
setattr(task, field, getattr(self, field))
task.save()
# agent version >= 1.8.0
def generate_nats_task_payload(self) -> Dict[str, Any]:
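        """
        Build the "schedtaskpayload" dict sent to agents (v1.8.0+) when a
        scheduled task is created or modified: trigger type, start/expiry
        fields, repetition/random-delay settings and the day/week/month
        values appropriate for the task_type.
        """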
task = {
"pk": self.pk,
"type": "rmm",
"name": self.win_task_name,
"overwrite_task": True,
"enabled": self.enabled,
"trigger": (
self.task_type
if self.task_type != TaskType.CHECK_FAILURE
else TaskType.MANUAL
),
"multiple_instances": self.task_instance_policy or 0,
"delete_expired_task_after": (
self.remove_if_not_scheduled if self.expire_date else False
),
"start_when_available": (
self.run_asap_after_missed
if self.task_type != TaskType.RUN_ONCE
else True
),
}
if self.task_type in (
TaskType.DAILY,
TaskType.WEEKLY,
TaskType.MONTHLY,
TaskType.MONTHLY_DOW,
TaskType.RUN_ONCE,
):
if not self.run_time_date:
self.run_time_date = djangotime.now()
task["start_year"] = self.run_time_date.year
task["start_month"] = self.run_time_date.month
task["start_day"] = self.run_time_date.day
task["start_hour"] = self.run_time_date.hour
task["start_min"] = self.run_time_date.minute
if self.expire_date:
task["expire_year"] = self.expire_date.year
task["expire_month"] = self.expire_date.month
task["expire_day"] = self.expire_date.day
task["expire_hour"] = self.expire_date.hour
task["expire_min"] = self.expire_date.minute
if self.random_task_delay:
task["random_delay"] = convert_to_iso_duration(self.random_task_delay)
if self.task_repetition_interval and self.task_repetition_duration:
task["repetition_interval"] = convert_to_iso_duration(
self.task_repetition_interval
)
task["repetition_duration"] = convert_to_iso_duration(
self.task_repetition_duration
)
task["stop_at_duration_end"] = self.stop_task_at_duration_end
if self.task_type == TaskType.DAILY:
task["day_interval"] = self.daily_interval
elif self.task_type == TaskType.WEEKLY:
task["week_interval"] = self.weekly_interval
task["days_of_week"] = self.run_time_bit_weekdays
elif self.task_type == TaskType.MONTHLY:
# check if "last day is configured"
if self.monthly_days_of_month >= 0x80000000:
task["days_of_month"] = self.monthly_days_of_month - 0x80000000
task["run_on_last_day_of_month"] = True
else:
task["days_of_month"] = self.monthly_days_of_month
task["run_on_last_day_of_month"] = False
task["months_of_year"] = self.monthly_months_of_year
elif self.task_type == TaskType.MONTHLY_DOW:
task["days_of_week"] = self.run_time_bit_weekdays
task["months_of_year"] = self.monthly_months_of_year
task["weeks_of_month"] = self.monthly_weeks_of_month
return task
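    # Illustrative shape (hypothetical values): for a daily task the payload
    # contains the base keys above plus "start_year"/"start_month"/"start_day"/
    # "start_hour"/"start_min" taken from run_time_date and "day_interval" set
    # to daily_interval.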
def create_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
nats_data = {
"func": "schedtask",
"schedtaskpayload": self.generate_nats_task_payload(),
}
logger.debug(nats_data)
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=10))
if r != "ok":
task_result.sync_status = TaskSyncStatus.INITIAL
task_result.save(update_fields=["sync_status"])
logger.error(
f"Unable to create scheduled task {self.name} on {task_result.agent.hostname}: {r}"
)
return "timeout"
else:
task_result.sync_status = TaskSyncStatus.SYNCED
task_result.save(update_fields=["sync_status"])
logger.info(
f"{task_result.agent.hostname} task {self.name} was successfully created."
)
return "ok"
def modify_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
nats_data = {
"func": "schedtask",
"schedtaskpayload": self.generate_nats_task_payload(),
}
logger.debug(nats_data)
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=10))
if r != "ok":
task_result.sync_status = TaskSyncStatus.NOT_SYNCED
task_result.save(update_fields=["sync_status"])
logger.error(
f"Unable to modify scheduled task {self.name} on {task_result.agent.hostname}: {r}"
)
return "timeout"
else:
task_result.sync_status = TaskSyncStatus.SYNCED
task_result.save(update_fields=["sync_status"])
logger.info(
f"{task_result.agent.hostname} task {self.name} was successfully modified."
)
return "ok"
def delete_task_on_agent(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
nats_data = {
"func": "delschedtask",
"schedtaskpayload": {"name": self.win_task_name},
}
r = asyncio.run(task_result.agent.nats_cmd(nats_data, timeout=10))
if r != "ok" and "The system cannot find the file specified" not in r:
task_result.sync_status = TaskSyncStatus.PENDING_DELETION
with suppress(DatabaseError):
task_result.save(update_fields=["sync_status"])
logger.error(
f"Unable to delete task {self.name} on {task_result.agent.hostname}: {r}"
)
return "timeout"
else:
self.delete()
logger.info(f"{task_result.agent.hostname} task {self.name} was deleted.")
return "ok"
def run_win_task(self, agent: "Optional[Agent]" = None) -> str:
if self.policy and not agent:
return "agent parameter needs to be passed with policy task"
else:
agent = agent if self.policy else self.agent
try:
task_result = TaskResult.objects.get(agent=agent, task=self)
except TaskResult.DoesNotExist:
task_result = TaskResult(agent=agent, task=self)
task_result.save()
asyncio.run(
task_result.agent.nats_cmd(
{"func": "runtask", "taskpk": self.pk}, wait=False
)
)
return "ok"
def should_create_alert(self, alert_template=None):
return (
self.dashboard_alert
or self.email_alert
or self.text_alert
or (
alert_template
and (
alert_template.task_always_alert
or alert_template.task_always_email
or alert_template.task_always_text
)
)
)
def run_win_task(pk: int, agent_id: Optional[str] = None) -> str:
with suppress(
AutomatedTask.DoesNotExist,
Agent.DoesNotExist,
):
task = AutomatedTask.objects.get(pk=pk)
if agent_id:
task.run_win_task(Agent.objects.get(agent_id=agent_id))
else:
task.run_win_task()
return "ok" | null |
189,134 | import asyncio
import datetime as dt
from collections import namedtuple
from contextlib import suppress
from time import sleep
from typing import TYPE_CHECKING, Optional, Union
import msgpack
import nats
from django.utils import timezone as djangotime
from nats.errors import TimeoutError
from agents.models import Agent
from alerts.models import Alert
from autotasks.models import AutomatedTask, TaskResult
from tacticalrmm.celery import app
from tacticalrmm.constants import AGENT_STATUS_ONLINE, ORPHANED_WIN_TASK_LOCK
from tacticalrmm.helpers import rand_range, setup_nats_options
from tacticalrmm.utils import redis_lock
class Alert(models.Model):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="agent",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_check = models.ForeignKey(
"checks.Check",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
alert_type = models.CharField(
max_length=20, choices=AlertType.choices, default=AlertType.AVAILABILITY
)
message = models.TextField(null=True, blank=True)
alert_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
snoozed = models.BooleanField(default=False)
snooze_until = models.DateTimeField(null=True, blank=True)
resolved = models.BooleanField(default=False)
resolved_on = models.DateTimeField(null=True, blank=True)
severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_sent = models.DateTimeField(null=True, blank=True)
resolved_email_sent = models.DateTimeField(null=True, blank=True)
sms_sent = models.DateTimeField(null=True, blank=True)
resolved_sms_sent = models.DateTimeField(null=True, blank=True)
hidden = models.BooleanField(default=False)
action_run = models.DateTimeField(null=True, blank=True)
action_stdout = models.TextField(null=True, blank=True)
action_stderr = models.TextField(null=True, blank=True)
action_retcode = models.BigIntegerField(null=True, blank=True)
action_execution_time = models.CharField(max_length=100, null=True, blank=True)
resolved_action_run = models.DateTimeField(null=True, blank=True)
resolved_action_stdout = models.TextField(null=True, blank=True)
resolved_action_stderr = models.TextField(null=True, blank=True)
resolved_action_retcode = models.BigIntegerField(null=True, blank=True)
resolved_action_execution_time = models.CharField(
max_length=100, null=True, blank=True
)
def __str__(self) -> str:
return f"{self.alert_type} - {self.message}"
def assigned_agent(self) -> "Optional[Agent]":
return self.agent
def site(self) -> "Site":
return self.agent.site
def client(self) -> "Client":
return self.agent.client
def resolve(self) -> None:
self.resolved = True
self.resolved_on = djangotime.now()
self.snoozed = False
self.snooze_until = None
self.save(update_fields=["resolved", "resolved_on", "snoozed", "snooze_until"])
def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
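        """
        Return the unresolved availability alert for the agent, creating a
        hidden error-severity alert when none exists (unless skip_create is
        True). If duplicates are found, all but the most recent are resolved.
        """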
if not cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
agent=agent,
alert_type=AlertType.AVAILABILITY,
severity=AlertSeverity.ERROR,
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
# need to pass agent if the check is a policy
if not cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_check=check,
agent=agent,
alert_type=AlertType.CHECK,
severity=(
check.alert_severity
if check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else alert_severity
),
message=f"{agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_check=check,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
if not cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_task=task,
agent=agent,
alert_type=AlertType.TASK,
severity=task.alert_severity,
message=f"{agent.hostname} has task: {task.name} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_task=task,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
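        """
        Handle a failure on an Agent (overdue), CheckResult or TaskResult:
        gather notification settings from the instance and its alert template,
        get or create the alert, unhide it on the dashboard and queue
        email/SMS tasks when the severity matches, and optionally run the
        template's failure action script on any online agent, storing the
        output on the alert.
        """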
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
dashboard_severities = None
email_severities = None
text_severities = None
always_dashboard = None
always_email = None
always_text = None
alert_interval = None
email_task = None
text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_outage_email_task, agent_outage_sms_task
email_task = agent_outage_email_task
text_task = agent_outage_sms_task
email_alert = instance.overdue_email_alert
text_alert = instance.overdue_text_alert
dashboard_alert = instance.overdue_dashboard_alert
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
alert_severity = AlertSeverity.ERROR
agent = instance
dashboard_severities = [AlertSeverity.ERROR]
email_severities = [AlertSeverity.ERROR]
text_severities = [AlertSeverity.ERROR]
# set alert_template settings
if alert_template:
always_dashboard = alert_template.agent_always_alert
always_email = alert_template.agent_always_email
always_text = alert_template.agent_always_text
alert_interval = alert_template.agent_periodic_alert_days
run_script_action = alert_template.agent_script_actions
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_check_email_alert_task,
handle_check_sms_alert_task,
)
email_task = handle_check_email_alert_task
text_task = handle_check_sms_alert_task
email_alert = instance.assigned_check.email_alert
text_alert = instance.assigned_check.text_alert
dashboard_alert = instance.assigned_check.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = (
instance.assigned_check.alert_severity
if instance.assigned_check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else instance.alert_severity
)
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = (
alert_template.check_dashboard_alert_severity
or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
AlertSeverity.INFO,
]
)
email_severities = alert_template.check_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.check_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.check_always_alert
always_email = alert_template.check_always_email
always_text = alert_template.check_always_text
alert_interval = alert_template.check_periodic_alert_days
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import handle_task_email_alert, handle_task_sms_alert
email_task = handle_task_email_alert
text_task = handle_task_sms_alert
email_alert = instance.task.email_alert
text_alert = instance.task.text_alert
dashboard_alert = instance.task.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = instance.task.alert_severity
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = alert_template.task_dashboard_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
email_severities = alert_template.task_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.task_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.task_always_alert
always_email = alert_template.task_always_email
always_text = alert_template.task_always_text
alert_interval = alert_template.task_periodic_alert_days
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
# check if alert severity changed and update the alert
if alert_severity != alert.severity:
alert.severity = alert_severity
alert.save(update_fields=["severity"])
# create alert in dashboard if enabled
if dashboard_alert or always_dashboard:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and dashboard_severities
and alert.severity in dashboard_severities
):
alert.hidden = False
alert.save(update_fields=["hidden"])
# send email if enabled
if email_alert or always_email:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and email_severities
and alert.severity in email_severities
):
email_task.delay(
pk=alert.pk,
alert_interval=alert_interval,
)
# send text if enabled
if text_alert or always_text:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and text_severities
and alert.severity in text_severities
):
text_task.delay(pk=alert.pk, alert_interval=alert_interval)
# check if any scripts should be run
if (
alert_template
and alert_template.action
and run_script_action
and not alert.action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.action,
username="alert-action-failure",
)
r = agent.run_script(
scriptpk=alert_template.action.pk,
args=alert.parse_script_args(alert_template.action_args),
timeout=alert_template.action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.action_retcode = r["retcode"]
alert.action_stdout = r["stdout"]
alert.action_stderr = r["stderr"]
alert.action_execution_time = "{:.4f}".format(r["execution_time"])
alert.action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) failure alert",
)
def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
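        """
        Counterpart to handle_alert_failure: resolve the open alert for the
        Agent, CheckResult or TaskResult, queue resolved email/SMS
        notifications when enabled, and optionally run the template's
        resolved action script, storing the output on the alert.
        """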
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
email_on_resolved = False
text_on_resolved = False
resolved_email_task = None
resolved_text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_recovery_email_task, agent_recovery_sms_task
resolved_email_task = agent_recovery_email_task
resolved_text_task = agent_recovery_sms_task
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
agent = instance
if alert_template:
email_on_resolved = alert_template.agent_email_on_resolved
text_on_resolved = alert_template.agent_text_on_resolved
run_script_action = alert_template.agent_script_actions
if agent.overdue_email_alert:
email_on_resolved = True
if agent.overdue_text_alert:
text_on_resolved = True
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_resolved_check_email_alert_task,
handle_resolved_check_sms_alert_task,
)
resolved_email_task = handle_resolved_check_email_alert_task
resolved_text_task = handle_resolved_check_sms_alert_task
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.check_email_on_resolved
text_on_resolved = alert_template.check_text_on_resolved
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import (
handle_resolved_task_email_alert,
handle_resolved_task_sms_alert,
)
resolved_email_task = handle_resolved_task_email_alert
resolved_text_task = handle_resolved_task_sms_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.task_email_on_resolved
text_on_resolved = alert_template.task_text_on_resolved
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
alert.resolve()
        # check if a resolved email notification should be sent
if email_on_resolved and not alert.resolved_email_sent:
resolved_email_task.delay(pk=alert.pk)
# check if resolved text should be sent
if text_on_resolved and not alert.resolved_sms_sent:
resolved_text_task.delay(pk=alert.pk)
# check if resolved script should be run
if (
alert_template
and alert_template.resolved_action
and run_script_action
and not alert.resolved_action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
                script=alert_template.resolved_action,
username="alert-action-resolved",
)
r = agent.run_script(
scriptpk=alert_template.resolved_action.pk,
args=alert.parse_script_args(alert_template.resolved_action_args),
timeout=alert_template.resolved_action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.resolved_action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.resolved_action_retcode = r["retcode"]
alert.resolved_action_stdout = r["stdout"]
alert.resolved_action_stderr = r["stderr"]
alert.resolved_action_execution_time = "{:.4f}".format(
r["execution_time"]
)
alert.resolved_action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) resolved alert",
)
def parse_script_args(self, args: List[str]) -> List[str]:
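        """
        Substitute {{alert.<attribute>}} placeholders in script arguments with
        the quoted value of the matching Alert attribute; arguments whose
        placeholder names a missing or callable attribute are dropped.

        Illustrative example (hypothetical values): ["-Msg", "{{alert.message}}"]
        becomes ["-Msg", "'CPU check failed'"] when self.message is
        "CPU check failed".
        """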
if not args:
return []
temp_args = []
# pattern to match for injection
pattern = re.compile(".*\\{\\{alert\\.(.*)\\}\\}.*")
for arg in args:
if match := pattern.match(arg):
name = match.group(1)
# check if attr exists and isn't a function
if hasattr(self, name) and not callable(getattr(self, name)):
value = f"'{getattr(self, name)}'"
else:
continue
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
except re.error:
temp_args.append(re.sub("\\{\\{.*\\}\\}", re.escape(value), arg))
except Exception as e:
DebugLog.error(log_type=DebugLogType.SCRIPTING, message=str(e))
continue
else:
temp_args.append(arg)
return temp_args
class TaskResult(models.Model):
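    """
    Per-agent execution state and output for an AutomatedTask; one row per
    (agent, task) pair.
    """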
class Meta:
unique_together = (("agent", "task"),)
objects = PermissionQuerySet.as_manager()
id = models.BigAutoField(primary_key=True)
agent = models.ForeignKey(
"agents.Agent",
related_name="taskresults",
on_delete=models.CASCADE,
)
task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="taskresults",
on_delete=models.CASCADE,
)
retcode = models.BigIntegerField(null=True, blank=True)
stdout = models.TextField(null=True, blank=True)
stderr = models.TextField(null=True, blank=True)
execution_time = models.CharField(max_length=100, default="0.0000")
last_run = models.DateTimeField(null=True, blank=True)
status = models.CharField(
max_length=30, choices=TaskStatus.choices, default=TaskStatus.PENDING
)
sync_status = models.CharField(
max_length=100, choices=TaskSyncStatus.choices, default=TaskSyncStatus.INITIAL
)
def __str__(self):
return f"{self.agent.hostname} - {self.task}"
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_task_alert(
self.task,
agent=self.agent,
skip_create=not self.task.should_create_alert(alert_template),
)
def save_collector_results(self) -> None:
agent_field = self.task.custom_field.get_or_create_field_value(self.agent)
value = (
self.stdout.strip()
if self.task.collector_all_output
else self.stdout.strip().split("\n")[-1].strip()
)
agent_field.save_to_field(value)
def send_email(self):
CORE = get_core_settings()
# Format of Email sent when Task has email alert
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
else:
subject = f"{self} Failed"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_mail(subject, body, self.agent.alert_template)
def send_sms(self):
CORE = get_core_settings()
# Format of SMS sent when Task has SMS alert
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
else:
subject = f"{self} Failed"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_sms(body, alert_template=self.agent.alert_template)
def send_resolved_email(self):
CORE = get_core_settings()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_mail(subject, body, alert_template=self.agent.alert_template)
def send_resolved_sms(self):
CORE = get_core_settings()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_sms(body, alert_template=self.agent.alert_template)
def handle_task_email_alert(pk: int, alert_interval: Union[float, None] = None) -> str:
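    """
    Send the task-failure email for an alert: immediately the first time,
    afterwards only when the last email is older than alert_interval days.
    Returns "ok", or "alert not found" when the alert does not exist.
    """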
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
# first time sending email
if not alert.email_sent:
task_result = TaskResult.objects.get(
task=alert.assigned_task, agent=alert.agent
)
sleep(rand_range(100, 1500))
task_result.send_email()
alert.email_sent = djangotime.now()
alert.save(update_fields=["email_sent"])
else:
if alert_interval:
# send an email only if the last email sent is older than alert interval
delta = djangotime.now() - dt.timedelta(days=alert_interval)
if alert.email_sent < delta:
task_result = TaskResult.objects.get(
task=alert.assigned_task, agent=alert.agent
)
sleep(rand_range(100, 1500))
task_result.send_email()
alert.email_sent = djangotime.now()
alert.save(update_fields=["email_sent"])
return "ok" | null |
189,135 | import asyncio
import datetime as dt
from collections import namedtuple
from contextlib import suppress
from time import sleep
from typing import TYPE_CHECKING, Optional, Union
import msgpack
import nats
from django.utils import timezone as djangotime
from nats.errors import TimeoutError
from agents.models import Agent
from alerts.models import Alert
from autotasks.models import AutomatedTask, TaskResult
from tacticalrmm.celery import app
from tacticalrmm.constants import AGENT_STATUS_ONLINE, ORPHANED_WIN_TASK_LOCK
from tacticalrmm.helpers import rand_range, setup_nats_options
from tacticalrmm.utils import redis_lock
class Alert(models.Model):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="agent",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_check = models.ForeignKey(
"checks.Check",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
alert_type = models.CharField(
max_length=20, choices=AlertType.choices, default=AlertType.AVAILABILITY
)
message = models.TextField(null=True, blank=True)
alert_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
snoozed = models.BooleanField(default=False)
snooze_until = models.DateTimeField(null=True, blank=True)
resolved = models.BooleanField(default=False)
resolved_on = models.DateTimeField(null=True, blank=True)
severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_sent = models.DateTimeField(null=True, blank=True)
resolved_email_sent = models.DateTimeField(null=True, blank=True)
sms_sent = models.DateTimeField(null=True, blank=True)
resolved_sms_sent = models.DateTimeField(null=True, blank=True)
hidden = models.BooleanField(default=False)
action_run = models.DateTimeField(null=True, blank=True)
action_stdout = models.TextField(null=True, blank=True)
action_stderr = models.TextField(null=True, blank=True)
action_retcode = models.BigIntegerField(null=True, blank=True)
action_execution_time = models.CharField(max_length=100, null=True, blank=True)
resolved_action_run = models.DateTimeField(null=True, blank=True)
resolved_action_stdout = models.TextField(null=True, blank=True)
resolved_action_stderr = models.TextField(null=True, blank=True)
resolved_action_retcode = models.BigIntegerField(null=True, blank=True)
resolved_action_execution_time = models.CharField(
max_length=100, null=True, blank=True
)
def __str__(self) -> str:
return f"{self.alert_type} - {self.message}"
def assigned_agent(self) -> "Optional[Agent]":
return self.agent
def site(self) -> "Site":
return self.agent.site
def client(self) -> "Client":
return self.agent.client
def resolve(self) -> None:
self.resolved = True
self.resolved_on = djangotime.now()
self.snoozed = False
self.snooze_until = None
self.save(update_fields=["resolved", "resolved_on", "snoozed", "snooze_until"])
def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
if not cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
agent=agent,
alert_type=AlertType.AVAILABILITY,
severity=AlertSeverity.ERROR,
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
# need to pass agent if the check is a policy
if not cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_check=check,
agent=agent,
alert_type=AlertType.CHECK,
severity=(
check.alert_severity
if check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else alert_severity
),
message=f"{agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_check=check,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
if not cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_task=task,
agent=agent,
alert_type=AlertType.TASK,
severity=task.alert_severity,
message=f"{agent.hostname} has task: {task.name} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_task=task,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
dashboard_severities = None
email_severities = None
text_severities = None
always_dashboard = None
always_email = None
always_text = None
alert_interval = None
email_task = None
text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_outage_email_task, agent_outage_sms_task
email_task = agent_outage_email_task
text_task = agent_outage_sms_task
email_alert = instance.overdue_email_alert
text_alert = instance.overdue_text_alert
dashboard_alert = instance.overdue_dashboard_alert
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
alert_severity = AlertSeverity.ERROR
agent = instance
dashboard_severities = [AlertSeverity.ERROR]
email_severities = [AlertSeverity.ERROR]
text_severities = [AlertSeverity.ERROR]
# set alert_template settings
if alert_template:
always_dashboard = alert_template.agent_always_alert
always_email = alert_template.agent_always_email
always_text = alert_template.agent_always_text
alert_interval = alert_template.agent_periodic_alert_days
run_script_action = alert_template.agent_script_actions
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_check_email_alert_task,
handle_check_sms_alert_task,
)
email_task = handle_check_email_alert_task
text_task = handle_check_sms_alert_task
email_alert = instance.assigned_check.email_alert
text_alert = instance.assigned_check.text_alert
dashboard_alert = instance.assigned_check.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = (
instance.assigned_check.alert_severity
if instance.assigned_check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else instance.alert_severity
)
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = (
alert_template.check_dashboard_alert_severity
or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
AlertSeverity.INFO,
]
)
email_severities = alert_template.check_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.check_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.check_always_alert
always_email = alert_template.check_always_email
always_text = alert_template.check_always_text
alert_interval = alert_template.check_periodic_alert_days
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import handle_task_email_alert, handle_task_sms_alert
email_task = handle_task_email_alert
text_task = handle_task_sms_alert
email_alert = instance.task.email_alert
text_alert = instance.task.text_alert
dashboard_alert = instance.task.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = instance.task.alert_severity
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = alert_template.task_dashboard_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
email_severities = alert_template.task_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.task_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.task_always_alert
always_email = alert_template.task_always_email
always_text = alert_template.task_always_text
alert_interval = alert_template.task_periodic_alert_days
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
# check if alert severity changed and update the alert
if alert_severity != alert.severity:
alert.severity = alert_severity
alert.save(update_fields=["severity"])
# create alert in dashboard if enabled
if dashboard_alert or always_dashboard:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and dashboard_severities
and alert.severity in dashboard_severities
):
alert.hidden = False
alert.save(update_fields=["hidden"])
# send email if enabled
if email_alert or always_email:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and email_severities
and alert.severity in email_severities
):
email_task.delay(
pk=alert.pk,
alert_interval=alert_interval,
)
# send text if enabled
if text_alert or always_text:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and text_severities
and alert.severity in text_severities
):
text_task.delay(pk=alert.pk, alert_interval=alert_interval)
# check if any scripts should be run
if (
alert_template
and alert_template.action
and run_script_action
and not alert.action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.action,
username="alert-action-failure",
)
r = agent.run_script(
scriptpk=alert_template.action.pk,
args=alert.parse_script_args(alert_template.action_args),
timeout=alert_template.action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.action_retcode = r["retcode"]
alert.action_stdout = r["stdout"]
alert.action_stderr = r["stderr"]
alert.action_execution_time = "{:.4f}".format(r["execution_time"])
alert.action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) failure alert",
)
def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
email_on_resolved = False
text_on_resolved = False
resolved_email_task = None
resolved_text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_recovery_email_task, agent_recovery_sms_task
resolved_email_task = agent_recovery_email_task
resolved_text_task = agent_recovery_sms_task
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
agent = instance
if alert_template:
email_on_resolved = alert_template.agent_email_on_resolved
text_on_resolved = alert_template.agent_text_on_resolved
run_script_action = alert_template.agent_script_actions
if agent.overdue_email_alert:
email_on_resolved = True
if agent.overdue_text_alert:
text_on_resolved = True
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_resolved_check_email_alert_task,
handle_resolved_check_sms_alert_task,
)
resolved_email_task = handle_resolved_check_email_alert_task
resolved_text_task = handle_resolved_check_sms_alert_task
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.check_email_on_resolved
text_on_resolved = alert_template.check_text_on_resolved
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import (
handle_resolved_task_email_alert,
handle_resolved_task_sms_alert,
)
resolved_email_task = handle_resolved_task_email_alert
resolved_text_task = handle_resolved_task_sms_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.task_email_on_resolved
text_on_resolved = alert_template.task_text_on_resolved
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
alert.resolve()
        # check if a resolved email notification should be sent
if email_on_resolved and not alert.resolved_email_sent:
resolved_email_task.delay(pk=alert.pk)
# check if resolved text should be sent
if text_on_resolved and not alert.resolved_sms_sent:
resolved_text_task.delay(pk=alert.pk)
# check if resolved script should be run
if (
alert_template
and alert_template.resolved_action
and run_script_action
and not alert.resolved_action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
                script=alert_template.resolved_action,
username="alert-action-resolved",
)
r = agent.run_script(
scriptpk=alert_template.resolved_action.pk,
args=alert.parse_script_args(alert_template.resolved_action_args),
timeout=alert_template.resolved_action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.resolved_action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.resolved_action_retcode = r["retcode"]
alert.resolved_action_stdout = r["stdout"]
alert.resolved_action_stderr = r["stderr"]
alert.resolved_action_execution_time = "{:.4f}".format(
r["execution_time"]
)
alert.resolved_action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) resolved alert",
)
def parse_script_args(self, args: List[str]) -> List[str]:
if not args:
return []
temp_args = []
# pattern to match for injection
pattern = re.compile(".*\\{\\{alert\\.(.*)\\}\\}.*")
for arg in args:
if match := pattern.match(arg):
name = match.group(1)
# check if attr exists and isn't a function
if hasattr(self, name) and not callable(getattr(self, name)):
value = f"'{getattr(self, name)}'"
else:
continue
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
except re.error:
temp_args.append(re.sub("\\{\\{.*\\}\\}", re.escape(value), arg))
except Exception as e:
DebugLog.error(log_type=DebugLogType.SCRIPTING, message=str(e))
continue
else:
temp_args.append(arg)
return temp_args
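# --- Illustrative sketch (not part of the models above) ----------------------
# Shows the behaviour of the {{alert.<attr>}} substitution implemented in
# parse_script_args, using only the standard `re` module. The argument string
# and the substituted value below are assumptions chosen for demonstration.
import re as _re_demo

_demo_pattern = _re_demo.compile(".*\\{\\{alert\\.(.*)\\}\\}.*")
_demo_arg = "--message={{alert.message}}"
if (_m := _demo_pattern.match(_demo_arg)):
    _demo_value = "'CPU load check failed'"  # stand-in for getattr(alert, _m.group(1))
    # same substitution call as used in parse_script_args above
    print(_re_demo.sub("\\{\\{.*\\}\\}", _demo_value, _demo_arg))
    # prints: --message='CPU load check failed'
# ------------------------------------------------------------------------------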
class TaskResult(models.Model):
class Meta:
unique_together = (("agent", "task"),)
objects = PermissionQuerySet.as_manager()
id = models.BigAutoField(primary_key=True)
agent = models.ForeignKey(
"agents.Agent",
related_name="taskresults",
on_delete=models.CASCADE,
)
task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="taskresults",
on_delete=models.CASCADE,
)
retcode = models.BigIntegerField(null=True, blank=True)
stdout = models.TextField(null=True, blank=True)
stderr = models.TextField(null=True, blank=True)
execution_time = models.CharField(max_length=100, default="0.0000")
last_run = models.DateTimeField(null=True, blank=True)
status = models.CharField(
max_length=30, choices=TaskStatus.choices, default=TaskStatus.PENDING
)
sync_status = models.CharField(
max_length=100, choices=TaskSyncStatus.choices, default=TaskSyncStatus.INITIAL
)
def __str__(self):
return f"{self.agent.hostname} - {self.task}"
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_task_alert(
self.task,
agent=self.agent,
skip_create=not self.task.should_create_alert(alert_template),
)
def save_collector_results(self) -> None:
agent_field = self.task.custom_field.get_or_create_field_value(self.agent)
value = (
self.stdout.strip()
if self.task.collector_all_output
else self.stdout.strip().split("\n")[-1].strip()
)
agent_field.save_to_field(value)
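    # Example of the collector behaviour above (illustrative values only): with
    # collector_all_output=False, a stdout of "line1\nline2\n" is reduced to its
    # last line "line2" before being written to the agent's custom field.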
def send_email(self):
CORE = get_core_settings()
# Format of Email sent when Task has email alert
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
else:
subject = f"{self} Failed"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_mail(subject, body, self.agent.alert_template)
def send_sms(self):
CORE = get_core_settings()
# Format of SMS sent when Task has SMS alert
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
else:
subject = f"{self} Failed"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_sms(body, alert_template=self.agent.alert_template)
def send_resolved_email(self):
CORE = get_core_settings()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_mail(subject, body, alert_template=self.agent.alert_template)
def send_resolved_sms(self):
CORE = get_core_settings()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_sms(body, alert_template=self.agent.alert_template)
def handle_task_sms_alert(pk: int, alert_interval: Union[float, None] = None) -> str:
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
# first time sending text
if not alert.sms_sent:
task_result = TaskResult.objects.get(
task=alert.assigned_task, agent=alert.agent
)
sleep(rand_range(100, 1500))
task_result.send_sms()
alert.sms_sent = djangotime.now()
alert.save(update_fields=["sms_sent"])
else:
if alert_interval:
# send a text only if the last text sent is older than alert interval
delta = djangotime.now() - dt.timedelta(days=alert_interval)
if alert.sms_sent < delta:
task_result = TaskResult.objects.get(
task=alert.assigned_task, agent=alert.agent
)
sleep(rand_range(100, 1500))
task_result.send_sms()
alert.sms_sent = djangotime.now()
alert.save(update_fields=["sms_sent"])
return "ok" | null |
189,136 | import asyncio
import datetime as dt
from collections import namedtuple
from contextlib import suppress
from time import sleep
from typing import TYPE_CHECKING, Optional, Union
import msgpack
import nats
from django.utils import timezone as djangotime
from nats.errors import TimeoutError
from agents.models import Agent
from alerts.models import Alert
from autotasks.models import AutomatedTask, TaskResult
from tacticalrmm.celery import app
from tacticalrmm.constants import AGENT_STATUS_ONLINE, ORPHANED_WIN_TASK_LOCK
from tacticalrmm.helpers import rand_range, setup_nats_options
from tacticalrmm.utils import redis_lock
class Alert(models.Model):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="agent",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_check = models.ForeignKey(
"checks.Check",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
alert_type = models.CharField(
max_length=20, choices=AlertType.choices, default=AlertType.AVAILABILITY
)
message = models.TextField(null=True, blank=True)
alert_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
snoozed = models.BooleanField(default=False)
snooze_until = models.DateTimeField(null=True, blank=True)
resolved = models.BooleanField(default=False)
resolved_on = models.DateTimeField(null=True, blank=True)
severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_sent = models.DateTimeField(null=True, blank=True)
resolved_email_sent = models.DateTimeField(null=True, blank=True)
sms_sent = models.DateTimeField(null=True, blank=True)
resolved_sms_sent = models.DateTimeField(null=True, blank=True)
hidden = models.BooleanField(default=False)
action_run = models.DateTimeField(null=True, blank=True)
action_stdout = models.TextField(null=True, blank=True)
action_stderr = models.TextField(null=True, blank=True)
action_retcode = models.BigIntegerField(null=True, blank=True)
action_execution_time = models.CharField(max_length=100, null=True, blank=True)
resolved_action_run = models.DateTimeField(null=True, blank=True)
resolved_action_stdout = models.TextField(null=True, blank=True)
resolved_action_stderr = models.TextField(null=True, blank=True)
resolved_action_retcode = models.BigIntegerField(null=True, blank=True)
resolved_action_execution_time = models.CharField(
max_length=100, null=True, blank=True
)
def __str__(self) -> str:
return f"{self.alert_type} - {self.message}"
def assigned_agent(self) -> "Optional[Agent]":
return self.agent
def site(self) -> "Site":
return self.agent.site
def client(self) -> "Client":
return self.agent.client
def resolve(self) -> None:
self.resolved = True
self.resolved_on = djangotime.now()
self.snoozed = False
self.snooze_until = None
self.save(update_fields=["resolved", "resolved_on", "snoozed", "snooze_until"])
def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
if not cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
agent=agent,
alert_type=AlertType.AVAILABILITY,
severity=AlertSeverity.ERROR,
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
# need to pass agent if the check is a policy
if not cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_check=check,
agent=agent,
alert_type=AlertType.CHECK,
severity=(
check.alert_severity
if check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else alert_severity
),
message=f"{agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_check=check,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
if not cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_task=task,
agent=agent,
alert_type=AlertType.TASK,
severity=task.alert_severity,
message=f"{agent.hostname} has task: {task.name} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_task=task,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
dashboard_severities = None
email_severities = None
text_severities = None
always_dashboard = None
always_email = None
always_text = None
alert_interval = None
email_task = None
text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_outage_email_task, agent_outage_sms_task
email_task = agent_outage_email_task
text_task = agent_outage_sms_task
email_alert = instance.overdue_email_alert
text_alert = instance.overdue_text_alert
dashboard_alert = instance.overdue_dashboard_alert
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
alert_severity = AlertSeverity.ERROR
agent = instance
dashboard_severities = [AlertSeverity.ERROR]
email_severities = [AlertSeverity.ERROR]
text_severities = [AlertSeverity.ERROR]
# set alert_template settings
if alert_template:
always_dashboard = alert_template.agent_always_alert
always_email = alert_template.agent_always_email
always_text = alert_template.agent_always_text
alert_interval = alert_template.agent_periodic_alert_days
run_script_action = alert_template.agent_script_actions
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_check_email_alert_task,
handle_check_sms_alert_task,
)
email_task = handle_check_email_alert_task
text_task = handle_check_sms_alert_task
email_alert = instance.assigned_check.email_alert
text_alert = instance.assigned_check.text_alert
dashboard_alert = instance.assigned_check.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = (
instance.assigned_check.alert_severity
if instance.assigned_check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else instance.alert_severity
)
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = (
alert_template.check_dashboard_alert_severity
or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
AlertSeverity.INFO,
]
)
email_severities = alert_template.check_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.check_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.check_always_alert
always_email = alert_template.check_always_email
always_text = alert_template.check_always_text
alert_interval = alert_template.check_periodic_alert_days
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import handle_task_email_alert, handle_task_sms_alert
email_task = handle_task_email_alert
text_task = handle_task_sms_alert
email_alert = instance.task.email_alert
text_alert = instance.task.text_alert
dashboard_alert = instance.task.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = instance.task.alert_severity
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = alert_template.task_dashboard_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
email_severities = alert_template.task_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.task_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.task_always_alert
always_email = alert_template.task_always_email
always_text = alert_template.task_always_text
alert_interval = alert_template.task_periodic_alert_days
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
# check if alert severity changed and update the alert
if alert_severity != alert.severity:
alert.severity = alert_severity
alert.save(update_fields=["severity"])
# create alert in dashboard if enabled
if dashboard_alert or always_dashboard:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and dashboard_severities
and alert.severity in dashboard_severities
):
alert.hidden = False
alert.save(update_fields=["hidden"])
# send email if enabled
if email_alert or always_email:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and email_severities
and alert.severity in email_severities
):
email_task.delay(
pk=alert.pk,
alert_interval=alert_interval,
)
# send text if enabled
if text_alert or always_text:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and text_severities
and alert.severity in text_severities
):
text_task.delay(pk=alert.pk, alert_interval=alert_interval)
# check if any scripts should be run
if (
alert_template
and alert_template.action
and run_script_action
and not alert.action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.action,
username="alert-action-failure",
)
r = agent.run_script(
scriptpk=alert_template.action.pk,
args=alert.parse_script_args(alert_template.action_args),
timeout=alert_template.action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.action_retcode = r["retcode"]
alert.action_stdout = r["stdout"]
alert.action_stderr = r["stderr"]
alert.action_execution_time = "{:.4f}".format(r["execution_time"])
alert.action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) failure alert",
)
def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
email_on_resolved = False
text_on_resolved = False
resolved_email_task = None
resolved_text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_recovery_email_task, agent_recovery_sms_task
resolved_email_task = agent_recovery_email_task
resolved_text_task = agent_recovery_sms_task
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
agent = instance
if alert_template:
email_on_resolved = alert_template.agent_email_on_resolved
text_on_resolved = alert_template.agent_text_on_resolved
run_script_action = alert_template.agent_script_actions
if agent.overdue_email_alert:
email_on_resolved = True
if agent.overdue_text_alert:
text_on_resolved = True
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_resolved_check_email_alert_task,
handle_resolved_check_sms_alert_task,
)
resolved_email_task = handle_resolved_check_email_alert_task
resolved_text_task = handle_resolved_check_sms_alert_task
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.check_email_on_resolved
text_on_resolved = alert_template.check_text_on_resolved
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import (
handle_resolved_task_email_alert,
handle_resolved_task_sms_alert,
)
resolved_email_task = handle_resolved_task_email_alert
resolved_text_task = handle_resolved_task_sms_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.task_email_on_resolved
text_on_resolved = alert_template.task_text_on_resolved
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
alert.resolve()
        # check if a resolved email notification should be sent
if email_on_resolved and not alert.resolved_email_sent:
resolved_email_task.delay(pk=alert.pk)
# check if resolved text should be sent
if text_on_resolved and not alert.resolved_sms_sent:
resolved_text_task.delay(pk=alert.pk)
# check if resolved script should be run
if (
alert_template
and alert_template.resolved_action
and run_script_action
and not alert.resolved_action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
                script=alert_template.resolved_action,
username="alert-action-resolved",
)
r = agent.run_script(
scriptpk=alert_template.resolved_action.pk,
args=alert.parse_script_args(alert_template.resolved_action_args),
timeout=alert_template.resolved_action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.resolved_action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.resolved_action_retcode = r["retcode"]
alert.resolved_action_stdout = r["stdout"]
alert.resolved_action_stderr = r["stderr"]
alert.resolved_action_execution_time = "{:.4f}".format(
r["execution_time"]
)
alert.resolved_action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) resolved alert",
)
def parse_script_args(self, args: List[str]) -> List[str]:
if not args:
return []
temp_args = []
# pattern to match for injection
pattern = re.compile(".*\\{\\{alert\\.(.*)\\}\\}.*")
for arg in args:
if match := pattern.match(arg):
name = match.group(1)
# check if attr exists and isn't a function
if hasattr(self, name) and not callable(getattr(self, name)):
value = f"'{getattr(self, name)}'"
else:
continue
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
except re.error:
temp_args.append(re.sub("\\{\\{.*\\}\\}", re.escape(value), arg))
except Exception as e:
DebugLog.error(log_type=DebugLogType.SCRIPTING, message=str(e))
continue
else:
temp_args.append(arg)
return temp_args
class TaskResult(models.Model):
class Meta:
unique_together = (("agent", "task"),)
objects = PermissionQuerySet.as_manager()
id = models.BigAutoField(primary_key=True)
agent = models.ForeignKey(
"agents.Agent",
related_name="taskresults",
on_delete=models.CASCADE,
)
task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="taskresults",
on_delete=models.CASCADE,
)
retcode = models.BigIntegerField(null=True, blank=True)
stdout = models.TextField(null=True, blank=True)
stderr = models.TextField(null=True, blank=True)
execution_time = models.CharField(max_length=100, default="0.0000")
last_run = models.DateTimeField(null=True, blank=True)
status = models.CharField(
max_length=30, choices=TaskStatus.choices, default=TaskStatus.PENDING
)
sync_status = models.CharField(
max_length=100, choices=TaskSyncStatus.choices, default=TaskSyncStatus.INITIAL
)
def __str__(self):
return f"{self.agent.hostname} - {self.task}"
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_task_alert(
self.task,
agent=self.agent,
skip_create=not self.task.should_create_alert(alert_template),
)
def save_collector_results(self) -> None:
agent_field = self.task.custom_field.get_or_create_field_value(self.agent)
value = (
self.stdout.strip()
if self.task.collector_all_output
else self.stdout.strip().split("\n")[-1].strip()
)
agent_field.save_to_field(value)
def send_email(self):
CORE = get_core_settings()
# Format of Email sent when Task has email alert
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
else:
subject = f"{self} Failed"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_mail(subject, body, self.agent.alert_template)
def send_sms(self):
CORE = get_core_settings()
# Format of SMS sent when Task has SMS alert
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
else:
subject = f"{self} Failed"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_sms(body, alert_template=self.agent.alert_template)
def send_resolved_email(self):
CORE = get_core_settings()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_mail(subject, body, alert_template=self.agent.alert_template)
def send_resolved_sms(self):
CORE = get_core_settings()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_sms(body, alert_template=self.agent.alert_template)
def handle_resolved_task_sms_alert(pk: int) -> str:
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
# first time sending text
if not alert.resolved_sms_sent:
task_result = TaskResult.objects.get(
task=alert.assigned_task, agent=alert.agent
)
sleep(rand_range(100, 1500))
task_result.send_resolved_sms()
alert.resolved_sms_sent = djangotime.now()
alert.save(update_fields=["resolved_sms_sent"])
return "ok" | null |
189,137 | import asyncio
import datetime as dt
from collections import namedtuple
from contextlib import suppress
from time import sleep
from typing import TYPE_CHECKING, Optional, Union
import msgpack
import nats
from django.utils import timezone as djangotime
from nats.errors import TimeoutError
from agents.models import Agent
from alerts.models import Alert
from autotasks.models import AutomatedTask, TaskResult
from tacticalrmm.celery import app
from tacticalrmm.constants import AGENT_STATUS_ONLINE, ORPHANED_WIN_TASK_LOCK
from tacticalrmm.helpers import rand_range, setup_nats_options
from tacticalrmm.utils import redis_lock
class Alert(models.Model):
objects = PermissionQuerySet.as_manager()
agent = models.ForeignKey(
"agents.Agent",
related_name="agent",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_check = models.ForeignKey(
"checks.Check",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
assigned_task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="alert",
on_delete=models.CASCADE,
null=True,
blank=True,
)
alert_type = models.CharField(
max_length=20, choices=AlertType.choices, default=AlertType.AVAILABILITY
)
message = models.TextField(null=True, blank=True)
alert_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
snoozed = models.BooleanField(default=False)
snooze_until = models.DateTimeField(null=True, blank=True)
resolved = models.BooleanField(default=False)
resolved_on = models.DateTimeField(null=True, blank=True)
severity = models.CharField(
max_length=30, choices=AlertSeverity.choices, default=AlertSeverity.INFO
)
email_sent = models.DateTimeField(null=True, blank=True)
resolved_email_sent = models.DateTimeField(null=True, blank=True)
sms_sent = models.DateTimeField(null=True, blank=True)
resolved_sms_sent = models.DateTimeField(null=True, blank=True)
hidden = models.BooleanField(default=False)
action_run = models.DateTimeField(null=True, blank=True)
action_stdout = models.TextField(null=True, blank=True)
action_stderr = models.TextField(null=True, blank=True)
action_retcode = models.BigIntegerField(null=True, blank=True)
action_execution_time = models.CharField(max_length=100, null=True, blank=True)
resolved_action_run = models.DateTimeField(null=True, blank=True)
resolved_action_stdout = models.TextField(null=True, blank=True)
resolved_action_stderr = models.TextField(null=True, blank=True)
resolved_action_retcode = models.BigIntegerField(null=True, blank=True)
resolved_action_execution_time = models.CharField(
max_length=100, null=True, blank=True
)
def __str__(self) -> str:
return f"{self.alert_type} - {self.message}"
def assigned_agent(self) -> "Optional[Agent]":
return self.agent
def site(self) -> "Site":
return self.agent.site
def client(self) -> "Client":
return self.agent.client
def resolve(self) -> None:
self.resolved = True
self.resolved_on = djangotime.now()
self.snoozed = False
self.snooze_until = None
self.save(update_fields=["resolved", "resolved_on", "snoozed", "snooze_until"])
def create_or_return_availability_alert(
cls, agent: Agent, skip_create: bool = False
) -> Optional[Alert]:
if not cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
agent=agent,
alert_type=AlertType.AVAILABILITY,
severity=AlertSeverity.ERROR,
message=f"{agent.hostname} in {agent.client.name}\\{agent.site.name} is overdue.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
agent=agent, alert_type=AlertType.AVAILABILITY, resolved=False
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_check_alert(
cls,
check: "Check",
agent: "Agent",
alert_severity: Optional[str] = None,
skip_create: bool = False,
) -> "Optional[Alert]":
# need to pass agent if the check is a policy
if not cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_check=check,
agent=agent,
alert_type=AlertType.CHECK,
severity=(
check.alert_severity
if check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else alert_severity
),
message=f"{agent.hostname} has a {check.check_type} check: {check.readable_desc} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_check=check,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_check=check,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def create_or_return_task_alert(
cls,
task: "AutomatedTask",
agent: "Agent",
skip_create: bool = False,
) -> "Optional[Alert]":
if not cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
).exists():
if skip_create:
return None
return cast(
Alert,
cls.objects.create(
assigned_task=task,
agent=agent,
alert_type=AlertType.TASK,
severity=task.alert_severity,
message=f"{agent.hostname} has task: {task.name} that failed.",
hidden=True,
),
)
else:
try:
return cast(
Alert,
cls.objects.get(
assigned_task=task,
agent=agent,
resolved=False,
),
)
except cls.MultipleObjectsReturned:
alerts = cls.objects.filter(
assigned_task=task,
agent=agent,
resolved=False,
)
last_alert = cast(Alert, alerts.last())
# cycle through other alerts and resolve
for alert in alerts:
if alert.id != last_alert.pk:
alert.resolve()
return last_alert
except cls.DoesNotExist:
return None
def handle_alert_failure(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
dashboard_severities = None
email_severities = None
text_severities = None
always_dashboard = None
always_email = None
always_text = None
alert_interval = None
email_task = None
text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_outage_email_task, agent_outage_sms_task
email_task = agent_outage_email_task
text_task = agent_outage_sms_task
email_alert = instance.overdue_email_alert
text_alert = instance.overdue_text_alert
dashboard_alert = instance.overdue_dashboard_alert
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
alert_severity = AlertSeverity.ERROR
agent = instance
dashboard_severities = [AlertSeverity.ERROR]
email_severities = [AlertSeverity.ERROR]
text_severities = [AlertSeverity.ERROR]
# set alert_template settings
if alert_template:
always_dashboard = alert_template.agent_always_alert
always_email = alert_template.agent_always_email
always_text = alert_template.agent_always_text
alert_interval = alert_template.agent_periodic_alert_days
run_script_action = alert_template.agent_script_actions
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_check_email_alert_task,
handle_check_sms_alert_task,
)
email_task = handle_check_email_alert_task
text_task = handle_check_sms_alert_task
email_alert = instance.assigned_check.email_alert
text_alert = instance.assigned_check.text_alert
dashboard_alert = instance.assigned_check.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = (
instance.assigned_check.alert_severity
if instance.assigned_check.check_type
not in {
CheckType.MEMORY,
CheckType.CPU_LOAD,
CheckType.DISK_SPACE,
CheckType.SCRIPT,
}
else instance.alert_severity
)
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = (
alert_template.check_dashboard_alert_severity
or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
AlertSeverity.INFO,
]
)
email_severities = alert_template.check_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.check_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.check_always_alert
always_email = alert_template.check_always_email
always_text = alert_template.check_always_text
alert_interval = alert_template.check_periodic_alert_days
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import handle_task_email_alert, handle_task_sms_alert
email_task = handle_task_email_alert
text_task = handle_task_sms_alert
email_alert = instance.task.email_alert
text_alert = instance.task.text_alert
dashboard_alert = instance.task.dashboard_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
alert_severity = instance.task.alert_severity
agent = instance.agent
# set alert_template settings
if alert_template:
dashboard_severities = alert_template.task_dashboard_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
email_severities = alert_template.task_email_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
text_severities = alert_template.task_text_alert_severity or [
AlertSeverity.ERROR,
AlertSeverity.WARNING,
]
always_dashboard = alert_template.task_always_alert
always_email = alert_template.task_always_email
always_text = alert_template.task_always_text
alert_interval = alert_template.task_periodic_alert_days
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
# check if alert severity changed and update the alert
if alert_severity != alert.severity:
alert.severity = alert_severity
alert.save(update_fields=["severity"])
# create alert in dashboard if enabled
if dashboard_alert or always_dashboard:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and dashboard_severities
and alert.severity in dashboard_severities
):
alert.hidden = False
alert.save(update_fields=["hidden"])
# send email if enabled
if email_alert or always_email:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and email_severities
and alert.severity in email_severities
):
email_task.delay(
pk=alert.pk,
alert_interval=alert_interval,
)
# send text if enabled
if text_alert or always_text:
# check if alert template is set and specific severities are configured
if (
not alert_template
or alert_template
and text_severities
and alert.severity in text_severities
):
text_task.delay(pk=alert.pk, alert_interval=alert_interval)
# check if any scripts should be run
if (
alert_template
and alert_template.action
and run_script_action
and not alert.action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
script=alert_template.action,
username="alert-action-failure",
)
r = agent.run_script(
scriptpk=alert_template.action.pk,
args=alert.parse_script_args(alert_template.action_args),
timeout=alert_template.action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.action_retcode = r["retcode"]
alert.action_stdout = r["stdout"]
alert.action_stderr = r["stderr"]
alert.action_execution_time = "{:.4f}".format(r["execution_time"])
alert.action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Failure action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) failure alert",
)
def handle_alert_resolve(
cls, instance: Union[Agent, TaskResult, CheckResult]
) -> None:
from agents.models import Agent, AgentHistory
from autotasks.models import TaskResult
from checks.models import CheckResult
# set variables
email_on_resolved = False
text_on_resolved = False
resolved_email_task = None
resolved_text_task = None
run_script_action = None
# check what the instance passed is
if isinstance(instance, Agent):
from agents.tasks import agent_recovery_email_task, agent_recovery_sms_task
resolved_email_task = agent_recovery_email_task
resolved_text_task = agent_recovery_sms_task
alert_template = instance.alert_template
maintenance_mode = instance.maintenance_mode
agent = instance
if alert_template:
email_on_resolved = alert_template.agent_email_on_resolved
text_on_resolved = alert_template.agent_text_on_resolved
run_script_action = alert_template.agent_script_actions
if agent.overdue_email_alert:
email_on_resolved = True
if agent.overdue_text_alert:
text_on_resolved = True
elif isinstance(instance, CheckResult):
from checks.tasks import (
handle_resolved_check_email_alert_task,
handle_resolved_check_sms_alert_task,
)
resolved_email_task = handle_resolved_check_email_alert_task
resolved_text_task = handle_resolved_check_sms_alert_task
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.check_email_on_resolved
text_on_resolved = alert_template.check_text_on_resolved
run_script_action = alert_template.check_script_actions
elif isinstance(instance, TaskResult):
from autotasks.tasks import (
handle_resolved_task_email_alert,
handle_resolved_task_sms_alert,
)
resolved_email_task = handle_resolved_task_email_alert
resolved_text_task = handle_resolved_task_sms_alert
alert_template = instance.agent.alert_template
maintenance_mode = instance.agent.maintenance_mode
agent = instance.agent
if alert_template:
email_on_resolved = alert_template.task_email_on_resolved
text_on_resolved = alert_template.task_text_on_resolved
run_script_action = alert_template.task_script_actions
else:
return
alert = instance.get_or_create_alert_if_needed(alert_template)
# return if agent is in maintenance mode
if not alert or maintenance_mode:
return
alert.resolve()
        # check if a resolved email notification should be sent
if email_on_resolved and not alert.resolved_email_sent:
resolved_email_task.delay(pk=alert.pk)
# check if resolved text should be sent
if text_on_resolved and not alert.resolved_sms_sent:
resolved_text_task.delay(pk=alert.pk)
# check if resolved script should be run
if (
alert_template
and alert_template.resolved_action
and run_script_action
and not alert.resolved_action_run
):
hist = AgentHistory.objects.create(
agent=agent,
type=AgentHistoryType.SCRIPT_RUN,
                script=alert_template.resolved_action,
username="alert-action-resolved",
)
r = agent.run_script(
scriptpk=alert_template.resolved_action.pk,
args=alert.parse_script_args(alert_template.resolved_action_args),
timeout=alert_template.resolved_action_timeout,
wait=True,
history_pk=hist.pk,
full=True,
run_on_any=True,
run_as_user=False,
env_vars=alert_template.resolved_action_env_vars,
)
# command was successful
if isinstance(r, dict):
alert.resolved_action_retcode = r["retcode"]
alert.resolved_action_stdout = r["stdout"]
alert.resolved_action_stderr = r["stderr"]
alert.resolved_action_execution_time = "{:.4f}".format(
r["execution_time"]
)
alert.resolved_action_run = djangotime.now()
alert.save()
else:
DebugLog.error(
agent=agent,
log_type=DebugLogType.SCRIPTING,
message=f"Resolved action: {alert_template.action.name} failed to run on any agent for {agent.hostname}({agent.pk}) resolved alert",
)
def parse_script_args(self, args: List[str]) -> List[str]:
if not args:
return []
temp_args = []
# pattern to match for injection
pattern = re.compile(".*\\{\\{alert\\.(.*)\\}\\}.*")
for arg in args:
if match := pattern.match(arg):
name = match.group(1)
# check if attr exists and isn't a function
if hasattr(self, name) and not callable(getattr(self, name)):
value = f"'{getattr(self, name)}'"
else:
continue
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg))
except re.error:
temp_args.append(re.sub("\\{\\{.*\\}\\}", re.escape(value), arg))
except Exception as e:
DebugLog.error(log_type=DebugLogType.SCRIPTING, message=str(e))
continue
else:
temp_args.append(arg)
return temp_args
class TaskResult(models.Model):
class Meta:
unique_together = (("agent", "task"),)
objects = PermissionQuerySet.as_manager()
id = models.BigAutoField(primary_key=True)
agent = models.ForeignKey(
"agents.Agent",
related_name="taskresults",
on_delete=models.CASCADE,
)
task = models.ForeignKey(
"autotasks.AutomatedTask",
related_name="taskresults",
on_delete=models.CASCADE,
)
retcode = models.BigIntegerField(null=True, blank=True)
stdout = models.TextField(null=True, blank=True)
stderr = models.TextField(null=True, blank=True)
execution_time = models.CharField(max_length=100, default="0.0000")
last_run = models.DateTimeField(null=True, blank=True)
status = models.CharField(
max_length=30, choices=TaskStatus.choices, default=TaskStatus.PENDING
)
sync_status = models.CharField(
max_length=100, choices=TaskSyncStatus.choices, default=TaskSyncStatus.INITIAL
)
def __str__(self):
return f"{self.agent.hostname} - {self.task}"
def get_or_create_alert_if_needed(
self, alert_template: "Optional[AlertTemplate]"
) -> "Optional[Alert]":
from alerts.models import Alert
return Alert.create_or_return_task_alert(
self.task,
agent=self.agent,
skip_create=not self.task.should_create_alert(alert_template),
)
def save_collector_results(self) -> None:
agent_field = self.task.custom_field.get_or_create_field_value(self.agent)
value = (
self.stdout.strip()
if self.task.collector_all_output
else self.stdout.strip().split("\n")[-1].strip()
)
agent_field.save_to_field(value)
def send_email(self):
CORE = get_core_settings()
# Format of Email sent when Task has email alert
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
else:
subject = f"{self} Failed"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_mail(subject, body, self.agent.alert_template)
def send_sms(self):
CORE = get_core_settings()
# Format of SMS sent when Task has SMS alert
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
else:
subject = f"{self} Failed"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_sms(body, alert_template=self.agent.alert_template)
def send_resolved_email(self):
CORE = get_core_settings()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_mail(subject, body, alert_template=self.agent.alert_template)
def send_resolved_sms(self):
CORE = get_core_settings()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
CORE.send_sms(body, alert_template=self.agent.alert_template)
def handle_resolved_task_email_alert(pk: int) -> str:
try:
alert = Alert.objects.get(pk=pk)
except Alert.DoesNotExist:
return "alert not found"
# first time sending email
if not alert.resolved_email_sent:
task_result = TaskResult.objects.get(
task=alert.assigned_task, agent=alert.agent
)
sleep(rand_range(100, 1500))
task_result.send_resolved_email()
alert.resolved_email_sent = djangotime.now()
alert.save(update_fields=["resolved_email_sent"])
return "ok" | null |
189,138 | import re
import setuptools
import sys
def get_package_dir():
pkg_dir = {
"yolox.tools": "tools",
"yolox.exp.default": "exps/default",
}
return pkg_dir | null |
189,139 | import re
import setuptools
import sys
def get_install_requirements():
with open("requirements.txt", "r", encoding="utf-8") as f:
reqs = [x.strip() for x in f.read().splitlines()]
reqs = [x for x in reqs if not x.startswith("#")]
return reqs | null |
189,140 | import re
import setuptools
import sys
def get_yolox_version():
with open("yolox/__init__.py", "r") as f:
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
f.read(), re.MULTILINE
).group(1)
return version | null |
189,141 | import re
import setuptools
import sys
def get_long_description():
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
return long_description | null |
189,142 | import re
import setuptools
import sys
TORCH_AVAILABLE = True
def get_ext_modules():
ext_module = []
if sys.platform != "win32": # pre-compile ops on linux
assert TORCH_AVAILABLE, "torch is required for pre-compiling ops, please install it first."
# if any other op is added, please also add it here
from yolox.layers import FastCOCOEvalOp
ext_module.append(FastCOCOEvalOp().build_op())
return ext_module | null |
189,143 | import re
import setuptools
import sys
TORCH_AVAILABLE = True
def get_cmd_class():
cmdclass = {}
    if TORCH_AVAILABLE:
        from torch.utils import cpp_extension  # needed for BuildExtension (import was missing)
        cmdclass["build_ext"] = cpp_extension.BuildExtension
return cmdclass | null |
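# --- Illustrative sketch (assumption, not the project's verbatim setup.py) ---
# Shows how helpers like the ones in the rows above are typically combined in a
# setuptools.setup() call; every keyword used here is a standard setuptools
# argument, and the helper functions are assumed to be in scope.
import setuptools

setuptools.setup(
    name="yolox",
    version=get_yolox_version(),
    packages=setuptools.find_packages(),
    package_dir=get_package_dir(),
    install_requires=get_install_requirements(),
    long_description=get_long_description(),
    long_description_content_type="text/markdown",
    ext_modules=get_ext_modules(),
    cmdclass=get_cmd_class(),
)
# ------------------------------------------------------------------------------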
189,144 | import os
import sys
from unittest import mock
from sphinx.domains import Domain
from typing import Dict, List, Tuple
import sphinx_rtd_theme
class GithubURLDomain(Domain):
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
from recommonmark.parser import CommonMarkParser
import yolox
def autodoc_skip_member(app, what, name, obj, skip, options):
def setup(app):
from recommonmark.transform import AutoStructify
app.add_domain(GithubURLDomain)
app.connect("autodoc-skip-member", autodoc_skip_member)
# app.add_role("paper", paper_ref_role)
app.add_config_value(
"recommonmark_config",
{"enable_math": True, "enable_inline_math": True, "enable_eval_rst": True},
True,
)
app.add_transform(AutoStructify) | null |
189,145 | import os
import random
import torch
import torch.distributed as dist
import torch.nn as nn
from .base_exp import BaseExp
class Exp(BaseExp):
def __init__(self):
super().__init__()
# ---------------- model config ---------------- #
# detect classes number of model
self.num_classes = 80
# factor of model depth
self.depth = 1.00
# factor of model width
self.width = 1.00
        # activation name. For example, if using "relu", then "silu" will be replaced with "relu".
self.act = "silu"
# ---------------- dataloader config ---------------- #
# set worker to 4 for shorter dataloader init time
        # If your training process costs too much memory, reduce this value.
self.data_num_workers = 4
self.input_size = (640, 640) # (height, width)
# Actual multiscale ranges: [640 - 5 * 32, 640 + 5 * 32].
# To disable multiscale training, set the value to 0.
self.multiscale_range = 5
# You can uncomment this line to specify a multiscale range
# self.random_size = (14, 26)
# dir of dataset images, if data_dir is None, this project will use `datasets` dir
self.data_dir = None
# name of annotation file for training
self.train_ann = "instances_train2017.json"
# name of annotation file for evaluation
self.val_ann = "instances_val2017.json"
# name of annotation file for testing
self.test_ann = "instances_test2017.json"
# --------------- transform config ----------------- #
# prob of applying mosaic aug
self.mosaic_prob = 1.0
# prob of applying mixup aug
self.mixup_prob = 1.0
# prob of applying hsv aug
self.hsv_prob = 1.0
# prob of applying flip aug
self.flip_prob = 0.5
# rotation angle range, for example, if set to 2, the true range is (-2, 2)
self.degrees = 10.0
# translate range, for example, if set to 0.1, the true range is (-0.1, 0.1)
self.translate = 0.1
self.mosaic_scale = (0.1, 2)
# apply mixup aug or not
self.enable_mixup = True
self.mixup_scale = (0.5, 1.5)
# shear angle range, for example, if set to 2, the true range is (-2, 2)
self.shear = 2.0
# -------------- training config --------------------- #
# epoch number used for warmup
self.warmup_epochs = 5
# max training epoch
self.max_epoch = 300
# minimum learning rate during warmup
self.warmup_lr = 0
self.min_lr_ratio = 0.05
        # learning rate for one image. During training, lr will be multiplied by the batch size.
self.basic_lr_per_img = 0.01 / 64.0
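        # example: with a total batch size of 64 this yields an actual lr of (0.01 / 64) * 64 = 0.01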
# name of LRScheduler
self.scheduler = "yoloxwarmcos"
        # number of last epochs during which to disable augmentation such as mosaic
self.no_aug_epochs = 15
# apply EMA during training
self.ema = True
# weight decay of optimizer
self.weight_decay = 5e-4
# momentum of optimizer
self.momentum = 0.9
# log period in iter, for example,
        # if set to 1, the user will see a log every iteration.
self.print_interval = 10
# eval period in epoch, for example,
        # if set to 1, the model will be evaluated after every epoch.
self.eval_interval = 10
# save history checkpoint or not.
# If set to False, yolox will only save latest and best ckpt.
self.save_history_ckpt = True
# name of experiment
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
# ----------------- testing config ------------------ #
# output image size during evaluation/test
self.test_size = (640, 640)
# confidence threshold during evaluation/test,
# boxes whose scores are less than test_conf will be filtered
self.test_conf = 0.01
# nms threshold
self.nmsthre = 0.65
def get_model(self):
from yolox.models import YOLOX, YOLOPAFPN, YOLOXHead
def init_yolo(M):
for m in M.modules():
if isinstance(m, nn.BatchNorm2d):
m.eps = 1e-3
m.momentum = 0.03
if getattr(self, "model", None) is None:
in_channels = [256, 512, 1024]
backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels, act=self.act)
head = YOLOXHead(self.num_classes, self.width, in_channels=in_channels, act=self.act)
self.model = YOLOX(backbone, head)
self.model.apply(init_yolo)
self.model.head.initialize_biases(1e-2)
self.model.train()
return self.model
def get_dataset(self, cache: bool = False, cache_type: str = "ram"):
"""
Get dataset according to cache and cache_type parameters.
Args:
cache (bool): Whether to cache imgs to ram or disk.
cache_type (str, optional): Defaults to "ram".
"ram" : Caching imgs to ram for fast training.
"disk": Caching imgs to disk for fast training.
"""
from yolox.data import COCODataset, TrainTransform
return COCODataset(
data_dir=self.data_dir,
json_file=self.train_ann,
img_size=self.input_size,
preproc=TrainTransform(
max_labels=50,
flip_prob=self.flip_prob,
hsv_prob=self.hsv_prob
),
cache=cache,
cache_type=cache_type,
)
def get_data_loader(self, batch_size, is_distributed, no_aug=False, cache_img: str = None):
"""
Get dataloader according to cache_img parameter.
Args:
no_aug (bool, optional): Whether to turn off mosaic data enhancement. Defaults to False.
cache_img (str, optional): cache_img is equivalent to cache_type. Defaults to None.
"ram" : Caching imgs to ram for fast training.
"disk": Caching imgs to disk for fast training.
None: Do not use cache, in this case cache_data is also None.
"""
from yolox.data import (
TrainTransform,
YoloBatchSampler,
DataLoader,
InfiniteSampler,
MosaicDetection,
worker_init_reset_seed,
)
from yolox.utils import wait_for_the_master
# if cache is True, we will create self.dataset before launch
# else we will create self.dataset after launch
if self.dataset is None:
with wait_for_the_master():
assert cache_img is None, \
"cache_img must be None if you didn't create self.dataset before launch"
self.dataset = self.get_dataset(cache=False, cache_type=cache_img)
self.dataset = MosaicDetection(
dataset=self.dataset,
mosaic=not no_aug,
img_size=self.input_size,
preproc=TrainTransform(
max_labels=120,
flip_prob=self.flip_prob,
hsv_prob=self.hsv_prob),
degrees=self.degrees,
translate=self.translate,
mosaic_scale=self.mosaic_scale,
mixup_scale=self.mixup_scale,
shear=self.shear,
enable_mixup=self.enable_mixup,
mosaic_prob=self.mosaic_prob,
mixup_prob=self.mixup_prob,
)
if is_distributed:
batch_size = batch_size // dist.get_world_size()
sampler = InfiniteSampler(len(self.dataset), seed=self.seed if self.seed else 0)
batch_sampler = YoloBatchSampler(
sampler=sampler,
batch_size=batch_size,
drop_last=False,
mosaic=not no_aug,
)
dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
dataloader_kwargs["batch_sampler"] = batch_sampler
# Make sure each process has different random seed, especially for 'fork' method.
# Check https://github.com/pytorch/pytorch/issues/63311 for more details.
dataloader_kwargs["worker_init_fn"] = worker_init_reset_seed
train_loader = DataLoader(self.dataset, **dataloader_kwargs)
return train_loader
def random_resize(self, data_loader, epoch, rank, is_distributed):
tensor = torch.LongTensor(2).cuda()
if rank == 0:
size_factor = self.input_size[1] * 1.0 / self.input_size[0]
if not hasattr(self, 'random_size'):
min_size = int(self.input_size[0] / 32) - self.multiscale_range
max_size = int(self.input_size[0] / 32) + self.multiscale_range
self.random_size = (min_size, max_size)
size = random.randint(*self.random_size)
size = (int(32 * size), 32 * int(size * size_factor))
tensor[0] = size[0]
tensor[1] = size[1]
if is_distributed:
dist.barrier()
dist.broadcast(tensor, 0)
input_size = (tensor[0].item(), tensor[1].item())
return input_size
def preprocess(self, inputs, targets, tsize):
scale_y = tsize[0] / self.input_size[0]
scale_x = tsize[1] / self.input_size[1]
if scale_x != 1 or scale_y != 1:
inputs = nn.functional.interpolate(
inputs, size=tsize, mode="bilinear", align_corners=False
)
targets[..., 1::2] = targets[..., 1::2] * scale_x
targets[..., 2::2] = targets[..., 2::2] * scale_y
return inputs, targets
def get_optimizer(self, batch_size):
if "optimizer" not in self.__dict__:
if self.warmup_epochs > 0:
lr = self.warmup_lr
else:
lr = self.basic_lr_per_img * batch_size
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in self.model.named_modules():
if hasattr(v, "bias") and isinstance(v.bias, nn.Parameter):
pg2.append(v.bias) # biases
if isinstance(v, nn.BatchNorm2d) or "bn" in k:
pg0.append(v.weight) # no decay
elif hasattr(v, "weight") and isinstance(v.weight, nn.Parameter):
pg1.append(v.weight) # apply decay
optimizer = torch.optim.SGD(
pg0, lr=lr, momentum=self.momentum, nesterov=True
)
optimizer.add_param_group(
{"params": pg1, "weight_decay": self.weight_decay}
) # add pg1 with weight_decay
optimizer.add_param_group({"params": pg2})
self.optimizer = optimizer
return self.optimizer
def get_lr_scheduler(self, lr, iters_per_epoch):
from yolox.utils import LRScheduler
scheduler = LRScheduler(
self.scheduler,
lr,
iters_per_epoch,
self.max_epoch,
warmup_epochs=self.warmup_epochs,
warmup_lr_start=self.warmup_lr,
no_aug_epochs=self.no_aug_epochs,
min_lr_ratio=self.min_lr_ratio,
)
return scheduler
def get_eval_dataset(self, **kwargs):
from yolox.data import COCODataset, ValTransform
testdev = kwargs.get("testdev", False)
legacy = kwargs.get("legacy", False)
return COCODataset(
data_dir=self.data_dir,
json_file=self.val_ann if not testdev else self.test_ann,
name="val2017" if not testdev else "test2017",
img_size=self.test_size,
preproc=ValTransform(legacy=legacy),
)
def get_eval_loader(self, batch_size, is_distributed, **kwargs):
valdataset = self.get_eval_dataset(**kwargs)
if is_distributed:
batch_size = batch_size // dist.get_world_size()
sampler = torch.utils.data.distributed.DistributedSampler(
valdataset, shuffle=False
)
else:
sampler = torch.utils.data.SequentialSampler(valdataset)
dataloader_kwargs = {
"num_workers": self.data_num_workers,
"pin_memory": True,
"sampler": sampler,
}
dataloader_kwargs["batch_size"] = batch_size
val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)
return val_loader
def get_evaluator(self, batch_size, is_distributed, testdev=False, legacy=False):
from yolox.evaluators import COCOEvaluator
return COCOEvaluator(
dataloader=self.get_eval_loader(batch_size, is_distributed,
testdev=testdev, legacy=legacy),
img_size=self.test_size,
confthre=self.test_conf,
nmsthre=self.nmsthre,
num_classes=self.num_classes,
testdev=testdev,
)
def get_trainer(self, args):
from yolox.core import Trainer
trainer = Trainer(self, args)
# NOTE: trainer shouldn't be an attribute of exp object
return trainer
def eval(self, model, evaluator, is_distributed, half=False, return_outputs=False):
return evaluator.evaluate(model, is_distributed, half, return_outputs=return_outputs)
def check_exp_value(exp: Exp):
h, w = exp.input_size
assert h % 32 == 0 and w % 32 == 0, "input size must be multiples of 32" | null |
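A quick standalone check of the `1::2` / `2::2` slicing used in the `preprocess` method above; this is only an illustration and assumes targets are laid out as `[class, cx, cy, w, h]`:
import torch

targets = torch.tensor([[0.0, 100.0, 50.0, 40.0, 20.0]])  # [class, cx, cy, w, h]
scale_x, scale_y = 0.5, 2.0
targets[..., 1::2] = targets[..., 1::2] * scale_x  # scales cx and w -> 50, 20
targets[..., 2::2] = targets[..., 2::2] * scale_y  # scales cy and h -> 100, 40
print(targets)  # tensor([[  0.,  50., 100.,  20.,  40.]])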
189,148 | import contextlib
import io
import itertools
import json
import tempfile
import time
from collections import ChainMap, defaultdict
from loguru import logger
from tabulate import tabulate
from tqdm import tqdm
import numpy as np
import torch
from yolox.data.datasets import COCO_CLASSES
from yolox.utils import (
gather,
is_main_process,
postprocess,
synchronize,
time_synchronized,
xyxy2xywh
)
def per_class_AP_table(coco_eval, class_names=COCO_CLASSES, headers=["class", "AP"], colums=6):
per_class_AP = {}
precisions = coco_eval.eval["precision"]
# dimension of precisions: [TxRxKxAxM]
# precision has dims (iou, recall, cls, area range, max dets)
assert len(class_names) == precisions.shape[2]
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
per_class_AP[name] = float(ap * 100)
num_cols = min(colums, len(per_class_AP) * len(headers))
result_pair = [x for pair in per_class_AP.items() for x in pair]
row_pair = itertools.zip_longest(*[result_pair[i::num_cols] for i in range(num_cols)])
table_headers = headers * (num_cols // len(headers))
table = tabulate(
row_pair, tablefmt="pipe", floatfmt=".3f", headers=table_headers, numalign="left",
)
return table | null |
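The `zip_longest` packing above is the least obvious step; here is a standalone illustration of just that layout logic with made-up AP values, so no `coco_eval` object is needed:
import itertools
from tabulate import tabulate

per_class_AP = {"person": 55.1, "car": 43.2, "dog": 60.0}  # dummy numbers
headers, colums = ["class", "AP"], 6
num_cols = min(colums, len(per_class_AP) * len(headers))
result_pair = [x for pair in per_class_AP.items() for x in pair]
row_pair = itertools.zip_longest(*[result_pair[i::num_cols] for i in range(num_cols)])
print(tabulate(row_pair, tablefmt="pipe", floatfmt=".3f",
               headers=headers * (num_cols // len(headers)), numalign="left"))
# -> a single table row with three "class | AP" column pairs placed side by side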
189,149 | import os
import pickle
import xml.etree.ElementTree as ET
import numpy as np
def parse_rec(filename):
"""Parse a PASCAL VOC xml file"""
tree = ET.parse(filename)
objects = []
for obj in tree.findall("object"):
obj_struct = {}
obj_struct["name"] = obj.find("name").text
obj_struct["pose"] = obj.find("pose").text
obj_struct["truncated"] = int(obj.find("truncated").text)
obj_struct["difficult"] = int(obj.find("difficult").text)
bbox = obj.find("bndbox")
obj_struct["bbox"] = [
int(bbox.find("xmin").text),
int(bbox.find("ymin").text),
int(bbox.find("xmax").text),
int(bbox.find("ymax").text),
]
objects.append(obj_struct)
return objects
def voc_ap(rec, prec, use_07_metric=False):
"""
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11-point method (default: False).
"""
if use_07_metric:
# 11 point metric
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.0
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
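A tiny worked example of `voc_ap` (assuming the function above is in scope); for this toy curve the interpolated area under the precision-recall curve is 0.5 * 1.0 + 0.5 * 0.5 = 0.75:
import numpy as np

rec = np.array([0.5, 1.0])
prec = np.array([1.0, 0.5])
print(voc_ap(rec, prec, use_07_metric=False))  # 0.75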
def voc_eval(
detpath,
annopath,
imagesetfile,
classname,
cachedir,
ovthresh=0.5,
use_07_metric=False,
):
# first load gt
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
cachefile = os.path.join(cachedir, "annots.pkl")
# read list of images
with open(imagesetfile, "r") as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
recs[imagename] = parse_rec(annopath.format(imagename))
if i % 100 == 0:
print(f"Reading annotation for {i + 1}/{len(imagenames)}")
# save
print(f"Saving cached annotations to {cachefile}")
with open(cachefile, "wb") as f:
pickle.dump(recs, f)
else:
# load
with open(cachefile, "rb") as f:
recs = pickle.load(f)
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj["name"] == classname]
bbox = np.array([x["bbox"] for x in R])
difficult = np.array([x["difficult"] for x in R]).astype(bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det}
# read dets
detfile = detpath.format(classname)
with open(detfile, "r") as f:
lines = f.readlines()
if len(lines) == 0:
return 0, 0, 0
splitlines = [x.strip().split(" ") for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R["bbox"].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
# union
uni = (
(bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
+ (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0) - inters
)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R["difficult"][jmax]:
if not R["det"][jmax]:
tp[d] = 1.0
R["det"][jmax] = 1
else:
fp[d] = 1.0
else:
fp[d] = 1.0
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap | null |
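An end-to-end sketch that builds a one-image, one-class VOC-style setup in a temporary directory and runs `voc_eval` on it; it assumes `voc_eval` and `parse_rec` above are in scope, and all file names here are made up for illustration:
import os
import tempfile

xml = (
    "<annotation><object>"
    "<name>car</name><pose>Unspecified</pose>"
    "<truncated>0</truncated><difficult>0</difficult>"
    "<bndbox><xmin>10</xmin><ymin>10</ymin><xmax>50</xmax><ymax>50</ymax></bndbox>"
    "</object></annotation>"
)
tmp = tempfile.mkdtemp()
with open(os.path.join(tmp, "000001.xml"), "w") as f:
    f.write(xml)
with open(os.path.join(tmp, "test.txt"), "w") as f:
    f.write("000001\n")
with open(os.path.join(tmp, "det_car.txt"), "w") as f:
    f.write("000001 0.9 12 11 49 52\n")  # one confident detection, IoU > 0.5 with the gt box
rec, prec, ap = voc_eval(
    os.path.join(tmp, "det_{}.txt"),   # detpath template, formatted with the class name
    os.path.join(tmp, "{}.xml"),       # annopath template, formatted with the image id
    os.path.join(tmp, "test.txt"),
    "car",
    os.path.join(tmp, "cache"),
)
print(ap)  # 1.0: the single detection matches the single ground-truth box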
189,154 | import bisect
import copy
import os
import random
from abc import ABCMeta, abstractmethod
from functools import partial, wraps
from multiprocessing.pool import ThreadPool
import psutil
from loguru import logger
from tqdm import tqdm
import numpy as np
from torch.utils.data.dataset import ConcatDataset as torchConcatDataset
from torch.utils.data.dataset import Dataset as torchDataset
def cache_read_img(use_cache=True):
def decorator(read_img_fn):
"""
Decorate the read_img function to cache the image
Args:
read_img_fn: read_img function
use_cache (bool, optional): For the decorated read_img function,
whether to read the image from cache.
Defaults to True.
"""
@wraps(read_img_fn)
def wrapper(self, index, use_cache=use_cache):
cache = self.cache and use_cache
if cache:
if self.cache_type == "ram":
img = self.imgs[index]
img = copy.deepcopy(img)
elif self.cache_type == "disk":
img = np.load(
os.path.join(
self.cache_dir, f"{self.path_filename[index].split('.')[0]}.npy"))
else:
raise ValueError(f"Unknown cache type: {self.cache_type}")
else:
img = read_img_fn(self, index)
return img
return wrapper
return decorator | null |
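A minimal, hypothetical dataset class showing the attributes the decorator expects on `self` (it assumes `cache_read_img` above and its `copy`/`numpy` imports are in scope; only the "ram" path is exercised, while `cache_dir` and `path_filename` would matter for the "disk" path):
import numpy as np

class TinyDataset:
    def __init__(self, images):
        self.cache = True
        self.cache_type = "ram"
        self.imgs = images                      # backing store for the "ram" cache
        self.cache_dir = "./img_cache"          # only used when cache_type == "disk"
        self.path_filename = [f"{i}.jpg" for i in range(len(images))]

    @cache_read_img(use_cache=True)
    def read_img(self, index):
        # slow path: a real dataset would decode the image from disk here
        return self.imgs[index]

ds = TinyDataset([np.zeros((4, 4, 3), dtype=np.uint8)])
img_cached = ds.read_img(0)                     # deep-copied from the RAM cache
img_direct = ds.read_img(0, use_cache=False)    # bypasses the cache entirely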
189,155 | import copy
import os
import cv2
import numpy as np
from pycocotools.coco import COCO
from ..dataloading import get_yolox_datadir
from .datasets_wrapper import CacheDataset, cache_read_img
The provided code snippet includes necessary dependencies for implementing the `remove_useless_info` function. Write a Python function `def remove_useless_info(coco)` to solve the following problem:
Remove useless info in coco dataset. COCO object is modified inplace. This function is mainly used for saving memory (save about 30% mem).
Here is the function:
def remove_useless_info(coco):
"""
Remove useless info in coco dataset. COCO object is modified inplace.
This function is mainly used for saving memory (save about 30% mem).
"""
if isinstance(coco, COCO):
dataset = coco.dataset
dataset.pop("info", None)
dataset.pop("licenses", None)
for img in dataset["images"]:
img.pop("license", None)
img.pop("coco_url", None)
img.pop("date_captured", None)
img.pop("flickr_url", None)
if "annotations" in coco.dataset:
for anno in coco.dataset["annotations"]:
anno.pop("segmentation", None) | Remove useless info in coco dataset. COCO object is modified inplace. This function is mainly used for saving memory (save about 30% mem). |
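A small self-contained check of the stripping behaviour, using a throwaway COCO-format json; the annotation content below is made up purely for illustration and assumes `remove_useless_info` above is in scope:
import json
import tempfile
from pycocotools.coco import COCO

ann = {
    "info": {}, "licenses": [],
    "images": [{"id": 1, "file_name": "000001.jpg", "width": 64, "height": 64,
                "license": 1, "coco_url": "", "date_captured": "", "flickr_url": ""}],
    "annotations": [{"id": 1, "image_id": 1, "category_id": 1, "iscrowd": 0,
                     "bbox": [0, 0, 10, 10], "area": 100,
                     "segmentation": [[0, 0, 10, 0, 10, 10, 0, 10]]}],
    "categories": [{"id": 1, "name": "car"}],
}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(ann, f)
coco = COCO(f.name)
remove_useless_info(coco)
assert "info" not in coco.dataset and "segmentation" not in coco.dataset["annotations"][0]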
189,169 | import torch
from torch import nn
from torch.hub import load_state_dict_from_url
def create_yolox_model(name: str, pretrained: bool = True, num_classes: int = 80, device=None,
exp_path: str = None, ckpt_path: str = None) -> nn.Module:
"""creates and loads a YOLOX model
Args:
name (str): name of model. for example, "yolox-s", "yolox-tiny" or "yolox_custom"
if you want to load your own model.
pretrained (bool): load pretrained weights into the model. Default to True.
device (str): default device to for model. Default to None.
num_classes (int): number of model classes. Default to 80.
exp_path (str): path to your own experiment file. Required if name="yolox_custom"
ckpt_path (str): path to your own ckpt. Required if name="yolox_custom" and you want to
load a pretrained model
Returns:
YOLOX model (nn.Module)
"""
from yolox.exp import get_exp, Exp
if device is None:
device = "cuda:0" if torch.cuda.is_available() else "cpu"
device = torch.device(device)
assert name in _CKPT_FULL_PATH or name == "yolox_custom", \
f"user should use one of value in {_CKPT_FULL_PATH.keys()} or \"yolox_custom\""
if name in _CKPT_FULL_PATH:
exp: Exp = get_exp(exp_name=name)
exp.num_classes = num_classes
yolox_model = exp.get_model()
if pretrained and num_classes == 80:
weights_url = _CKPT_FULL_PATH[name]
ckpt = load_state_dict_from_url(weights_url, map_location="cpu")
if "model" in ckpt:
ckpt = ckpt["model"]
yolox_model.load_state_dict(ckpt)
else:
assert exp_path is not None, "for a \"yolox_custom\" model exp_path must be provided"
exp: Exp = get_exp(exp_file=exp_path)
yolox_model = exp.get_model()
if ckpt_path:
ckpt = torch.load(ckpt_path, map_location="cpu")
if "model" in ckpt:
ckpt = ckpt["model"]
yolox_model.load_state_dict(ckpt)
yolox_model.to(device)
return yolox_model
def yolox_custom(ckpt_path: str = None, exp_path: str = None, device: str = None) -> nn.Module:
return create_yolox_model("yolox_custom", ckpt_path=ckpt_path, exp_path=exp_path, device=device) | null |
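A usage sketch for the builder above. It assumes the yolox package is installed and that `_CKPT_FULL_PATH`, a module-level dict mapping model names to checkpoint URLs that is not shown in this snippet, is defined alongside the function; downloading pretrained weights requires network access, and the custom paths below are placeholders:
# build a pretrained COCO model on CPU
model = create_yolox_model("yolox-s", pretrained=True, num_classes=80, device="cpu")
model.eval()

# or load your own experiment/checkpoint (hypothetical paths)
# model = create_yolox_model("yolox_custom", exp_path="exps/my_exp.py", ckpt_path="my_ckpt.pth")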
189,182 | import os
import random
import cv2
import numpy as np
def random_color():
return random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)
The provided code snippet includes necessary dependencies for implementing the `visualize_assign` function. Write a Python function `def visualize_assign(img, boxes, coords, match_results, save_name=None) -> np.ndarray` to solve the following problem:
visualize label assign result. Args: img: img to visualize boxes: gt boxes in xyxy format coords: coords of matched anchors match_results: match results of each gt box and coord. save_name: name of save image, if None, image will not be saved. Default: None.
Here is the function:
def visualize_assign(img, boxes, coords, match_results, save_name=None) -> np.ndarray:
"""visualize label assign result.
Args:
img: img to visualize
boxes: gt boxes in xyxy format
coords: coords of matched anchors
match_results: match results of each gt box and coord.
save_name: name of save image, if None, image will not be saved. Default: None.
"""
for box_id, box in enumerate(boxes):
x1, y1, x2, y2 = box
color = random_color()
assign_coords = coords[match_results == box_id]
if assign_coords.numel() == 0:
# unmatched boxes are red
color = (0, 0, 255)
cv2.putText(
img, "unmatched", (int(x1), int(y1) - 5),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 1
)
else:
for coord in assign_coords:
# draw assigned anchor
cv2.circle(img, (int(coord[0]), int(coord[1])), 3, color, -1)
cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), color, 2)
if save_name is not None:
cv2.imwrite(save_name, img)
return img | visualize label assign result. Args: img: img to visualize boxes: gt boxes in xyxy format coords: coords of matched anchors match_results: match results of each gt box and coord. save_name: name of save image, if None, image will not be saved. Default: None. |
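A quick usage sketch with dummy data; it assumes `visualize_assign` and `random_color` above are in scope, and uses torch tensors for `coords` and `match_results` since the function indexes them with a boolean mask and calls `.numel()`:
import numpy as np
import torch

img = np.zeros((128, 128, 3), dtype=np.uint8)
boxes = [(10, 10, 60, 60), (70, 70, 120, 120)]
coords = torch.tensor([[32.0, 32.0], [40.0, 40.0]])
match_results = torch.tensor([0, 0])   # both anchors assigned to box 0; box 1 stays unmatched
out = visualize_assign(img, boxes, coords, match_results, save_name=None)
print(out.shape)  # (128, 128, 3)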
189,183 | import os
import random
import cv2
import numpy as np
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path) | null |
189,184 | import os
import random
import cv2
import numpy as np
def multiclass_nms_class_aware(boxes, scores, nms_thr, score_thr):
"""Multiclass NMS implemented in Numpy. Class-aware version."""
final_dets = []
num_classes = scores.shape[1]
for cls_ind in range(num_classes):
cls_scores = scores[:, cls_ind]
valid_score_mask = cls_scores > score_thr
if valid_score_mask.sum() == 0:
continue
else:
valid_scores = cls_scores[valid_score_mask]
valid_boxes = boxes[valid_score_mask]
keep = nms(valid_boxes, valid_scores, nms_thr)
if len(keep) > 0:
cls_inds = np.ones((len(keep), 1)) * cls_ind
dets = np.concatenate(
[valid_boxes[keep], valid_scores[keep, None], cls_inds], 1
)
final_dets.append(dets)
if len(final_dets) == 0:
return None
return np.concatenate(final_dets, 0)
def multiclass_nms_class_agnostic(boxes, scores, nms_thr, score_thr):
"""Multiclass NMS implemented in Numpy. Class-agnostic version."""
cls_inds = scores.argmax(1)
cls_scores = scores[np.arange(len(cls_inds)), cls_inds]
valid_score_mask = cls_scores > score_thr
if valid_score_mask.sum() == 0:
return None
valid_scores = cls_scores[valid_score_mask]
valid_boxes = boxes[valid_score_mask]
valid_cls_inds = cls_inds[valid_score_mask]
keep = nms(valid_boxes, valid_scores, nms_thr)
if keep:
dets = np.concatenate(
[valid_boxes[keep], valid_scores[keep, None], valid_cls_inds[keep, None]], 1
)
return dets
The provided code snippet includes necessary dependencies for implementing the `multiclass_nms` function. Write a Python function `def multiclass_nms(boxes, scores, nms_thr, score_thr, class_agnostic=True)` to solve the following problem:
Multiclass NMS implemented in Numpy
Here is the function:
def multiclass_nms(boxes, scores, nms_thr, score_thr, class_agnostic=True):
"""Multiclass NMS implemented in Numpy"""
if class_agnostic:
nms_method = multiclass_nms_class_agnostic
else:
nms_method = multiclass_nms_class_aware
return nms_method(boxes, scores, nms_thr, score_thr) | Multiclass NMS implemented in Numpy |
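Both helpers above call a single-class `nms` function that is not included in this snippet. Below is a minimal NumPy version in the same style; it is a sketch of standard greedy IoU suppression, not necessarily the exact upstream implementation:
import numpy as np

def nms(boxes, scores, nms_thr):
    """Single class NMS implemented in Numpy."""
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]           # indices sorted by descending score
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # IoU of the current best box with all remaining boxes
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[np.where(iou <= nms_thr)[0] + 1]  # keep only boxes below the IoU threshold
    return keep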
189,185 | import os
import random
import cv2
import numpy as np
def demo_postprocess(outputs, img_size, p6=False):
grids = []
expanded_strides = []
strides = [8, 16, 32] if not p6 else [8, 16, 32, 64]
hsizes = [img_size[0] // stride for stride in strides]
wsizes = [img_size[1] // stride for stride in strides]
for hsize, wsize, stride in zip(hsizes, wsizes, strides):
xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
grids.append(grid)
shape = grid.shape[:2]
expanded_strides.append(np.full((*shape, 1), stride))
grids = np.concatenate(grids, 1)
expanded_strides = np.concatenate(expanded_strides, 1)
outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides
outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides
return outputs | null |
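A shape and decoding sanity check, assuming `demo_postprocess` above is in scope. For a 640x640 input without P6 there are 80*80 + 40*40 + 20*20 = 8400 predictions, and an all-zero raw output decodes to box centers on the grid with width and height equal to the stride:
import numpy as np

outputs = np.zeros((1, 8400, 85), dtype=np.float32)   # dummy raw YOLOX head output
decoded = demo_postprocess(outputs, (640, 640), p6=False)
print(decoded.shape)        # (1, 8400, 85)
print(decoded[0, 0, :4])    # [0. 0. 8. 8.] -> first cell of the stride-8 level
print(decoded[0, 1, :4])    # [8. 0. 8. 8.] -> next cell along x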
189,199 | import functools
import os
import time
from collections import defaultdict, deque
import psutil
import numpy as np
import torch
def get_total_and_free_memory_in_Mb(cuda_device):
devices_info_str = os.popen(
"nvidia-smi --query-gpu=memory.total,memory.used --format=csv,nounits,noheader"
)
devices_info = devices_info_str.read().strip().split("\n")
if "CUDA_VISIBLE_DEVICES" in os.environ:
visible_devices = os.environ["CUDA_VISIBLE_DEVICES"].split(',')
cuda_device = int(visible_devices[cuda_device])
total, used = devices_info[int(cuda_device)].split(",")
return int(total), int(used)
The provided code snippet includes necessary dependencies for implementing the `occupy_mem` function. Write a Python function `def occupy_mem(cuda_device, mem_ratio=0.9)` to solve the following problem:
Pre-allocate GPU memory for training to avoid memory fragmentation.
Here is the function:
def occupy_mem(cuda_device, mem_ratio=0.9):
"""
Pre-allocate GPU memory for training to avoid memory fragmentation.
"""
total, used = get_total_and_free_memory_in_Mb(cuda_device)
max_mem = int(total * mem_ratio)
block_mem = max_mem - used
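# 256 * 1024 float32 elements take exactly 1 MiB, so this tensor occupies roughly block_mem MiB on the GPU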
x = torch.cuda.FloatTensor(256, 1024, block_mem)
del x
time.sleep(5) | Pre-allocate GPU memory for training to avoid memory fragmentation.
189,200 | import functools
import os
import time
from collections import defaultdict, deque
import psutil
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `gpu_mem_usage` function. Write a Python function `def gpu_mem_usage()` to solve the following problem:
Compute the GPU memory usage for the current device (MB).
Here is the function:
def gpu_mem_usage():
"""
Compute the GPU memory usage for the current device (MB).
"""
mem_usage_bytes = torch.cuda.max_memory_allocated()
return mem_usage_bytes / (1024 * 1024) | Compute the GPU memory usage for the current device (MB). |
189,201 | import functools
import os
import time
from collections import defaultdict, deque
import psutil
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `mem_usage` function. Write a Python function `def mem_usage()` to solve the following problem:
Compute the memory usage for the current machine (GB).
Here is the function:
def mem_usage():
"""
Compute the memory usage for the current machine (GB).
"""
gb = 1 << 30
mem = psutil.virtual_memory()
return mem.used / gb | Compute the memory usage for the current machine (GB). |
189,211 | import numpy as np
import torch
import torchvision
def cxcywh2xyxy(bboxes):
bboxes[:, 0] = bboxes[:, 0] - bboxes[:, 2] * 0.5
bboxes[:, 1] = bboxes[:, 1] - bboxes[:, 3] * 0.5
bboxes[:, 2] = bboxes[:, 0] + bboxes[:, 2]
bboxes[:, 3] = bboxes[:, 1] + bboxes[:, 3]
return bboxes | null |
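A small check, assuming `cxcywh2xyxy` above is in scope; note that the conversion is done in place, so pass a copy if you still need the original boxes:
import numpy as np

boxes = np.array([[50.0, 50.0, 20.0, 10.0]])    # cx, cy, w, h
print(cxcywh2xyxy(boxes.copy()))                # [[40. 45. 60. 55.]]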
189,212 | import argparse
import logging as log
import os
import sys
import cv2
import numpy as np
from openvino.inference_engine import IECore
from yolox.data.data_augment import preproc as preprocess
from yolox.data.datasets import COCO_CLASSES
from yolox.utils import mkdir, multiclass_nms, demo_postprocess, vis
The provided code snippet includes necessary dependencies for implementing the `parse_args` function. Write a Python function `def parse_args() -> argparse.Namespace` to solve the following problem:
Parse and return command line arguments
Here is the function:
def parse_args() -> argparse.Namespace:
"""Parse and return command line arguments"""
parser = argparse.ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument(
'-h',
'--help',
action='help',
help='Show this help message and exit.')
args.add_argument(
'-m',
'--model',
required=True,
type=str,
help='Required. Path to an .xml or .onnx file with a trained model.')
args.add_argument(
'-i',
'--input',
required=True,
type=str,
help='Required. Path to an image file.')
args.add_argument(
'-o',
'--output_dir',
type=str,
default='demo_output',
help='Path to your output dir.')
args.add_argument(
'-s',
'--score_thr',
type=float,
default=0.3,
help="Score threshould to visualize the result.")
args.add_argument(
'-d',
'--device',
default='CPU',
type=str,
help='Optional. Specify the target device to infer on; CPU, GPU, \
MYRIAD, HDDL or HETERO: is acceptable. The sample will look \
for a suitable plugin for device specified. Default value \
is CPU.')
args.add_argument(
'--labels',
default=None,
type=str,
help='Optional. Path to a labels mapping file.')
args.add_argument(
'-nt',
'--number_top',
default=10,
type=int,
help='Optional. Number of top results.')
return parser.parse_args() | Parse and return command line arguments |
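A hypothetical invocation of the OpenVINO demo this parser belongs to (the script and file names below are assumed, not taken from the snippet):
# python openvino_inference.py -m yolox_s.xml -i dog.jpg -o demo_output -s 0.3 -d CPU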