Please provide a description of the function:

def flush(self, log=False):  # pylint: disable=too-many-branches, too-many-nested-blocks
    if not self.my_metrics:
        logger.debug("Flushing - no metrics to send")
        return True

    now = int(time.time())
    if self.last_failure and self.last_failure + self.metrics_flush_pause > now:
        if not self.log_metrics_flush_pause:
            logger.warning("Flush paused on connection error (last failed: %d). "
                           "Inner stored metric: %d. Trying to send...",
                           self.last_failure, self.metrics_count)
            self.log_metrics_flush_pause = True
        if not self.test_connection():
            return False

    metrics_sent = False
    metrics_saved = False

    # Flushing to Graphite
    if self.graphite_enabled:
        try:
            logger.debug("Flushing %d metrics to Graphite/carbon", self.metrics_count)
            carbon_data = []
            for metric in self.my_metrics:
                # Get path
                path = metric['tags']['path']
                for name, value in metric['fields'].items():
                    carbon_data.append(
                        ('.'.join([self.graphite_prefix, '.'.join([path, name])]),
                         (metric['time'], value)))
            self.carbon.add_data_list(carbon_data)

            if self.carbon.send_data():
                metrics_sent = True
            else:
                if log:
                    logger.warning("Failed sending metrics to Graphite/carbon. "
                                   "Inner stored metric: %d", self.metrics_count)
            if self.log_metrics_flush_pause:
                logger.warning("Metrics flush restored. "
                               "Remaining stored metric: %d", self.metrics_count)
            self.last_failure = 0
            self.log_metrics_flush_pause = False
        except Exception as exp:  # pylint: disable=broad-except
            if not self.log_metrics_flush_pause:
                logger.warning("Failed sending metrics to Graphite/carbon: %s:%d. "
                               "Inner stored metrics count: %d.",
                               self.graphite_host, self.graphite_port, self.metrics_count)
                logger.warning("Exception: %s / %s", str(exp), traceback.format_exc())
            else:
                logger.warning("Flush paused on connection error (last failed: %d). "
                               "Inner stored metric: %d. Trying to send...",
                               self.last_failure, self.metrics_count)
            self.last_failure = now
            return False

    # Flushing to InfluxDB
    # pylint: disable=too-many-nested-blocks
    if self.influxdb_enabled:
        try:
            logger.debug("Flushing %d metrics to InfluxDB", self.metrics_count)
            for metric in self.my_metrics:
                # InfluxDB timestamps are expressed in nanoseconds
                metric['time'] *= 1000000000
                for name, value in metric['fields'].items():
                    if name.startswith('uom_'):
                        continue
                    # Force set float values
                    if not isinstance(value, float):
                        try:
                            value = float(value)
                        except Exception:  # pylint: disable=broad-except
                            pass
                    metric['fields'][name] = value
                if self.influxdb_tags is not None and isinstance(self.influxdb_tags, dict):
                    metric['tags'].update(self.influxdb_tags)

            # Write data to InfluxDB
            metrics_sent = self.influx.write_points(self.my_metrics)

            if self.log_metrics_flush_pause:
                logger.warning("Metrics flush restored. "
                               "Remaining stored metric: %d", self.metrics_count)
            self.last_failure = 0
            self.log_metrics_flush_pause = False
        except Exception as exp:  # pylint: disable=broad-except
            logger.warning("*** Exception: %s", str(exp))
            if not self.log_metrics_flush_pause:
                logger.warning("Failed sending metrics to InfluxDB: %s:%d. "
                               "Inner stored metrics count: %d.",
                               self.influxdb_host, self.influxdb_port, self.metrics_count)
                logger.warning("Exception: %s", str(exp))
            else:
                logger.warning("Flush paused on connection error (last failed: %d). "
                               "Inner stored metric: %d. Trying to send...",
                               self.last_failure, self.metrics_count)
            self.last_failure = now
            return False

    if self.output_file:
        try:
            logger.debug("Storing %d metrics to %s", self.metrics_count, self.output_file)
            with open(self.output_file, 'a') as fp:
                for metric in self.my_metrics:
                    # Get path
                    path = metric['tags']['path']
                    for name, value in metric['fields'].items():
                        fp.write("%s;%s;%s\n" % (metric['time'], '.'.join((path, name)), value))
            metrics_saved = True
        except Exception as exp:  # pylint: disable=broad-except
            logger.warning("Failed writing to a file: %s. "
                           "Inner stored metrics count: %d\n Exception: %s",
                           self.output_file, self.metrics_count, str(exp))
            return False

    if ((self.graphite_host or self.influxdb_host) and metrics_sent) or \
            (self.output_file and metrics_saved):
        self.my_metrics = []

    return True

Summary:

    Send inner stored metrics to the configured Graphite or InfluxDB

    Returns False if the sending failed, with a warning log if the log parameter is set

    :param log: to log information or not
    :type log: bool
    :return: bool

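For illustration, here is a minimal standalone sketch of the Graphite path building done in the flush above. The prefix, path and metric values are made-up examples, not Alignak data:

    metric = {
        'tags': {'path': 'All.server1.Http'},
        'time': 1550000000,
        'fields': {'time': 0.123, 'size': 4500},
    }
    graphite_prefix = 'alignak'
    carbon_data = []
    for name, value in metric['fields'].items():
        # prefix + metric path + field name -> one carbon point per field
        carbon_data.append(
            ('.'.join([graphite_prefix, '.'.join([metric['tags']['path'], name])]),
             (metric['time'], value)))
    # carbon_data now holds e.g. ('alignak.All.server1.Http.time', (1550000000, 0.123))
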
Please provide a description of the function:

def send_to_tsdb(self, realm, host, service, metrics, ts, path):
    if ts is None:
        ts = int(time.time())

    data = {
        "measurement": service,
        "tags": {
            "host": host,
            "service": service,
            "realm": '.'.join(realm) if isinstance(realm, list) else realm,
            "path": path
        },
        "time": ts,
        "fields": {}
    }
    if path is not None:
        data['tags'].update({"path": path})

    for metric, value, _ in metrics:
        data['fields'].update({metric: value})

    # Flush if necessary
    logger.debug("Data: %s", data)
    self.my_metrics.append(data)
    if self.metrics_count >= self.metrics_flush_count:
        # self.carbon.add_data_list(self.my_metrics)
        self.flush()

Summary:

    Send performance data to time series database

    Indeed this function stores metrics in the internal cache and checks if the flushing
    is necessary, and then flushes.

    :param realm: concerned realm
    :type: string
    :param host: concerned host
    :type: string
    :param service: concerned service
    :type: string
    :param metrics: list of metric tuples (name, value, uom)
    :type: list
    :param ts: timestamp
    :type: int
    :param path: full path (eg. Graphite) for the received metrics
    :type: string

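A minimal sketch of the measurement dict this function builds, with hypothetical input values for illustration only:

    realm, host, service, path = 'All', 'server1', 'Http', 'All.server1.Http'
    metrics = [('time', 0.123, 's'), ('size', 4500, 'B')]  # (name, value, uom)
    ts = 1550000000

    data = {
        "measurement": service,
        "tags": {"host": host, "service": service,
                 "realm": '.'.join(realm) if isinstance(realm, list) else realm,
                 "path": path},
        "time": ts,
        # the unit of measure (third tuple member) is discarded here
        "fields": {metric: value for metric, value, _ in metrics},
    }
    # data['fields'] == {'time': 0.123, 'size': 4500}
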
Please provide a description of the function:

def manage_initial_service_status_brok(self, b):
    host_name = b.data['host_name']
    service_description = b.data['service_description']
    service_id = host_name + "/" + service_description
    logger.debug("got initial service status: %s", service_id)

    if host_name not in self.hosts_cache:
        logger.error("initial service status, host is unknown: %s.", service_id)
        return

    self.services_cache[service_id] = {}
    if 'customs' in b.data:
        self.services_cache[service_id]['_GRAPHITE_POST'] = \
            sanitize_name(b.data['customs'].get('_GRAPHITE_POST', None))

    logger.debug("initial service status received: %s", service_id)

Summary:

    Prepare the known services cache

Please provide a description of the function:

def manage_initial_host_status_brok(self, b):
    host_name = b.data['host_name']
    logger.debug("got initial host status: %s", host_name)

    self.hosts_cache[host_name] = {
        'realm_name':
            sanitize_name(b.data.get('realm_name', b.data.get('realm', 'All'))),
    }
    if 'customs' in b.data:
        self.hosts_cache[host_name]['_GRAPHITE_PRE'] = \
            sanitize_name(b.data['customs'].get('_GRAPHITE_PRE', None))
        self.hosts_cache[host_name]['_GRAPHITE_GROUP'] = \
            sanitize_name(b.data['customs'].get('_GRAPHITE_GROUP', None))

    logger.debug("initial host status received: %s", host_name)

Summary:

    Prepare the known hosts cache

Please provide a description of the function:

def manage_service_check_result_brok(self, b):  # pylint: disable=too-many-branches
    host_name = b.data.get('host_name', None)
    service_description = b.data.get('service_description', None)
    if not host_name or not service_description:
        return
    service_id = host_name + "/" + service_description
    logger.debug("service check result: %s", service_id)

    # If host and service initial status broks have not been received, ignore ...
    if not self.ignore_unknown and host_name not in self.hosts_cache:
        logger.warning("received service check result for an unknown host: %s", service_id)
        return
    if service_id not in self.services_cache and not self.ignore_unknown:
        logger.warning("received service check result for an unknown service: %s", service_id)
        return

    # Decode received metrics
    metrics = self.get_metrics_from_perfdata(service_description, b.data['perf_data'])
    if not metrics:
        logger.debug("no metrics to send ...")
        return

    # If the check latency is to be ignored, subtract it from the check time
    if self.ignore_latency_limit >= b.data['latency'] > 0:
        check_time = int(b.data['last_chk']) - int(b.data['latency'])
    else:
        check_time = int(b.data['last_chk'])

    # Custom hosts variables
    hname = sanitize_name(host_name)
    if host_name in self.hosts_cache:
        if self.hosts_cache[host_name].get('_GRAPHITE_GROUP', None):
            hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_GROUP'), hname))
        if self.hosts_cache[host_name].get('_GRAPHITE_PRE', None):
            hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_PRE'), hname))

    # Custom services variables
    desc = sanitize_name(service_description)
    if service_id in self.services_cache:
        if self.services_cache[service_id].get('_GRAPHITE_POST', None):
            desc = ".".join((desc, self.services_cache[service_id].get('_GRAPHITE_POST', None)))

    # Graphite data source
    if self.graphite_data_source:
        path = '.'.join((hname, self.graphite_data_source, desc))
    else:
        path = '.'.join((hname, desc))

    # Realm as a prefix
    if self.realms_prefix and self.hosts_cache[host_name].get('realm_name', None):
        path = '.'.join((self.hosts_cache[host_name].get('realm_name'), path))

    realm_name = None
    if host_name in self.hosts_cache:
        realm_name = self.hosts_cache[host_name].get('realm_name', None)

    # Send metrics
    self.send_to_tsdb(realm_name, host_name, service_description, metrics, check_time, path)

Summary:

    A service check result brok has just arrived ...

Please provide a description of the function:

def manage_host_check_result_brok(self, b):  # pylint: disable=too-many-branches
    host_name = b.data.get('host_name', None)
    if not host_name:
        return
    logger.debug("host check result: %s", host_name)

    # If the host initial status brok has not been received, ignore ...
    if host_name not in self.hosts_cache and not self.ignore_unknown:
        logger.warning("received host check result for an unknown host: %s", host_name)
        return

    # Decode received metrics
    metrics = self.get_metrics_from_perfdata('host_check', b.data['perf_data'])
    if not metrics:
        logger.debug("no metrics to send ...")
        return

    # If the check latency is to be ignored, subtract it from the check time
    if self.ignore_latency_limit >= b.data['latency'] > 0:
        check_time = int(b.data['last_chk']) - int(b.data['latency'])
    else:
        check_time = int(b.data['last_chk'])

    # Custom hosts variables
    hname = sanitize_name(host_name)
    if host_name in self.hosts_cache:
        if self.hosts_cache[host_name].get('_GRAPHITE_GROUP', None):
            hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_GROUP'), hname))
        if self.hosts_cache[host_name].get('_GRAPHITE_PRE', None):
            hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_PRE'), hname))

    # Graphite data source
    if self.graphite_data_source:
        path = '.'.join((hname, self.graphite_data_source))
        if self.hostcheck:
            path = '.'.join((hname, self.graphite_data_source, self.hostcheck))
    else:
        path = '.'.join((hname, self.hostcheck))

    # Realm as a prefix
    if self.realms_prefix and self.hosts_cache[host_name].get('realm_name', None):
        path = '.'.join((self.hosts_cache[host_name].get('realm_name'), path))

    realm_name = None
    if host_name in self.hosts_cache:
        realm_name = self.hosts_cache[host_name].get('realm_name', None)

    # Send metrics
    self.send_to_tsdb(realm_name, host_name, self.hostcheck, metrics, check_time, path)

Summary:

    A host check result brok has just arrived...

Please provide a description of the function:

def get_comment_brok(self, host_name, service_name=''):
    data = self.serialize()
    data['host'] = host_name
    if service_name:
        data['service'] = service_name

    return Brok({'type': 'comment', 'data': data})

Summary:

    Get a comment brok

    :param host_name:
    :param service_name:
    :return: brok with wanted data
    :rtype: alignak.brok.Brok

Please provide a description of the function:

def main():
    try:
        args = parse_daemon_args()
        daemon = Alignak(**args.__dict__)
        daemon.main()
    except Exception as exp:  # pylint: disable=broad-except
        sys.stderr.write("*** Daemon exited because: %s" % str(exp))
        traceback.print_exc()
        exit(1)

Summary:

    Parse args and run main daemon function

    :return: None

Please provide a description of the function:

def want_service_notification(self, timeperiods, timestamp, state, n_type,
                              business_impact, cmd=None):
    # pylint: disable=too-many-return-statements
    if not self.service_notifications_enabled:
        return False

    # Maybe the command we are asked for is not for us, but for another
    # notification way on the same contact. If so, bail out
    if cmd and cmd not in self.service_notification_commands:
        return False

    # If the business_impact is not high enough, we bail out
    if business_impact < self.min_business_impact:
        return False

    notif_period = timeperiods[self.service_notification_period]
    in_notification_period = notif_period.is_time_valid(timestamp)
    if 'n' in self.service_notification_options:
        return False

    if in_notification_period:
        short_states = {
            u'WARNING': 'w', u'UNKNOWN': 'u', u'CRITICAL': 'c',
            u'RECOVERY': 'r', u'FLAPPING': 'f', u'DOWNTIME': 's'
        }
        if n_type == u'PROBLEM' and state in short_states:
            return short_states[state] in self.service_notification_options
        if n_type == u'RECOVERY' and n_type in short_states:
            return short_states[n_type] in self.service_notification_options
        if n_type == u'ACKNOWLEDGEMENT':
            return in_notification_period
        if n_type in (u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED'):
            return 'f' in self.service_notification_options
        if n_type in (u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED'):
            # No notification when a downtime was cancelled. Is that true??
            # According to the documentation we need to look at _host_ options
            return 's' in self.host_notification_options

    return False

Summary:

    Check if notification options match the state of the service.
    Notification is NOT wanted in ONE of the following cases::

        * service notifications are disabled
        * cmd is not in service_notification_commands
        * business_impact < self.min_business_impact
        * service_notification_period is not valid
        * state does not match service_notification_options for problem, recovery and flapping
        * state does not match host_notification_options for downtime

    :param timestamp: time we want to notify the contact (usually now)
    :type timestamp: int
    :param state: host or service state ("WARNING", "CRITICAL" ..)
    :type state: str
    :param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
    :type n_type: str
    :param business_impact: impact of this service
    :type business_impact: int
    :param cmd: command launched to notify the contact
    :type cmd: str
    :return: True if no condition is matched, otherwise False
    :rtype: bool
    TODO: Simplify function

Please provide a description of the function:

def want_host_notification(self, timeperiods, timestamp,
                           state, n_type, business_impact, cmd=None):
    # pylint: disable=too-many-return-statements
    if not self.host_notifications_enabled:
        return False

    # If the business_impact is not high enough, we bail out
    if business_impact < self.min_business_impact:
        return False

    # Maybe the command we are asked for is not for us, but for another
    # notification way on the same contact. If so, bail out
    if cmd and cmd not in self.host_notification_commands:
        return False

    notif_period = timeperiods[self.host_notification_period]
    in_notification_period = notif_period.is_time_valid(timestamp)
    if 'n' in self.host_notification_options:
        return False

    if in_notification_period:
        short_states = {
            u'DOWN': 'd', u'UNREACHABLE': 'u', u'RECOVERY': 'r',
            u'FLAPPING': 'f', u'DOWNTIME': 's'
        }
        if n_type == u'PROBLEM' and state in short_states:
            return short_states[state] in self.host_notification_options
        if n_type == u'RECOVERY' and n_type in short_states:
            return short_states[n_type] in self.host_notification_options
        if n_type == u'ACKNOWLEDGEMENT':
            return in_notification_period
        if n_type in (u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED'):
            return 'f' in self.host_notification_options
        if n_type in (u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED'):
            return 's' in self.host_notification_options

    return False

Summary:

    Check if notification options match the state of the host.
    Notification is NOT wanted in ONE of the following cases::

        * host notifications are disabled
        * cmd is not in host_notification_commands
        * business_impact < self.min_business_impact
        * host_notification_period is not valid
        * state does not match host_notification_options for problem, recovery, flapping and dt

    :param timestamp: time we want to notify the contact (usually now)
    :type timestamp: int
    :param state: host or service state ("WARNING", "CRITICAL" ..)
    :type state: str
    :param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
    :type n_type: str
    :param business_impact: impact of this service
    :type business_impact: int
    :param cmd: command launched to notify the contact
    :type cmd: str
    :return: True if no condition is matched, otherwise False
    :rtype: bool
    TODO: Simplify function

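A minimal sketch of the state-to-option lookup both functions rely on; the contact options shown are a made-up example:

    short_states = {'DOWN': 'd', 'UNREACHABLE': 'u', 'RECOVERY': 'r',
                    'FLAPPING': 'f', 'DOWNTIME': 's'}
    host_notification_options = ['d', 'r']  # this contact wants DOWN and RECOVERY only

    n_type, state = 'PROBLEM', 'DOWN'
    # a PROBLEM notification is sent only if the state's letter is in the options
    wanted = short_states.get(state, '?') in host_notification_options
    # wanted is True: 'd' is among the contact's options
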
Please provide a description of the function:

def get_notification_commands(self, o_type):
    # service_notification_commands for service
    notif_commands_prop = o_type + '_notification_commands'
    notif_commands = getattr(self, notif_commands_prop)
    return notif_commands

Summary:

    Get notification commands for object type

    :param o_type: object type (host or service)
    :type o_type: str
    :return: command list
    :rtype: list[alignak.objects.command.Command]

Please provide a description of the function:

def is_correct(self):
    # pylint: disable=too-many-branches
    state = True

    # Do not execute checks if notifications are disabled
    if (hasattr(self, 'service_notification_options') and
            self.service_notification_options == ['n']):
        if (hasattr(self, 'host_notification_options') and
                self.host_notification_options == ['n']):
            return True

    # Internal checks before executing inherited function...

    # Service part
    if not hasattr(self, 'service_notification_commands'):
        msg = "[notificationway::%s] do not have any service_notification_commands defined" % (
            self.get_name()
        )
        self.add_error(msg)
        state = False
    else:
        for cmd in self.service_notification_commands:
            if cmd is None:
                msg = "[notificationway::%s] a service_notification_command is missing" % (
                    self.get_name()
                )
                self.add_error(msg)
                state = False
            elif not cmd.is_valid():
                msg = "[notificationway::%s] a service_notification_command is invalid" % (
                    self.get_name()
                )
                self.add_error(msg)
                state = False

    if getattr(self, 'service_notification_period', None) is None:
        msg = "[notificationway::%s] the service_notification_period is invalid" % (
            self.get_name()
        )
        self.add_error(msg)
        state = False

    # Now host part
    if not hasattr(self, 'host_notification_commands'):
        msg = "[notificationway::%s] do not have any host_notification_commands defined" % (
            self.get_name()
        )
        self.add_error(msg)
        state = False
    else:
        for cmd in self.host_notification_commands:
            if cmd is None:
                msg = "[notificationway::%s] a host_notification_command is missing" % (
                    self.get_name()
                )
                self.add_error(msg)
                state = False
            elif not cmd.is_valid():
                msg = "[notificationway::%s] a host_notification_command is invalid (%s)" % (
                    cmd.get_name(), str(cmd.__dict__)
                )
                self.add_error(msg)
                state = False

    if getattr(self, 'host_notification_period', None) is None:
        msg = "[notificationway::%s] the host_notification_period is invalid" % (
            self.get_name()
        )
        self.add_error(msg)
        state = False

    return super(NotificationWay, self).is_correct() and state

Summary:

    Check if this object configuration is correct ::

        * Check our own specific properties
        * Call our parent class is_correct checker

    :return: True if the configuration is correct, otherwise False
    :rtype: bool

Please provide a description of the function:

def linkify(self, timeperiods, commands):
    self.linkify_with_timeperiods(timeperiods, 'service_notification_period')
    self.linkify_with_timeperiods(timeperiods, 'host_notification_period')
    self.linkify_command_list_with_commands(commands, 'service_notification_commands')
    self.linkify_command_list_with_commands(commands, 'host_notification_commands')

Summary:

    Create link between objects::

        * notificationways -> timeperiods
        * notificationways -> commands

    :param timeperiods: timeperiods to link
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :param commands: commands to link
    :type commands: alignak.objects.command.Commands
    :return: None

Please provide a description of the function:

def new_inner_member(self, name, params):
    params['notificationway_name'] = name
    self.add_item(NotificationWay(params))

Summary:

    Create a new instance of NotificationWay with the given name and parameters
    and add it to the item list

    :param name: notification way name
    :type name: str
    :param params: notification way parameters
    :type params: dict
    :return: None

Please provide a description of the function:

def is_correct(self):
    state = True

    # Ok just put None as modulation_period, which means 24x7
    if not hasattr(self, 'modulation_period'):
        self.modulation_period = None

    if not hasattr(self, 'customs') or not self.customs:
        msg = "[macromodulation::%s] contains no macro definition" % (self.get_name())
        self.add_error(msg)
        state = False

    return super(MacroModulation, self).is_correct() and state

Summary:

    Check if this object configuration is correct ::

        * Call our parent class is_correct checker

    :return: True if the configuration is correct, otherwise False
    :rtype: bool

Please provide a description of the function:

def serialize(obj, no_dump=False):
    # print("Serialize (%s): %s" % (no_dump, obj))
    if hasattr(obj, "serialize") and isinstance(obj.serialize, collections.Callable):
        o_dict = {
            '__sys_python_module__': "%s.%s" % (obj.__class__.__module__,
                                                obj.__class__.__name__),
            'content': obj.serialize()
        }
    elif isinstance(obj, dict):
        o_dict = {}
        for key, value in list(obj.items()):
            o_dict[key] = serialize(value, True)
    elif isinstance(obj, (list, set)):
        o_dict = [serialize(item, True) for item in obj]
    else:
        o_dict = obj

    if no_dump:
        return o_dict

    result = None
    try:
        result = json.dumps(o_dict, ensure_ascii=False)
    except MemoryError:
        return {'_error': 'Not enough memory on this computer to correctly manage Alignak '
                          'objects serialization! '
                          'Sorry for this, please log an issue in the project repository.'}

    return result

Summary:

    Serialize an object.

    Returns a dict containing an `_error` property if a MemoryError happens during the
    object serialization. See #369.

    :param obj: the object to serialize
    :type obj: alignak.objects.item.Item | dict | list | str
    :param no_dump: if True return dict, otherwise return a json
    :type no_dump: bool
    :return: dict or json dumps dict with the following structure ::

        {'__sys_python_module__': "%s.%s" % (o_cls.__module__, o_cls.__name__)
         'content' : obj.serialize()}
    :rtype: dict | str

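For illustration, a minimal standalone sketch of the serialization envelope described above. MiniItem is a hypothetical stand-in for an Alignak item exposing a serialize() method:

    import json

    class MiniItem:
        def __init__(self, name):
            self.name = name

        def serialize(self):
            return {'name': self.name}

    obj = MiniItem('server1')
    # the envelope records the class so unserialize() can rebuild the object
    o_dict = {
        '__sys_python_module__': "%s.%s" % (obj.__class__.__module__,
                                            obj.__class__.__name__),
        'content': obj.serialize()
    }
    dumped = json.dumps(o_dict, ensure_ascii=False)
    # e.g. '{"__sys_python_module__": "__main__.MiniItem", "content": {"name": "server1"}}'
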
Please provide a description of the function:

def unserialize(j_obj, no_load=False):
    if not j_obj:
        return j_obj
    # print("Unserialize (%s): %s" % (no_load, j_obj))

    if no_load:
        data = j_obj
    else:
        data = json.loads(j_obj)

    if isinstance(data, dict):
        if '__sys_python_module__' in data:
            cls = get_alignak_class(data['__sys_python_module__'])
            # Awful hack for external commands ... need to be refactored!
            if data['__sys_python_module__'] in ['alignak.external_command.ExternalCommand']:
                return cls(data['content']['cmd_line'], data['content']['creation_timestamp'])
            return cls(data['content'], parsing=False)

        data_dict = {}
        for key, value in list(data.items()):
            data_dict[key] = unserialize(value, True)
        return data_dict

    if isinstance(data, list):
        return [unserialize(item, True) for item in data]

    return data

Summary:

    Un-serialize an object. If we have __sys_python_module__ we try to safely get the
    alignak class, then we re-instantiate the alignak object

    :param j_obj: json object, dict
    :type j_obj: str (before loads)
    :param no_load: if True, j_obj is a dict, otherwise it's a json string and needs loading
    :type no_load: bool
    :return: un-serialized object

Please provide a description of the function:

def get_alignak_class(python_path):
    a_module, a_class = python_path.rsplit('.', 1)

    if not a_module.startswith('alignak'):  # pragma: no cover - should never happen!
        raise AlignakClassLookupException("Can't recreate object in module: %s. "
                                          "Not an Alignak module" % a_module)

    if a_module not in sys.modules:  # pragma: no cover - should never happen!
        raise AlignakClassLookupException("Can't recreate object in unknown module: %s. "
                                          "No such Alignak module. Alignak versions may mismatch" %
                                          a_module)

    pymodule = sys.modules[a_module]

    if not hasattr(pymodule, a_class):  # pragma: no cover - should never happen!
        raise AlignakClassLookupException("Can't recreate object %s in %s module. "
                                          "Module does not have this attribute. "
                                          "Alignak versions may mismatch" % (a_class, a_module))

    # Awful hack for external commands ... need to be refactored!
    if a_class not in ['ExternalCommand']:
        if not isinstance(getattr(pymodule, a_class), type):  # pragma: no cover - protection
            raise AlignakClassLookupException("Can't recreate object %s in %s module. "
                                              "This type is not a class" % (a_class, a_module))

    return getattr(pymodule, a_class)

Summary:

    Get the alignak class in the safest way I could imagine.
    Raises if (cumulative conditions) ::

        * the module does not start with alignak
        * above is false and the module is not in sys.modules
        * above is false and the module does not have the wanted class
        * above is false and the class is not a ClassType

    :param python_path:
    :type python_path: str
    :return: alignak class
    :raise AlignakClassLookupException

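A minimal sketch of the dotted-path lookup, using the standard library instead of an 'alignak.*' path (the real function would reject this module name, so 'collections.OrderedDict' here is purely an illustrative stand-in):

    import sys
    import collections  # importing guarantees it is present in sys.modules

    python_path = 'collections.OrderedDict'
    a_module, a_class = python_path.rsplit('.', 1)

    pymodule = sys.modules[a_module]            # a missing module maps to the lookup exception
    assert hasattr(pymodule, a_class)           # module must expose the attribute
    assert isinstance(getattr(pymodule, a_class), type)  # and it must be a class
    cls = getattr(pymodule, a_class)
    # cls is collections.OrderedDict
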
Please provide a description of the function:

def get_event(self):
    self.prepare()
    return (self.creation_time, self.data['level'], self.data['message'])

Summary:

    This function returns an Event from a Brok

    If the type is monitoring_log then the Brok contains a monitoring event
    (alert, notification, ...) information. This function will return a tuple
    with the creation time, the level and message information

    :return: tuple with date, level and message
    :rtype: tuple

Please provide a description of the function:

def serialize(self):
    return {
        "uuid": self.uuid, "type": self.type, "instance_id": self.instance_id,
        "prepared": self.prepared, "creation_time": self.creation_time,
        "data": self.data
    }

Summary:

    This function serializes into a simple dict object.
    It is used when transferring data to other daemons over the network (http)

    Here we directly return all attributes

    :return: json representation of a Brok
    :rtype: dict

Please provide a description of the function:

def prepare(self):
    # Maybe the Brok is an old daemon one or was already prepared
    # if so, the data is already ok
    if hasattr(self, 'prepared') and not self.prepared:
        self.data = unserialize(self.data)
        if self.instance_id:
            self.data['instance_id'] = self.instance_id
    self.prepared = True

Summary:

    Un-serialize data from the data attribute and add the instance_id key if necessary

    :return: None

Please provide a description of the function:

def resolve_elements(self):
    # If it's a leaf, we just need to dump a set with the content of the node
    if self.leaf:
        if not self.content:
            return set()
        return set(self.content)

    # First get the NOT nodes in one list, and the others in another list
    not_nodes = [s for s in self.sons if s.not_value]
    positiv_nodes = [s for s in self.sons if not s.not_value]  # ok a not not is hard to read..

    # By default we are using an OR rule
    if not self.operand:
        self.operand = '|'

    res = set()

    # The operand will change the positive loop only
    i = 0
    for node in positiv_nodes:
        node_members = node.resolve_elements()
        if self.operand == '|':
            res = res.union(node_members)
        elif self.operand == '&':
            # The first element of an AND rule should be used as-is
            if i == 0:
                res = node_members
            else:
                res = res.intersection(node_members)
        i += 1

    # And we finally remove all NOT elements from the result
    for node in not_nodes:
        node_members = node.resolve_elements()
        res = res.difference(node_members)

    return res

Summary:

    Get the elements of this node recursively.
    Compute rules with OR or AND rule then NOT rules.

    :return: set of elements
    :rtype: set

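The resolution is plain set algebra: union for OR sons, intersection for AND sons, difference for NOT sons applied last. A standalone sketch with made-up host sets:

    linux = {'h1', 'h2', 'h3'}
    prod = {'h2', 'h3', 'h4'}
    windows = {'h5'}

    # "(linux & prod) | windows", then a NOT son containing h3 is subtracted
    res = (linux & prod) | windows   # positive sons: AND then OR
    res = res - {'h3'}               # NOT sons are removed from the result last
    # res == {'h2', 'h5'}
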
Please provide a description of the function:

def eval_cor_pattern(self, pattern):  # pylint: disable=too-many-branches
    pattern = pattern.strip()
    complex_node = False

    # Look if it's a complex pattern (with rule) or
    # if it's a leaf of it, like a host/service
    for char in '()+&|,':
        if char in pattern:
            complex_node = True

    node = ComplexExpressionNode()

    # if it's a single expression like !linux or production
    # (where "linux" and "production" are hostgroup names)
    # we will get the objects from it and return a leaf node
    if not complex_node:
        # If it's a not value, tag the node and find
        # the name without this ! operator
        if pattern.startswith('!'):
            node.not_value = True
            pattern = pattern[1:]

        node.operand = self.ctx
        node.leaf = True
        obj, error = self.find_object(pattern)
        if obj is not None:
            node.content = obj
        else:
            node.configuration_errors.append(error)
        return node

    in_par = False
    tmp = ''
    stacked_par = 0
    for char in pattern:
        if char in (',', '|'):
            # Maybe we are in a par, if so, just stack it
            if in_par:
                tmp += char
            else:
                # Oh we got a real cut in an expression, if so, cut it
                tmp = tmp.strip()
                node.operand = '|'
                if tmp != '':
                    son = self.eval_cor_pattern(tmp)
                    node.sons.append(son)
                tmp = ''
        elif char in ('&', '+'):
            # Maybe we are in a par, if so, just stack it
            if in_par:
                tmp += char
            else:
                # Oh we got a real cut in an expression, if so, cut it
                tmp = tmp.strip()
                node.operand = '&'
                if tmp != '':
                    son = self.eval_cor_pattern(tmp)
                    node.sons.append(son)
                tmp = ''
        elif char == '(':
            stacked_par += 1
            in_par = True
            tmp = tmp.strip()
            # Maybe we just start a par, but we got some things in tmp
            # that should not be good in fact !
            if stacked_par == 1 and tmp != '':
                # TODO : real error
                print("ERROR : bad expression near", tmp)
                continue
            # If we are already in a par, add this (
            # but not if it's the first one
            if stacked_par > 1:
                tmp += char
        elif char == ')':
            stacked_par -= 1
            if stacked_par < 0:
                # TODO : real error
                print("Error : bad expression near", tmp, "too much ')'")
                continue
            if stacked_par == 0:
                tmp = tmp.strip()
                son = self.eval_cor_pattern(tmp)
                node.sons.append(son)
                in_par = False
                # OK now clean the tmp so we start clean
                tmp = ''
                continue
            # ok here we are still in a huge par, we just close one sub one
            tmp += char
        # Maybe it's a classic character, if so, continue
        else:
            tmp += char

    # Be sure to manage the trailing part when the line is done
    tmp = tmp.strip()
    if tmp != '':
        son = self.eval_cor_pattern(tmp)
        node.sons.append(son)

    return node

Summary:

    Parse and build recursively a tree of ComplexExpressionNode from pattern

    :param pattern: pattern to parse
    :type pattern: str
    :return: root node of parsed tree
    :rtype: alignak.complexexpression.ComplexExpressionNode

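To visualize the result, here is the rough shape of the tree this parser would build for the expression "(linux|windows)&prod", written as a nested dict literal. The field names come from the node attributes used above; the content lists are made-up placeholders:

    tree = {
        'operand': '&', 'not_value': False, 'leaf': False,
        'sons': [
            {'operand': '|', 'not_value': False, 'leaf': False,
             'sons': [
                 {'leaf': True, 'content': ['...linux hosts...']},
                 {'leaf': True, 'content': ['...windows hosts...']},
             ]},
            {'leaf': True, 'content': ['...prod hosts...']},
        ],
    }
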
Please provide a description of the function:

def find_object(self, pattern):
    obj = None
    error = None
    pattern = pattern.strip()

    if pattern == '*':
        obj = [h.host_name for h in list(self.all_elements.items.values())
               if getattr(h, 'host_name', '') != '' and not h.is_tpl()]
        return obj, error

    # Ok a more classic way
    if self.ctx == 'hostgroups':
        # Ok try to find this hostgroup
        hgr = self.grps.find_by_name(pattern)
        # Maybe it's a known one?
        if not hgr:
            error = "Error : cannot find the %s of the expression '%s'" % (self.ctx, pattern)
            return hgr, error

        # Ok the group is found, get the elements!
        elts = hgr.get_hosts()
        elts = strip_and_uniq(elts)

        # Maybe the hostgroup members is '*', if so expand with all hosts
        if '*' in elts:
            elts.extend([h.host_name for h in list(self.all_elements.items.values())
                         if getattr(h, 'host_name', '') != '' and not h.is_tpl()])
            # And remove this strange hostname too :)
            elts.remove('*')
        return elts, error

    obj = self.grps.find_hosts_that_use_template(pattern)
    return obj, error

Summary:

    Get a list of hosts corresponding to the pattern regarding the context

    :param pattern: pattern to find
    :type pattern: str
    :return: Host list matching pattern (hostgroup name, template, all)
    :rtype: list[alignak.objects.host.Host]

Please provide a description of the function:

def reset(self):
    # pylint: disable=not-context-manager
    logger.info("Scheduling loop reset")
    with self.waiting_results.mutex:
        self.waiting_results.queue.clear()
    self.checks.clear()
    self.actions.clear()

Summary:

    Reset scheduler::

        * Remove waiting results
        * Clear checks and actions lists

    :return: None

Please provide a description of the function:

def all_my_hosts_and_services(self):
    for what in (self.hosts, self.services):
        for item in what:
            yield item

Summary:

    Create an iterator for all my known hosts and services

    :return: generator over the hosts and services

Please provide a description of the function:

def load_conf(self, instance_id, instance_name, conf):
    self.pushed_conf = conf

    logger.info("loading my configuration (%s / %s):",
                instance_id, self.pushed_conf.instance_id)
    logger.debug("Properties:")
    for key in sorted(self.pushed_conf.properties):
        logger.debug("- %s: %s", key, getattr(self.pushed_conf, key, []))
    logger.debug("Macros:")
    for key in sorted(self.pushed_conf.macros):
        logger.debug("- %s: %s", key, getattr(self.pushed_conf.macros, key, []))

    logger.debug("Objects types:")
    for _, _, strclss, _, _ in list(self.pushed_conf.types_creations.values()):
        if strclss in ['arbiters', 'schedulers', 'brokers',
                       'pollers', 'reactionners', 'receivers']:
            continue
        setattr(self, strclss, getattr(self.pushed_conf, strclss, []))

        # Internal statistics
        logger.debug("- %d %s", len(getattr(self, strclss)), strclss)
        statsmgr.gauge('configuration.%s' % strclss, len(getattr(self, strclss)))

    # We need reversed list for searching in the retention file read
    # todo: check what it is about...
    self.services.optimize_service_search(self.hosts)

    # Just deprecated
    # # Compile the triggers
    # if getattr(self, 'triggers', None):
    #     logger.info("compiling the triggers...")
    #     self.triggers.compile()
    #     self.triggers.load_objects(self)
    # else:
    #     logger.info("No triggers")

    # From the Arbiter configuration. Used for satellites to differentiate the schedulers
    self.alignak_name = self.pushed_conf.alignak_name
    self.instance_id = instance_id
    self.instance_name = instance_name
    self.push_flavor = getattr(self.pushed_conf, 'push_flavor', 'None')
    logger.info("Set my scheduler instance: %s - %s - %s",
                self.instance_id, self.instance_name, self.push_flavor)

    # Tag our monitored hosts/services with our instance_id
    for item in self.all_my_hosts_and_services():
        item.instance_id = self.instance_id

Summary:

    Load configuration received from the Arbiter and pushed by our Scheduler daemon

    :param instance_name: scheduler instance name
    :type instance_name: str
    :param instance_id: scheduler instance id
    :type instance_id: str
    :param conf: configuration to load
    :type conf: alignak.objects.config.Config
    :return: None

Please provide a description of the function:

def update_recurrent_works_tick(self, conf):
    for key in self.recurrent_works:
        (name, fun, _) = self.recurrent_works[key]
        if isinstance(conf, dict):
            new_tick = conf.get('tick_%s' % name, None)
        else:
            new_tick = getattr(conf, 'tick_%s' % name, None)
        if new_tick is not None:
            logger.debug("Requesting to change the default tick to %d for the action %s",
                         int(new_tick), name)
        else:
            continue

        # Update the default scheduler tick for this function
        try:
            new_tick = int(new_tick)
            logger.info("Changing the default tick to %d for the action %s", new_tick, name)
            self.recurrent_works[key] = (name, fun, new_tick)
        except ValueError:
            logger.warning("Changing the default tick for '%s' to '%s' failed!", new_tick, name)

Summary:

    Modify the tick value for the scheduler recurrent work

    A tick is an amount of loops of the scheduler before executing the recurrent work

    The provided configuration may contain some tick-function_name keys that contain
    a tick value to be updated. Those parameters are defined in the alignak environment file.

    Indeed this function is called with the Scheduler daemon object. Note that the ``conf``
    parameter may also be a dictionary.

    :param conf: the daemon link configuration to search in
    :type conf: alignak.daemons.schedulerdaemon.Alignak
    :return: None

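A minimal standalone sketch of that tick update; the work names and the conf dict below are made-up examples of the 'tick_*' convention:

    recurrent_works = {0: ('update_downtimes_and_comments', None, 1),
                       1: ('check_freshness', None, 10)}
    conf = {'tick_check_freshness': '30'}   # e.g. read from the environment file

    for key, (name, fun, _) in list(recurrent_works.items()):
        new_tick = conf.get('tick_%s' % name, None)
        if new_tick is None:
            continue
        try:
            # keep the name and function, only replace the tick
            recurrent_works[key] = (name, fun, int(new_tick))
        except ValueError:
            pass  # a non-numeric tick is ignored, as in the function above
    # recurrent_works[1] == ('check_freshness', None, 30)
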
Please provide a description of the function:

def dump_objects(self):
    path = os.path.join(tempfile.gettempdir(),
                        'dump-obj-scheduler-%s-%d.json' % (self.name, int(time.time())))

    logger.info('Dumping scheduler objects to: %s', path)
    try:
        fd = open(path, 'wb')
        output = 'type:uuid:status:t_to_go:poller_tag:worker:command\n'
        fd.write(output.encode('utf-8'))
        for check in list(self.checks.values()):
            output = 'check:%s:%s:%s:%s:%s:%s\n' \
                     % (check.uuid, check.status, check.t_to_go, check.poller_tag,
                        check.command, check.my_worker)
            fd.write(output.encode('utf-8'))
        logger.info('- dumped checks')
        for action in list(self.actions.values()):
            output = '%s: %s:%s:%s:%s:%s:%s\n' \
                     % (action.__class__.my_type, action.uuid, action.status,
                        action.t_to_go, action.reactionner_tag, action.command,
                        action.my_worker)
            fd.write(output.encode('utf-8'))
        logger.info('- dumped actions')
        broks = []
        for broker in list(self.my_daemon.brokers.values()):
            for brok in broker.broks:
                broks.append(brok)
        for brok in broks:
            output = 'BROK: %s:%s\n' % (brok.uuid, brok.type)
            fd.write(output.encode('utf-8'))
        logger.info('- dumped broks')
        fd.close()
        logger.info('Dumped')
    except OSError as exp:  # pragma: no cover, should never happen...
        logger.critical("Error when writing the objects dump file %s : %s", path, str(exp))

Summary:

    Dump scheduler objects into a dump (temp) file

    :return: None

Please provide a description of the function:

def dump_config(self):
    path = os.path.join(tempfile.gettempdir(),
                        'dump-cfg-scheduler-%s-%d.json' % (self.name, int(time.time())))

    try:
        self.pushed_conf.dump(path)
    except (OSError, IndexError) as exp:  # pragma: no cover, should never happen...
        logger.critical("Error when writing the configuration dump file %s: %s",
                        path, str(exp))

Summary:

    Dump scheduler configuration into a temporary file

    The dumped content is JSON formatted

    :return: None

Please provide a description of the function:

def run_external_commands(self, cmds):
    if not self.external_commands_manager:
        return

    try:
        _t0 = time.time()
        logger.debug("Scheduler '%s' got %d commands", self.name, len(cmds))
        for command in cmds:
            self.external_commands_manager.resolve_command(ExternalCommand(command))
        statsmgr.counter('external-commands.got.count', len(cmds))
        statsmgr.timer('external-commands.got.time', time.time() - _t0)
    except Exception as exp:  # pylint: disable=broad-except
        logger.warning("External command parsing error: %s", exp)
        logger.warning("Exception: %s / %s", str(exp), traceback.format_exc())
        for command in cmds:
            try:
                command = command.decode('utf8', 'ignore')
            except UnicodeDecodeError:
                pass
            except AttributeError:
                pass
            logger.warning("Command: %s", command)

Summary:

    Run external commands the Arbiter/Receiver sent

    :param cmds: commands to run
    :type cmds: list
    :return: None

Please provide a description of the function:

def add_brok(self, brok, broker_uuid=None):
    # We tag the brok with our instance_id
    brok.instance_id = self.instance_id
    if brok.type == 'monitoring_log':
        # The brok is a monitoring event
        with self.my_daemon.events_lock:
            self.my_daemon.events.append(brok)
        statsmgr.counter('events', 1)
        return

    if broker_uuid:
        if broker_uuid not in self.my_daemon.brokers:
            logger.info("Unknown broker: %s / %s!", broker_uuid, self.my_daemon.brokers)
            return
        broker_link = self.my_daemon.brokers[broker_uuid]
        logger.debug("Adding a brok %s for: %s", brok.type, broker_uuid)
        # it's just for one broker
        self.my_daemon.brokers[broker_link.uuid].broks.append(brok)
        self.nb_broks += 1
    else:
        logger.debug("Adding a brok %s to all brokers", brok.type)
        # add brok to all brokers
        for broker_link_uuid in self.my_daemon.brokers:
            logger.debug("- adding to %s", self.my_daemon.brokers[broker_link_uuid])
            self.my_daemon.brokers[broker_link_uuid].broks.append(brok)
            self.nb_broks += 1

Summary:

    Add a brok into the brokers list.
    It can be for a specific one, all brokers or none (startup)

    :param brok: brok to add
    :type brok: alignak.brok.Brok
    :param broker_uuid: broker uuid for the brok
    :type broker_uuid: str
    :return: None

Please provide a description of the function:

def add_notification(self, notification):
    if notification.uuid in self.actions:
        logger.warning("Already existing notification: %s", notification)
        return

    logger.debug("Adding a notification: %s", notification)
    self.actions[notification.uuid] = notification
    self.nb_notifications += 1

    # A notification which is not a master one asks for a brok
    if notification.contact is not None:
        self.add(notification.get_initial_status_brok())

Summary:

    Add a notification into the actions list

    :param notification: notification to add
    :type notification: alignak.notification.Notification
    :return: None

Please provide a description of the function:

def add_check(self, check):
    if check is None:
        return
    if check.uuid in self.checks:
        logger.debug("Already existing check: %s", check)
        return
    logger.debug("Adding a check: %s", check)

    # Add a new check to the scheduler checks list
    self.checks[check.uuid] = check
    self.nb_checks += 1

    # Raise a brok to inform that a next check is to come ...
    # but only for items that are actively checked
    item = self.find_item_by_id(check.ref)
    if item.active_checks_enabled:
        self.add(item.get_next_schedule_brok())

Summary:

    Add a check into the scheduler checks list

    :param check: check to add
    :type check: alignak.check.Check
    :return: None

Please provide a description of the function:

def add_event_handler(self, action):
    if action.uuid in self.actions:
        logger.info("Already existing event handler: %s", action)
        return

    self.actions[action.uuid] = action
    self.nb_event_handlers += 1

Summary:

    Add an event handler into the actions list

    :param action: event handler to add
    :type action: alignak.eventhandler.EventHandler
    :return: None

Please provide a description of the function:

def add(self, elt):
    if elt is None:
        return
    logger.debug("Adding: %s / %s", elt.my_type, elt.__dict__)
    fun = self.__add_actions.get(elt.__class__, None)
    if fun:
        fun(self, elt)
    else:
        logger.warning("self.add(): Unmanaged object class: %s (object=%r)", elt.__class__, elt)

Summary:

    Generic function to add objects into the scheduler daemon internal lists::

        Brok -> self.broks
        Check -> self.checks
        Notification -> self.actions
        EventHandler -> self.actions

    For an ExternalCommand, tries to resolve the command

    :param elt: element to add
    :type elt:
    :return: None

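The dispatch relies on a class-to-handler mapping. A minimal standalone sketch of that pattern; the mapping and class names below are illustrative assumptions, not Alignak's actual classes:

    class Brok:
        pass

    class Check:
        pass

    class MiniScheduler:
        def add_brok(self, brok):
            print("brok added")

        def add_check(self, check):
            print("check added")

        # maps an object's class to the handler function, as add() does above
        __add_actions = {Brok: add_brok, Check: add_check}

        def add(self, elt):
            fun = self.__add_actions.get(elt.__class__, None)
            if fun:
                fun(self, elt)   # plain (unbound) function: pass self explicitly

    MiniScheduler().add(Brok())   # prints "brok added"
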
Please provide a description of the function:

def hook_point(self, hook_name):
    self.my_daemon.hook_point(hook_name=hook_name, handle=self)

Summary:

    Generic function to call modules methods if such a method is available

    :param hook_name: function name to call
    :type hook_name: str
    :return: None

Please provide a description of the function:

def clean_queues(self):
    # pylint: disable=too-many-locals
    # If we set the interval at 0, we bail out
    if getattr(self.pushed_conf, 'tick_clean_queues', 0) == 0:
        logger.debug("No queues cleaning...")
        return

    max_checks = MULTIPLIER_MAX_CHECKS * (len(self.hosts) + len(self.services))
    max_broks = MULTIPLIER_MAX_BROKS * (len(self.hosts) + len(self.services))
    max_actions = MULTIPLIER_MAX_ACTIONS * len(self.contacts) * (len(self.hosts) +
                                                                 len(self.services))

    # For checks, it's not very simple:
    # checks may be referred to by their host/service,
    # so we do not just del them in the check list, but also in their service/host.
    # We want ids lower than max_id - 2*max_checks
    self.nb_checks_dropped = 0
    if max_checks and len(self.checks) > max_checks:
        # keys() does not ensure sorted keys. Max is slow but we have no other way.
        to_del_checks = [c for c in list(self.checks.values())]
        to_del_checks.sort(key=lambda x: x.creation_time)
        to_del_checks = to_del_checks[:-max_checks]
        self.nb_checks_dropped = len(to_del_checks)
        if to_del_checks:
            logger.warning("I have to drop some checks (%d)..., sorry :(",
                           self.nb_checks_dropped)

        for chk in to_del_checks:
            c_id = chk.uuid
            items = getattr(self, chk.ref_type + 's')
            elt = items[chk.ref]
            # First remove the link in host/service
            elt.remove_in_progress_check(chk)
            # Then in dependent checks (I depend on, or checks
            # that depend on me)
            for dependent_checks in chk.depend_on_me:
                dependent_checks.depend_on.remove(chk.uuid)
            for c_temp in chk.depend_on:
                c_temp.depend_on_me.remove(chk)
            del self.checks[c_id]  # Final Bye bye ...

    # For broks and actions, it's simpler.
    # For broks, manage the global list but also all the brokers
    self.nb_broks_dropped = 0
    for broker_link in list(self.my_daemon.brokers.values()):
        if max_broks and len(broker_link.broks) > max_broks:
            logger.warning("I have to drop some broks (%d > %d) for the broker %s "
                           "..., sorry :(", len(broker_link.broks), max_broks, broker_link)

            kept_broks = sorted(broker_link.broks, key=lambda x: x.creation_time)
            # Delete the oldest broks to keep the max_broks most recent...
            # todo: is it a good choice !
            broker_link.broks = kept_broks[0:max_broks]

    self.nb_actions_dropped = 0
    if max_actions and len(self.actions) > max_actions:
        logger.warning("I have to del some actions (currently: %d, max: %d)..., sorry :(",
                       len(self.actions), max_actions)
        to_del_actions = [c for c in list(self.actions.values())]
        to_del_actions.sort(key=lambda x: x.creation_time)
        to_del_actions = to_del_actions[:-max_actions]
        self.nb_actions_dropped = len(to_del_actions)
        for act in to_del_actions:
            if act.is_a == 'notification':
                self.find_item_by_id(act.ref).remove_in_progress_notification(act)
            del self.actions[act.uuid]

Summary:

    Reduces internal list size to the maximum allowed

    * checks and broks : 5 * length of hosts + services
    * actions : 5 * length of hosts + services + contacts

    :return: None

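A minimal standalone sketch of the drop policy used above: sort by creation time and drop everything but the newest N entries (the Act class and values are made up):

    import random

    class Act:
        def __init__(self, t):
            self.creation_time = t

    actions = [Act(random.random()) for _ in range(10)]
    max_actions = 4

    ordered = sorted(actions, key=lambda x: x.creation_time)
    to_del = ordered[:-max_actions]   # the 6 oldest are dropped
    kept = ordered[-max_actions:]     # the 4 newest survive
    assert len(to_del) == 6 and len(kept) == 4
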
Please provide a description of the function:

def update_business_values(self):
    for elt in self.all_my_hosts_and_services():
        if not elt.is_problem:
            was = elt.business_impact
            elt.update_business_impact_value(self.hosts, self.services,
                                             self.timeperiods, self.businessimpactmodulations)
            new = elt.business_impact
            # Ok, the business_impact changed, we can update the broks
            if new != was:
                self.get_and_register_status_brok(elt)

    # When all impacts and classic elements are updated,
    # we can update problems (their value depends on impacts, so
    # they must be done after)
    for elt in self.all_my_hosts_and_services():
        # We first update impacts and classic elements
        if elt.is_problem:
            was = elt.business_impact
            elt.update_business_impact_value(self.hosts, self.services,
                                             self.timeperiods, self.businessimpactmodulations)
            new = elt.business_impact
            # Maybe one of the impacts changed its business_impact to a high value
            # and so asks for the problem to raise too
            if new != was:
                self.get_and_register_status_brok(elt)

Summary:

    Iterate over hosts and services and update their business_impact

    :return: None

Please provide a description of the function:

def scatter_master_notifications(self):
    now = time.time()

    # We only want the master scheduled notifications that are immediately launchable
    notifications = [a for a in self.actions.values()
                     if a.is_a == u'notification' and a.status == ACT_STATUS_SCHEDULED
                     and not a.contact and a.is_launchable(now)]
    if notifications:
        logger.debug("Scatter master notification: %d notifications",
                     len(notifications))
    for notification in notifications:
        logger.debug("Scheduler got a master notification: %s", notification)

        # This is a "master" notification created by an host/service.
        # We use it to create children notifications (for the contacts and
        # notification_commands) which are executed in the reactionner.
        item = self.find_item_by_id(notification.ref)
        children = []
        notification_period = None
        if getattr(item, 'notification_period', None) is not None:
            notification_period = self.timeperiods[item.notification_period]
        if not item.is_blocking_notifications(notification_period,
                                              self.hosts, self.services,
                                              notification.type, now):
            # If it is possible to send notifications
            # of this type at the current time, then create
            # a single notification for each contact of this item.
            children = item.scatter_notification(
                notification, self.contacts, self.notificationways, self.timeperiods,
                self.macromodulations, self.escalations,
                self.find_item_by_id(getattr(item, "host", None))
            )
            for notif in children:
                logger.debug(" - child notification: %s", notif)
                notif.status = ACT_STATUS_SCHEDULED
                # Add the notification to the scheduler objects
                self.add(notif)

        # If we have notification_interval then schedule
        # the next notification (problems only)
        if notification.type == u'PROBLEM':
            # Update the ref notif number after raising the one of the notification
            if children:
                # notif_nb of the master notification
                # was already current_notification_number+1.
                # If notifications were sent,
                # then the host/service counter will also be incremented
                item.current_notification_number = notification.notif_nb

            if item.notification_interval and notification.t_to_go is not None:
                # We must continue to send notifications.
                # Just leave it in the actions list and set it to "scheduled"
                # and it will be found again later.
                # Ask the service/host to compute the next notif time. It can be just
                # a.t_to_go + item.notification_interval*item.__class__.interval_length
                # or maybe before because we have an
                # escalation that needs to raise up before
                notification.t_to_go = item.get_next_notification_time(notification,
                                                                       self.escalations,
                                                                       self.timeperiods)
                notification.notif_nb = item.current_notification_number + 1
                logger.debug("Repeat master notification: %s", notification)
            else:
                # Wipe out this master notification. It is a master one
                item.remove_in_progress_notification(notification)
                logger.debug("Remove master notification (no repeat): %s", notification)
        else:
            # Wipe out this master notification.
            logger.debug("Remove master notification (no more a problem): %s", notification)
            # We don't repeat recover/downtime/flap/etc...
            item.remove_in_progress_notification(notification)

Summary:

    Generate children notifications from a master notification.
    Also update the notification number.

    Master notifications are raised when a notification must be sent out. They are not
    launched by reactionners (only children are) but they are used to build the
    children notifications.

    From one master notification, several children notifications may be built,
    indeed one per each contact...

    :return: None

Please provide a description of the function:

def get_to_run_checks(self, do_checks=False, do_actions=False,
                      poller_tags=None, reactionner_tags=None,
                      worker_name='none', module_types=None):
    # pylint: disable=too-many-branches
    res = []
    now = time.time()

    if poller_tags is None:
        poller_tags = ['None']
    if reactionner_tags is None:
        reactionner_tags = ['None']
    if module_types is None:
        module_types = ['fork']
    if not isinstance(module_types, list):
        module_types = [module_types]

    # If a poller wants its checks
    if do_checks:
        if self.checks:
            logger.debug("I have %d prepared checks", len(self.checks))

        for check in list(self.checks.values()):
            logger.debug("Check: %s (%s / %s)", check.uuid, check.poller_tag, check.module_type)

            if check.internal:
                # Do not care about Alignak internally executed checks
                continue

            # If the command is untagged, and the poller too, or if both are tagged
            # with same name, go for it
            # if do_check, call for poller, and so poller_tags by default is ['None']
            # by default poller_tag is 'None' and poller_tags is ['None']
            # and same for module_type, the default is the 'fork' type
            if check.poller_tag not in poller_tags:
                logger.debug(" -> poller tag do not match")
                continue
            if check.module_type not in module_types:
                logger.debug(" -> module type do not match")
                continue

            logger.debug(" -> : %s %s (%s)",
                         'worker' if not check.internal else 'internal',
                         check.status,
                         'now' if check.is_launchable(now) else 'not yet')
            if check._is_orphan and check.status == ACT_STATUS_SCHEDULED \
                    and os.getenv('ALIGNAK_LOG_CHECKS', None):
                logger.info("--ALC-- orphan check: %s -> : %s %s (%s)",
                            check, 'worker' if not check.internal else 'internal',
                            check.status, 'now' if check.is_launchable(now) else 'not yet')

            # must be ok to launch, and not an internal one (business rules based)
            if check.status == ACT_STATUS_SCHEDULED and check.is_launchable(now):
                logger.debug("Check to run: %s", check)
                check.status = ACT_STATUS_POLLED
                check.my_worker = worker_name
                res.append(check)

                # Stats
                self.nb_checks_launched += 1

                if 'ALIGNAK_LOG_ACTIONS' in os.environ:
                    if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING':
                        logger.warning("Check to run: %s", check)
                    else:
                        logger.info("Check to run: %s", check)

        if res:
            logger.debug("-> %d checks to start now", len(res))
        else:
            logger.debug("-> no checks to start now")

    # If a reactionner wants its actions
    if do_actions:
        if self.actions:
            logger.debug("I have %d prepared actions", len(self.actions))

        for action in list(self.actions.values()):
            logger.debug("Action: %s (%s / %s)",
                         action.uuid, action.reactionner_tag, action.module_type)

            if action.internal:
                # Do not care about Alignak internally executed checks
                continue

            is_master = (action.is_a == 'notification' and not action.contact)
            if is_master:
                continue

            # if do_action, call the reactionner,
            # and so reactionner_tags by default is ['None']
            # by default reactionner_tag is 'None' and reactionner_tags is ['None'] too
            # So if not the good one, loop for next :)
            if action.reactionner_tag not in reactionner_tags:
                logger.debug(" -> reactionner tag do not match")
                continue

            # same for module_type
            if action.module_type not in module_types:
                logger.debug(" -> module type do not match")
                continue

            # And now look if we can launch or not :)
            logger.debug(" -> : worker %s (%s)",
                         action.status, 'now' if action.is_launchable(now) else 'not yet')
            if action._is_orphan and action.status == ACT_STATUS_SCHEDULED and \
                    os.getenv('ALIGNAK_LOG_CHECKS', None):
                logger.info("--ALC-- orphan action: %s", action)

            if action.status == ACT_STATUS_SCHEDULED and action.is_launchable(now):
                # This is for child notifications and eventhandlers
                action.status = ACT_STATUS_POLLED
                action.my_worker = worker_name
                res.append(action)

                # Stats
                self.nb_actions_launched += 1

                if 'ALIGNAK_LOG_ACTIONS' in os.environ:
                    if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING':
                        logger.warning("Action to run: %s", action)
                    else:
                        logger.info("Action to run: %s", action)

        if res:
            logger.debug("-> %d actions to start now", len(res))
        else:
            logger.debug("-> no actions to start now")

    return res

Summary:

    Get actions/checks for reactionner/poller

    Called by the poller to get checks (do_checks=True) and
    by the reactionner (do_actions=True) to get actions

    :param do_checks: do we get checks ?
    :type do_checks: bool
    :param do_actions: do we get actions ?
    :type do_actions: bool
    :param poller_tags: poller tags to filter
    :type poller_tags: list
    :param reactionner_tags: reactionner tags to filter
    :type reactionner_tags: list
    :param worker_name: worker name to fill check/action (to remember it)
    :type worker_name: str
    :param module_types: module type to filter
    :type module_types: list
    :return: Check/Action list with poller/reactionner tags matching and module type matching
    :rtype: list

Please provide a description of the function:def manage_results(self, action): # pylint: disable=too-many-branches,too-many-statements
logger.debug('manage_results: %s ', action)
if action.is_a == 'notification':
try:
_ = self.actions[action.uuid]
except KeyError as exp: # pragma: no cover, simple protection
# Cannot find notification - drop it
logger.warning('manage_results:: get unknown notification : %s ', str(exp))
for uuid in self.actions:
logger.debug('manage_results:: known action: %s ', self.actions[uuid])
return
# We will only see child notifications here
try:
timeout = False
execution_time = 0
if action.status == ACT_STATUS_TIMEOUT:
# Unfortunately the remove_in_progress_notification
# sets the status to zombie, so we need to save it here.
timeout = True
execution_time = action.execution_time
# Add protection for strange charset
try:
action.output = action.output.decode('utf8', 'ignore')
except UnicodeDecodeError:
pass
except AttributeError:
# Python 3 will raise an exception
pass
self.actions[action.uuid].get_return_from(action)
item = self.find_item_by_id(self.actions[action.uuid].ref)
item.remove_in_progress_notification(action)
self.actions[action.uuid].status = ACT_STATUS_ZOMBIE
item.last_notification = int(action.check_time)
# And we ask the item to update its state
self.get_and_register_status_brok(item)
# If we' ve got a problem with the notification, raise a Warning log
if timeout:
contact = self.find_item_by_id(self.actions[action.uuid].contact)
item = self.find_item_by_id(self.actions[action.uuid].ref)
self.nb_actions_results_timeout += 1
logger.warning("Contact %s %s notification command '%s ' "
"timed out after %.2f seconds",
contact.contact_name,
item.my_type,
self.actions[action.uuid].command,
execution_time)
else:
self.nb_actions_results += 1
if action.exit_status != 0:
logger.warning("The notification command '%s' raised an error "
"(exit code=%d): '%s'",
action.command, action.exit_status, action.output)
except (ValueError, AttributeError) as exp: # pragma: no cover, simple protection
# bad object, drop it
logger.warning('manage_results:: got bad notification : %s ', str(exp))
elif action.is_a == 'check':
try:
check = self.checks[action.uuid]
except KeyError as exp: # pragma: no cover, simple protection
# Cannot find check - drop it
logger.warning('manage_results:: got an unknown check: %s', action)
for uuid in self.checks:
logger.debug('manage_results:: known check: %s ', self.checks[uuid])
return
try:
if action.status == ACT_STATUS_TIMEOUT:
ref = self.find_item_by_id(check.ref)
action.long_output = action.output
action.output = "(%s %s check timed out)" % (ref.my_type, ref.get_full_name())
action.exit_status = self.pushed_conf.timeout_exit_status
self.nb_checks_results_timeout += 1
logger.warning("Timeout raised for '%s' (check command for the %s '%s'), "
"check status code: %d, execution time: %d seconds",
action.command, ref.my_type, ref.get_full_name(),
action.exit_status, int(action.execution_time))
else:
self.nb_checks_results += 1
if action.passive_check:
self.nb_checks_results_passive += 1
else:
self.nb_checks_results_active += 1
check.get_return_from(action)
check.status = ACT_STATUS_WAIT_CONSUME
if check._is_orphan and os.getenv('ALIGNAK_LOG_CHECKS', None):
logger.info("--ALC-- got a result for an orphan check: %s", check)
except (ValueError, AttributeError) as exp: # pragma: no cover, simple protection
# bad object, drop it
logger.warning('manage_results:: got a bad check: %s', str(exp))
elif action.is_a == 'eventhandler':
try:
old_action = self.actions[action.uuid]
old_action.status = ACT_STATUS_ZOMBIE
except KeyError as exp: # pragma: no cover, simple protection
# cannot find old action
# bad object, drop it
logger.warning('manage_results:: got an unknown event handler: %s', str(exp))
return
try:
if action.status == ACT_STATUS_TIMEOUT:
_type = 'event handler'
if action.is_snapshot:
_type = 'snapshot'
ref = self.find_item_by_id(self.checks[action.uuid].ref)
logger.info("%s %s command '%s' timed out after %d seconds",
ref.__class__.my_type.capitalize(), # pylint: disable=E1101
_type, self.actions[action.uuid].command,
int(action.execution_time))
self.nb_actions_results_timeout += 1
else:
self.nb_actions_results += 1
# If it's a snapshot we should get the output and export it
if action.is_snapshot:
old_action.get_return_from(action)
s_item = self.find_item_by_id(old_action.ref)
self.add(s_item.get_snapshot_brok(old_action.output, old_action.exit_status))
except (ValueError, AttributeError) as exp: # pragma: no cover, simple protection
# bad object, drop it
logger.warning('manage_results:: got a bad event handler: %s', str(exp))
else: # pragma: no cover, simple protection, should not happen!
logger.error("The received result type in unknown! %s", str(action.is_a)) | [
"Get result from pollers/reactionners (actives ones)\n\n :param action: check / action / event handler to handle\n :type action:\n :return: None\n "
] |
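
The dispatcher above only relies on a handful of attributes of the incoming result. A hypothetical stub illustrating the minimal shape of a check result (values are illustrative; the 'timeout' literal is an assumption about the value of ACT_STATUS_TIMEOUT):

class StubResult:
    """Hypothetical minimal check result, as read by manage_results"""
    is_a = 'check'                 # or 'notification' / 'eventhandler'
    uuid = 'check-uuid-1'          # must match a known check/action uuid
    status = 'timeout'             # assumed value of ACT_STATUS_TIMEOUT
    exit_status = 2
    output = 'CRITICAL - no response'
    execution_time = 10.2
    check_time = 1550000000
    passive_check = False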
Please provide a description of the function:def push_actions_to_passive_satellites(self):
# We loop for our passive pollers or reactionners
for satellites in [self.my_daemon.pollers, self.my_daemon.reactionners]:
s_type = 'poller'
if satellites is self.my_daemon.reactionners:
s_type = 'reactionner'
for link in [s for s in list(satellites.values()) if s.passive]:
logger.debug("Try to send actions to the %s '%s'", s_type, link.name)
# Get actions to execute
lst = []
if s_type == 'poller':
lst = self.get_to_run_checks(do_checks=True, do_actions=False,
poller_tags=link.poller_tags,
worker_name=link.name)
elif s_type == 'reactionner':
lst = self.get_to_run_checks(do_checks=False, do_actions=True,
reactionner_tags=link.reactionner_tags,
worker_name=link.name)
if not lst:
logger.debug("Nothing to do...")
continue
logger.debug("Sending %d actions to the %s '%s'", len(lst), s_type, link.name)
link.push_actions(lst, self.instance_id) | [
"Send actions/checks to passive poller/reactionners\n\n :return: None\n "
] |
Please provide a description of the function:def get_results_from_passive_satellites(self):
# pylint: disable=broad-except
# We loop for our passive pollers or reactionners
for satellites in [self.my_daemon.pollers, self.my_daemon.reactionners]:
s_type = 'poller'
if satellites is self.my_daemon.reactionners:
s_type = 'reactionner'
for link in [s for s in list(satellites.values()) if s.passive]:
logger.debug("Trying to get results from the %s '%s'", s_type, link.name)
results = link.get_results(self.instance_id)
if results:
logger.debug("Got some results: %d results from %s", len(results), link.name)
else:
logger.debug("-> no passive results from %s", link.name)
continue
results = unserialize(results, no_load=True)
if results:
logger.info("Received %d passive results from %s", len(results), link.name)
for result in results:
logger.debug("-> result: %s", result)
# Append to the scheduler result queue
self.waiting_results.put(result) | [
"Get actions/checks results from passive poller/reactionners\n\n :return: None\n "
] |
Please provide a description of the function:def manage_internal_checks(self):
if os.getenv('ALIGNAK_MANAGE_INTERNAL', '1') != '1':
return
now = time.time()
for chk in list(self.checks.values()):
if not chk.internal:
# Exclude checks that are not internal ones
continue
# Exclude checks that are not yet ready to launch
if not chk.is_launchable(now) or chk.status not in [ACT_STATUS_SCHEDULED]:
continue
item = self.find_item_by_id(chk.ref)
# Only if active checks are enabled
if not item or not item.active_checks_enabled:
# Ask to remove the check
chk.status = ACT_STATUS_ZOMBIE
continue
logger.debug("Run internal check for %s", item)
self.nb_internal_checks += 1
# Execute internal check
item.manage_internal_check(self.hosts, self.services, chk, self.hostgroups,
self.servicegroups, self.macromodulations,
self.timeperiods)
# Ask to consume the check result
chk.status = ACT_STATUS_WAIT_CONSUME | [
"Run internal checks\n\n :return: None\n "
] |
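
Internal checks management is gated by the ALIGNAK_MANAGE_INTERNAL environment variable; a tiny sketch of disabling it (any value other than '1' turns the method into a no-op):

import os

# Disable internal checks management before the scheduler loop runs
os.environ['ALIGNAK_MANAGE_INTERNAL'] = '0'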
Please provide a description of the function:def reset_topology_change_flag(self):
for i in self.hosts:
i.topology_change = False
for i in self.services:
i.topology_change = False | [
"Set topology_change attribute to False in all hosts and services\n\n :return: None\n "
] |
Please provide a description of the function:def update_retention(self):
# If we set the retention update to 0, we do not want to manage retention
if self.pushed_conf.retention_update_interval == 0:
logger.debug("Should have saved retention but it is not enabled")
return
_t0 = time.time()
self.hook_point('save_retention')
statsmgr.timer('hook.retention-save', time.time() - _t0)
self.add(make_monitoring_log('INFO', 'RETENTION SAVE: %s' % self.my_daemon.name))
logger.info('Retention data saved: %.2f seconds', time.time() - _t0) | [
"Call hook point 'save_retention'.\n Retention modules will write back retention (to file, db etc)\n\n :param forced: is update forced?\n :type forced: bool\n :return: None\n "
] |
Please provide a description of the function:def retention_load(self, forced=False):
# If we set the retention update to 0, we do not want to manage retention
# If we are not forced (like at stopping)
if self.pushed_conf.retention_update_interval == 0 and not forced:
logger.debug("Should have loaded retention but it is not enabled")
return
_t0 = time.time()
self.hook_point('load_retention')
statsmgr.timer('hook.retention-load', time.time() - _t0)
self.add(make_monitoring_log('INFO', 'RETENTION LOAD: %s' % self.my_daemon.name))
logger.info('Retention data loaded: %.2f seconds', time.time() - _t0) | [
"Call hook point 'load_retention'.\n Retention modules will read retention (from file, db etc)\n\n :param forced: is load forced?\n :type forced: bool\n :return: None\n "
] |
Please provide a description of the function:def log_initial_states(self):
# Raise hosts initial status broks
for elt in self.hosts:
elt.raise_initial_state()
# And then services initial status broks
for elt in self.services:
elt.raise_initial_state() | [
"Raise hosts and services initial status logs\n\n First, raise hosts status and then services. This to allow the events log\n to be a little sorted.\n\n :return: None\n "
] |
Please provide a description of the function:def get_retention_data(self): # pylint: disable=too-many-branches,too-many-statements
# pylint: disable=too-many-locals
retention_data = {
'hosts': {}, 'services': {}
}
for host in self.hosts:
h_dict = {}
# Get the hosts properties and running properties
properties = host.__class__.properties
properties.update(host.__class__.running_properties)
for prop, entry in list(properties.items()):
if not entry.retention:
continue
val = getattr(host, prop)
# If a preparation function exists...
prepare_retention = entry.retention_preparation
if prepare_retention:
val = prepare_retention(host, val)
h_dict[prop] = val
retention_data['hosts'][host.host_name] = h_dict
logger.info('%d hosts sent to retention', len(retention_data['hosts']))
# Same for services
for service in self.services:
s_dict = {}
# Get the services properties and running properties
properties = service.__class__.properties
properties.update(service.__class__.running_properties)
for prop, entry in list(properties.items()):
if not entry.retention:
continue
val = getattr(service, prop)
# If a preparation function exists...
prepare_retention = entry.retention_preparation
if prepare_retention:
val = prepare_retention(service, val)
s_dict[prop] = val
retention_data['services'][(service.host_name, service.service_description)] = s_dict
logger.info('%d services sent to retention', len(retention_data['services']))
return retention_data | [
"Get all hosts and services data to be sent to the retention storage.\n\n This function only prepares the data because a module is in charge of making\n the data survive to the scheduler restart.\n\n todo: Alignak scheduler creates two separate dictionaries: hosts and services\n It would be better to merge the services into the host dictionary!\n\n :return: dict containing host and service data\n :rtype: dict\n "
] |
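
A sketch of the returned structure with illustrative values; the property names shown are hypothetical, but the keying is the one built above: hosts by host_name, services by a (host_name, service_description) tuple:

retention_data = {
    'hosts': {
        'server1': {'state': 'UP', 'last_chk': 1550000000},
    },
    'services': {
        ('server1', 'Load'): {'state': 'OK', 'last_chk': 1550000000},
    },
}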
Please provide a description of the function:def restore_retention_data(self, data):
if 'hosts' not in data:
logger.warning("Retention data are not correct, no 'hosts' property!")
return
for host_name in data['hosts']:
# We take the dict of our value to load
host = self.hosts.find_by_name(host_name)
if host is not None:
self.restore_retention_data_item(data['hosts'][host_name], host)
statsmgr.gauge('retention.hosts', len(data['hosts']))
logger.info('%d hosts restored from retention', len(data['hosts']))
# Same for services
for (host_name, service_description) in data['services']:
# We take our dict to load
service = self.services.find_srv_by_name_and_hostname(host_name, service_description)
if service is not None:
self.restore_retention_data_item(data['services'][(host_name, service_description)],
service)
statsmgr.gauge('retention.services', len(data['services']))
logger.info('%d services restored from retention', len(data['services'])) | [
"Restore retention data\n\n Data coming from retention will override data coming from configuration\n It is kinda confusing when you modify an attribute (external command) and it get saved\n by retention\n\n :param data: data from retention\n :type data: dict\n :return: None\n "
] |
Please provide a description of the function:def restore_retention_data_item(self, data, item):
# pylint: disable=too-many-branches, too-many-locals
# Manage the properties and running properties
properties = item.__class__.properties
properties.update(item.__class__.running_properties)
for prop, entry in list(properties.items()):
if not entry.retention:
continue
if prop not in data:
continue
# If a restoration function exists...
restore_retention = entry.retention_restoration
if restore_retention:
setattr(item, prop, restore_retention(item, data[prop]))
else:
setattr(item, prop, data[prop])
# Now manage all linked objects loaded from the previous run
for notification_uuid in item.notifications_in_progress:
notification = item.notifications_in_progress[notification_uuid]
# Update the notification referenced object
notification['ref'] = item.uuid
my_notification = Notification(params=notification)
item.notifications_in_progress[notification_uuid] = my_notification
# Add a notification in the scheduler actions
self.add(my_notification)
# todo: is it useful? We do not save/restore checks in the retention data...
item.update_in_checking()
# And also add downtimes and comments
# Downtimes are stored in a dict keyed by uuid
for downtime_uuid in data['downtimes']:
downtime = data['downtimes'][downtime_uuid]
# Update the downtime referenced object
downtime['ref'] = item.uuid
my_downtime = Downtime(params=downtime)
if downtime['comment_id']:
if downtime['comment_id'] not in data['comments']:
downtime['comment_id'] = ''
# in case comment_id holds a comment dict instead of a uuid
# todo: This should never happen! Why this code?
if 'uuid' in downtime['comment_id']:
data['comments'].append(downtime['comment_id'])
downtime['comment_id'] = downtime['comment_id']['uuid']
item.add_downtime(my_downtime)
# Comments are stored in a dict keyed by uuid
for comment_uuid in data['comments']:
comment = data['comments'][comment_uuid]
# Update the comment referenced object
comment['ref'] = item.uuid
item.add_comment(Comment(comment))
if item.acknowledgement is not None:
# Update the comment referenced object
item.acknowledgement['ref'] = item.uuid
item.acknowledgement = Acknowledge(item.acknowledgement)
# Relink the notified_contacts as a set() of true contacts objects
# if it was loaded from the retention, it's now a list of contacts
# names
new_notified_contacts = set()
new_notified_contacts_ids = set()
for contact_name in item.notified_contacts:
contact = self.contacts.find_by_name(contact_name)
if contact is not None:
new_notified_contacts.add(contact_name)
new_notified_contacts_ids.add(contact.uuid)
item.notified_contacts = new_notified_contacts
item.notified_contacts_ids = new_notified_contacts_ids | [
"\n Restore data in item\n\n :param data: retention data of the item\n :type data: dict\n :param item: host or service item\n :type item: alignak.objects.host.Host | alignak.objects.service.Service\n :return: None\n "
] |
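
The retention_preparation / retention_restoration hooks used in get_retention_data and here form a symmetric pair. A minimal self-contained sketch of such a pair (hypothetical functions, mimicking the entry.retention_preparation / entry.retention_restoration calls):

def prepare_tags(item, value):
    # retention_preparation hook: turn a set into a JSON-friendly sorted list
    return sorted(value)

def restore_tags(item, value):
    # retention_restoration hook: rebuild the set when loading retention
    return set(value)

saved = prepare_tags(None, {'linux', 'prod'})    # -> ['linux', 'prod']
loaded = restore_tags(None, saved)               # -> {'linux', 'prod'}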
Please provide a description of the function:def fill_initial_broks(self, broker_name):
# pylint: disable=too-many-branches
broker_uuid = None
logger.debug("My brokers: %s", self.my_daemon.brokers)
for broker_link in list(self.my_daemon.brokers.values()):
logger.debug("Searching broker: %s", broker_link)
if broker_name == broker_link.name:
broker_uuid = broker_link.uuid
logger.info("Filling initial broks for: %s (%s)", broker_name, broker_uuid)
break
else:
if self.pushed_conf:
# I am already configured but I do not know this broker! Something went wrong!!!
logger.error("Requested initial broks for an unknown broker: %s", broker_name)
else:
logger.info("Requested initial broks for an unknown broker: %s", broker_name)
return 0
if self.my_daemon.brokers[broker_uuid].initialized:
logger.warning("The broker %s still got its initial broks...", broker_name)
return 0
initial_broks_count = len(self.my_daemon.brokers[broker_uuid].broks)
# First the program status
brok = self.get_program_status_brok()
self.add_brok(brok, broker_uuid)
# We can't call initial_status for all these types
# The order is important, services need hosts...
initial_status_types = (self.timeperiods, self.commands,
self.contacts, self.contactgroups,
self.hosts, self.hostgroups,
self.services, self.servicegroups)
self.pushed_conf.skip_initial_broks = getattr(self.pushed_conf, 'skip_initial_broks', False)
logger.debug("Skipping initial broks? %s", str(self.pushed_conf.skip_initial_broks))
if not self.pushed_conf.skip_initial_broks:
# We call initial_status for all these types
# The order is important, services need hosts...
initial_status_types = (self.realms, self.timeperiods, self.commands,
self.notificationways, self.contacts, self.contactgroups,
self.hosts, self.hostgroups, self.hostdependencies,
self.services, self.servicegroups, self.servicedependencies,
self.escalations)
for tab in initial_status_types:
for item in tab:
# Awful! simply to get the group members property name... :(
# todo: replace this!
member_items = None
if hasattr(item, 'members'):
member_items = getattr(self, item.my_type.replace("group", "s"))
brok = item.get_initial_status_brok(member_items)
self.add_brok(brok, broker_uuid)
# Add a brok to say that we finished all initial_pass
brok = Brok({'type': 'initial_broks_done', 'data': {'instance_id': self.instance_id}})
self.add_brok(brok, broker_uuid)
final_broks_count = len(self.my_daemon.brokers[broker_uuid].broks)
self.my_daemon.brokers[broker_uuid].initialized = True
# Send the initial broks to our modules
self.send_broks_to_modules()
# We now have raised all the initial broks
self.raised_initial_broks = True
logger.info("Created %d initial broks for %s",
final_broks_count - initial_broks_count, broker_name)
return final_broks_count - initial_broks_count | [
"Create initial broks for a specific broker\n\n :param broker_name: broker name\n :type broker_name: str\n :return: number of created broks\n "
] |
Please provide a description of the function:def consume_results(self): # pylint: disable=too-many-branches
# All results are in self.waiting_results
# We need to get them first
queue_size = self.waiting_results.qsize()
for _ in range(queue_size):
self.manage_results(self.waiting_results.get())
# Then we consume them
for chk in list(self.checks.values()):
if chk.status == ACT_STATUS_WAIT_CONSUME:
logger.debug("Consuming: %s", chk)
item = self.find_item_by_id(chk.ref)
notification_period = None
if getattr(item, 'notification_period', None) is not None:
notification_period = self.timeperiods[item.notification_period]
dep_checks = item.consume_result(chk, notification_period, self.hosts,
self.services, self.timeperiods,
self.macromodulations, self.checkmodulations,
self.businessimpactmodulations,
self.resultmodulations, self.checks,
self.pushed_conf.log_active_checks and
not chk.passive_check)
# # Raise the log only when the check got consumed!
# # Else the item information are not up-to-date :/
# if self.pushed_conf.log_active_checks and not chk.passive_check:
# item.raise_check_result()
#
for check in dep_checks:
logger.debug("-> raised a dependency check: %s", chk)
self.add(check)
# loop to resolve dependencies
have_resolved_checks = True
while have_resolved_checks:
have_resolved_checks = False
# All 'finished' checks (no more dep) raise checks they depend on
for chk in list(self.checks.values()):
if chk.status == ACT_STATUS_WAITING_ME:
for dependent_check in chk.depend_on_me:
# Ok, now the dependent check will no longer wait
dependent_check.depend_on.remove(chk.uuid)
have_resolved_checks = True
# REMOVE OLD DEP CHECK -> zombie
chk.status = ACT_STATUS_ZOMBIE
# Now, reinteger dep checks
for chk in list(self.checks.values()):
if chk.status == ACT_STATUS_WAIT_DEPEND and not chk.depend_on:
item = self.find_item_by_id(chk.ref)
notification_period = None
if getattr(item, 'notification_period', None) is not None:
notification_period = self.timeperiods[item.notification_period]
dep_checks = item.consume_result(chk, notification_period, self.hosts,
self.services, self.timeperiods,
self.macromodulations, self.checkmodulations,
self.businessimpactmodulations,
self.resultmodulations, self.checks,
self.pushed_conf.log_active_checks and
not chk.passive_check)
for check in dep_checks:
self.add(check) | [
"Handle results waiting in waiting_results list.\n Check ref will call consume result and update their status\n\n :return: None\n "
] |
Please provide a description of the function:def delete_zombie_checks(self):
id_to_del = []
for chk in list(self.checks.values()):
if chk.status == ACT_STATUS_ZOMBIE:
id_to_del.append(chk.uuid)
# a little pat on the back and off you go, thanks...
# *pat pat* off you go, thanks :)
for c_id in id_to_del:
del self.checks[c_id] | [
"Remove checks that have a zombie status (usually timeouts)\n\n :return: None\n "
] |
Please provide a description of the function:def delete_zombie_actions(self):
id_to_del = []
for act in list(self.actions.values()):
if act.status == ACT_STATUS_ZOMBIE:
id_to_del.append(act.uuid)
# a little pat on the back and off you go, thanks...
# *pat pat* off you go, thanks :)
for a_id in id_to_del:
del self.actions[a_id] | [
"Remove actions that have a zombie status (usually timeouts)\n\n :return: None\n "
] |
Please provide a description of the function:def update_downtimes_and_comments(self):
# pylint: disable=too-many-branches
broks = []
now = time.time()
# Check maintenance periods
for elt in self.all_my_hosts_and_services():
if not elt.maintenance_period:
continue
if elt.in_maintenance == -1:
timeperiod = self.timeperiods[elt.maintenance_period]
if timeperiod.is_time_valid(now):
start_dt = timeperiod.get_next_valid_time_from_t(now)
end_dt = timeperiod.get_next_invalid_time_from_t(start_dt + 1) - 1
data = {
'ref': elt.uuid, 'ref_type': elt.my_type, 'start_time': start_dt,
'end_time': end_dt, 'fixed': 1, 'trigger_id': '',
'duration': 0, 'author': "Alignak",
'comment': "This downtime was automatically scheduled by Alignak "
"because of a maintenance period."
}
downtime = Downtime(data)
self.add(downtime.add_automatic_comment(elt))
elt.add_downtime(downtime)
self.add(downtime)
self.get_and_register_status_brok(elt)
elt.in_maintenance = downtime.uuid
else:
if elt.in_maintenance not in elt.downtimes:
# the maintenance downtime has expired or was manually deleted
elt.in_maintenance = -1
# Check the validity of contact downtimes
for elt in self.contacts:
for downtime_id in elt.downtimes:
downtime = elt.downtimes[downtime_id]
downtime.check_activation(self.contacts)
# A loop where those downtimes are removed
# which were marked for deletion (mostly by dt.exit())
for elt in self.all_my_hosts_and_services():
for downtime in list(elt.downtimes.values()):
if not downtime.can_be_deleted:
continue
logger.debug("Downtime to delete: %s", downtime.__dict__)
elt.del_downtime(downtime.uuid)
broks.append(elt.get_update_status_brok())
# Same for contact downtimes:
for elt in self.contacts:
for downtime in list(elt.downtimes.values()):
if not downtime.can_be_deleted:
continue
elt.del_downtime(downtime.uuid)
broks.append(elt.get_update_status_brok())
# Check start and stop times
for elt in self.all_my_hosts_and_services():
for downtime in list(elt.downtimes.values()):
if downtime.real_end_time < now:
# this one has expired
broks.extend(downtime.exit(self.timeperiods, self.hosts, self.services))
elif now >= downtime.start_time and downtime.fixed and not downtime.is_in_effect:
# this one has to start now
broks.extend(downtime.enter(self.timeperiods, self.hosts, self.services))
broks.append(self.find_item_by_id(downtime.ref).get_update_status_brok())
for brok in broks:
self.add(brok) | [
"Iter over all hosts and services::\n\n TODO: add some unit tests for the maintenance period feature.\n\n * Update downtime status (start / stop) regarding maintenance period\n * Register new comments in comments list\n\n :return: None\n "
] |
Please provide a description of the function:def schedule(self, elements=None):
if not elements:
elements = self.all_my_hosts_and_services()
# ask for service and hosts their next check
for elt in elements:
logger.debug("Add check for: %s", elt)
self.add(elt.schedule(self.hosts, self.services, self.timeperiods,
self.macromodulations, self.checkmodulations, self.checks)) | [
"Iterate over all hosts and services and call schedule method\n (schedule next check)\n\n If elements is None all our hosts and services are scheduled for a check.\n\n :param elements: None or list of host / services to schedule\n :type elements: None | list\n :return: None\n "
] |
Please provide a description of the function:def get_new_actions(self):
_t0 = time.time()
self.hook_point('get_new_actions')
statsmgr.timer('hook.get-new-actions', time.time() - _t0)
# ask for service and hosts their next check
for elt in self.all_my_hosts_and_services():
for action in elt.actions:
logger.debug("Got a new action for %s: %s", elt, action)
self.add(action)
# We take all, we can clear it
elt.actions = [] | [
"Call 'get_new_actions' hook point\n Iter over all hosts and services to add new actions in internal lists\n\n :return: None\n "
] |
Please provide a description of the function:def get_new_broks(self):
# ask for service and hosts their broks waiting
# be eaten
for elt in self.all_my_hosts_and_services():
for brok in elt.broks:
self.add(brok)
# We got all, clear item broks list
elt.broks = []
# Also fetch broks from contact (like contactdowntime)
for contact in self.contacts:
for brok in contact.broks:
self.add(brok)
# We got all, clear contact broks list
contact.broks = [] | [
"Iter over all hosts and services to add new broks in internal lists\n\n :return: None\n "
] |
Please provide a description of the function:def check_orphaned(self):
orphans_count = {}
now = int(time.time())
actions = list(self.checks.values()) + list(self.actions.values())
for chk in actions:
if chk.status not in [ACT_STATUS_POLLED]:
continue
time_to_orphanage = self.find_item_by_id(chk.ref).get_time_to_orphanage()
if not time_to_orphanage:
continue
if chk.t_to_go > now - time_to_orphanage:
continue
logger.info("Orphaned %s (%d s / %s / %s) check for: %s (%s)",
chk.is_a, time_to_orphanage, chk.t_to_go, now,
self.find_item_by_id(chk.ref).get_full_name(), chk)
chk._is_orphan = True
chk.status = ACT_STATUS_SCHEDULED
if chk.my_worker not in orphans_count:
orphans_count[chk.my_worker] = 0
orphans_count[chk.my_worker] += 1
for sta_name in orphans_count:
logger.warning("%d actions never came back for the satellite '%s'. "
"I reenable them for polling.",
orphans_count[sta_name], sta_name) | [
"Check for orphaned checks/actions::\n\n * status == 'in_poller' and t_to_go < now - time_to_orphanage (300 by default)\n\n if so raise a warning log.\n\n :return: None\n "
] |
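
A worked example of the orphan test above (illustrative timestamps, with the 300 second default delay mentioned in the docstring):

now = 1550000000
time_to_orphanage = 300        # default delay for the checked item
t_to_go = now - 400            # launch was planned 400 seconds ago

# The check is kept when t_to_go > now - time_to_orphanage;
# here the launch is 400s in the past, beyond the 300s delay, so it is orphaned
is_orphan = not (t_to_go > now - time_to_orphanage)   # True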
Please provide a description of the function:def send_broks_to_modules(self):
t00 = time.time()
nb_sent = 0
broks = []
for broker_link in list(self.my_daemon.brokers.values()):
for brok in broker_link.broks:
if not getattr(brok, 'sent_to_externals', False):
brok.to_send = True
broks.append(brok)
if not broks:
return
logger.debug("sending %d broks to modules...", len(broks))
for mod in self.my_daemon.modules_manager.get_external_instances():
logger.debug("Look for sending to module %s", mod.get_name())
module_queue = mod.to_q
if module_queue:
to_send = [b for b in broks if mod.want_brok(b)]
module_queue.put(to_send)
nb_sent += len(to_send)
# No more need to send them
for broker_link in list(self.my_daemon.brokers.values()):
for brok in broker_link.broks:
if not getattr(brok, 'sent_to_externals', False):
brok.to_send = False
brok.sent_to_externals = True
logger.debug("Time to send %d broks (after %d secs)", nb_sent, time.time() - t00) | [
"Put broks into module queues\n Only broks without sent_to_externals to True are sent\n Only modules that ask for broks will get some\n\n :return: None\n "
] |
Please provide a description of the function:def get_scheduler_stats(self, details=False): # pylint: disable=unused-argument
# pylint: disable=too-many-locals, too-many-branches
m_solver = MacroResolver()
res = {
'_freshness': int(time.time()),
'counters': {},
'latency': self.stats['latency'],
'monitored_objects': {},
'livesynthesis': {}
}
checks_status_counts = self.get_checks_status_counts()
# Checks / actions counters
for what in (u'actions', u'checks'):
res['counters']['%s.count' % what] = len(getattr(self, what))
for status in (u'scheduled', u'in_poller', u'zombie'):
res['counters']['%s.%s' % (what, status)] = checks_status_counts[status]
if self.pushed_conf:
for _, _, strclss, _, _ in list(self.pushed_conf.types_creations.values()):
# Internal statistics
res['monitored_objects'][strclss] = len(getattr(self, strclss, []))
# Scheduler live synthesis
res['livesynthesis'] = {
'hosts_total': m_solver._get_total_hosts(),
'hosts_not_monitored': m_solver._get_total_hosts_not_monitored(),
'hosts_up_hard': m_solver._get_total_hosts_up(u'HARD'),
'hosts_up_soft': m_solver._get_total_hosts_up(u'SOFT'),
'hosts_down_hard': m_solver._get_total_hosts_down(u'HARD'),
'hosts_down_soft': m_solver._get_total_hosts_down(u'SOFT'),
'hosts_unreachable_hard': m_solver._get_total_hosts_unreachable(u'HARD'),
'hosts_unreachable_soft': m_solver._get_total_hosts_unreachable(u'SOFT'),
'hosts_problems': m_solver._get_total_hosts_problems_unhandled(),
'hosts_acknowledged': m_solver._get_total_hosts_problems_handled(),
'hosts_in_downtime': m_solver._get_total_hosts_downtimed(),
'hosts_flapping': m_solver._get_total_hosts_flapping(),
'services_total': m_solver._get_total_services(),
'services_not_monitored': m_solver._get_total_services_not_monitored(),
'services_ok_hard': m_solver._get_total_services_ok(u'HARD'),
'services_ok_soft': m_solver._get_total_services_ok(u'SOFT'),
'services_warning_hard': m_solver._get_total_services_warning(u'HARD'),
'services_warning_soft': m_solver._get_total_services_warning(u'SOFT'),
'services_critical_hard': m_solver._get_total_services_critical(u'HARD'),
'services_critical_soft': m_solver._get_total_services_critical(u'SOFT'),
'services_unknown_hard': m_solver._get_total_services_unknown(u'HARD'),
'services_unknown_soft': m_solver._get_total_services_unknown(u'SOFT'),
'services_unreachable_hard': m_solver._get_total_services_unreachable(u'HARD'),
'services_unreachable_soft': m_solver._get_total_services_unreachable(u'SOFT'),
'services_problems': m_solver._get_total_services_problems_unhandled(),
'services_acknowledged': m_solver._get_total_services_problems_handled(),
'services_in_downtime': m_solver._get_total_services_downtimed(),
'services_flapping': m_solver._get_total_services_flapping()
}
if details:
# Hosts/services problems list
all_problems = {}
for item in self.hosts:
if item.state_type not in [u'HARD'] or item.state not in ['DOWN']:
continue
if item.is_problem and not item.problem_has_been_acknowledged:
all_problems[item.uuid] = {
'host': item.get_name(),
'service': None,
'state': item.state,
'state_type': item.state_type,
'output': item.output,
'last_state': item.last_state,
'last_state_type': item.last_state_type,
'last_state_update': item.last_state_update,
'last_state_change': item.last_state_change,
'last_hard_state_change': item.last_hard_state_change,
'last_hard_state': item.last_hard_state,
}
for item in self.services:
if item.state_type not in [u'HARD'] or item.state not in ['WARNING',
'CRITICAL']:
continue
if item.is_problem and not item.problem_has_been_acknowledged:
all_problems[item.uuid] = {
'host': item.host_name,
'service': item.get_name(),
'output': item.output,
'state': item.state,
'state_type': item.state_type,
'last_state': item.last_state,
'last_state_type': item.last_state_type,
'last_hard_state': item.last_hard_state,
'last_state_update': item.last_state_update,
'last_state_change': item.last_state_change,
'last_hard_state_change': item.last_hard_state_change,
}
res['problems'] = all_problems
all_commands = {}
# Some checks statistics: user/system time
for elt in self.all_my_hosts_and_services():
last_cmd = elt.last_check_command
if not last_cmd:
continue
cmd = os.path.split(last_cmd.split(' ', 1)[0])[1]
u_time = elt.u_time
s_time = elt.s_time
old_u_time, old_s_time = all_commands.get(cmd, (0.0, 0.0))
interval = elt.check_interval
if not interval:
interval = 1
old_u_time += u_time / interval
old_s_time += s_time / interval
all_commands[cmd] = (old_u_time, old_s_time)
# Return all the commands
res['commands'] = all_commands
return res | [
"Get the scheduler statistics\n\n :return: A dict with the following structure\n ::\n\n { 'modules': [\n {'internal': {'name': \"MYMODULE1\", 'state': 'ok'},\n {'external': {'name': \"MYMODULE2\", 'state': 'stopped'},\n ]\n 'latency': {'avg': lat_avg, 'min': lat_min, 'max': lat_max}\n 'hosts': len(self.hosts),\n 'services': len(self.services),\n 'commands': [{'cmd': c, 'u_time': u_time, 's_time': s_time}, ...] (10 first)\n 'livesynthesis': {...}\n }\n\n :rtype: dict\n "
] |
Please provide a description of the function:def get_latency_average_percentile(self):
(_, _, time_interval) = self.recurrent_works[21]
last_time = time.time() - time_interval
latencies = [s.latency for s in self.services if s.last_chk > last_time]
lat_avg, lat_min, lat_max = average_percentile(latencies)
if lat_avg is not None:
self.stats['latency']['avg'] = lat_avg
self.stats['latency']['min'] = lat_min
self.stats['latency']['max'] = lat_max
logger.debug("Latency (avg/min/max): %.2f/%.2f/%.2f", lat_avg, lat_min, lat_max) | [
"\n Get a overview of the latencies with just a 95 percentile + min/max values\n\n :return: None\n "
] |
Please provide a description of the function:def get_checks_status_counts(self, checks=None):
if checks is None:
checks = self.checks
res = defaultdict(int)
res["total"] = len(checks)
for chk in checks.values():
res[chk.status] += 1
return res | [
" Compute the counts of the different checks status and\n return it as a defaultdict(int) with the keys being the different\n status and the values being the count of the checks in that status.\n\n :checks: None or the checks you want to count their statuses.\n If None then self.checks is used.\n\n :param checks: NoneType | dict\n :type checks: None | dict\n :return:\n :rtype: defaultdict(int)\n "
] |
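
A self-contained sketch of the counting idiom with stub checks (status strings illustrative):

from collections import defaultdict

class StubCheck:
    def __init__(self, status):
        self.status = status

checks = {'c1': StubCheck('scheduled'),
          'c2': StubCheck('scheduled'),
          'c3': StubCheck('in_poller')}

res = defaultdict(int)
res['total'] = len(checks)
for chk in checks.values():
    res[chk.status] += 1
# res -> {'total': 3, 'scheduled': 2, 'in_poller': 1}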
Please provide a description of the function:def find_item_by_id(self, object_id):
# Item id may be an item
if isinstance(object_id, Item):
return object_id
# Item id should be a uuid string
if not isinstance(object_id, string_types):
logger.debug("Find an item by id, object_id is not int nor string: %s", object_id)
return object_id
for items in [self.hosts, self.services, self.actions, self.checks, self.hostgroups,
self.servicegroups, self.contacts, self.contactgroups]:
if object_id in items:
return items[object_id]
# raise AttributeError("Item with id %s not found" % object_id) # pragma: no cover,
logger.error("Item with id %s not found", str(object_id)) # pragma: no cover,
return None | [
"Get item based on its id or uuid\n\n :param object_id:\n :type object_id: int | str\n :return:\n :rtype: alignak.objects.item.Item | None\n "
] |
Please provide a description of the function:def before_run(self):
# Actions and checks counters
self.nb_checks = 0
self.nb_internal_checks = 0
self.nb_checks_launched = 0
self.nb_actions_launched = 0
self.nb_checks_results = 0
self.nb_checks_results_timeout = 0
self.nb_checks_results_passive = 0
self.nb_checks_results_active = 0
self.nb_actions_results = 0
self.nb_actions_results_timeout = 0
self.nb_actions_results_passive = 0
self.nb_broks_dropped = 0
self.nb_checks_dropped = 0
self.nb_actions_dropped = 0
# Broks, notifications, ... counters
self.nb_broks = 0
self.nb_notifications = 0
self.nb_event_handlers = 0
self.nb_external_commands = 0
self.ticks = 0 | [
"Initialize the scheduling process"
] |
Please provide a description of the function:def run(self): # pylint: disable=too-many-locals, too-many-statements, too-many-branches
if not self.must_schedule:
logger.warning("#%d - scheduler is not active...",
self.my_daemon.loop_count)
return
# Increment ticks count
self.ticks += 1
loop_start_ts = time.time()
# Do recurrent works like schedule, consume, delete_zombie_checks
for i in self.recurrent_works:
(name, fun, nb_ticks) = self.recurrent_works[i]
# A 0 in the tick will just disable it
if nb_ticks:
if self.ticks % nb_ticks == 0:
# Call it and save the time spend in it
_t0 = time.time()
fun()
statsmgr.timer('loop.recurrent.%s' % name, time.time() - _t0)
statsmgr.timer('loop.recurrent', time.time() - loop_start_ts)
_ts = time.time()
self.push_actions_to_passive_satellites()
statsmgr.timer('loop.push_actions_to_passive_satellites', time.time() - _ts)
_ts = time.time()
self.get_results_from_passive_satellites()
statsmgr.timer('loop.get_results_from_passive_satellites', time.time() - _ts)
# Scheduler statistics
# - broks / notifications counters
if self.my_daemon.log_loop:
logger.debug("Items (loop): broks: %d, notifications: %d, checks: %d, internal checks: "
"%d, event handlers: %d, external commands: %d",
self.nb_broks, self.nb_notifications, self.nb_checks,
self.nb_internal_checks, self.nb_event_handlers, self.nb_external_commands)
statsmgr.gauge('activity.checks', self.nb_checks)
statsmgr.gauge('activity.internal_checks', self.nb_internal_checks)
statsmgr.gauge('activity.launched_checks', self.nb_checks_launched)
statsmgr.gauge('activity.checks_results', self.nb_checks_results)
statsmgr.gauge('activity.checks_results_timeout', self.nb_checks_results_timeout)
statsmgr.gauge('activity.checks_results_active', self.nb_checks_results_active)
statsmgr.gauge('activity.checks_results_passive', self.nb_checks_results_passive)
statsmgr.gauge('activity.launched_actions', self.nb_actions_launched)
statsmgr.gauge('activity.actions_results', self.nb_actions_results)
statsmgr.gauge('activity.actions_results_timeout', self.nb_actions_results_timeout)
statsmgr.gauge('activity.broks', self.nb_broks)
statsmgr.gauge('activity.external_commands', self.nb_external_commands)
statsmgr.gauge('activity.notifications', self.nb_notifications)
statsmgr.gauge('activity.event_handlers', self.nb_event_handlers)
if self.my_daemon.need_dump_environment:
_ts = time.time()
logger.debug('I must dump my memory...')
self.my_daemon.dump_environment()
self.my_daemon.need_dump_environment = False
statsmgr.timer('loop.memory_dump', time.time() - _ts)
if self.my_daemon.need_objects_dump:
_ts = time.time()
logger.debug('I must dump my objects...')
self.dump_objects()
self.dump_config()
self.my_daemon.need_objects_dump = False
statsmgr.timer('loop.objects_dump', time.time() - _ts)
_ts = time.time()
self.hook_point('scheduler_tick')
statsmgr.timer('loop.hook-tick', time.time() - _ts)
if self.my_daemon.log_loop:
elapsed_time = time.time() - self.my_daemon.start_time
logger.debug("Check average (total) = %d checks results, %.2f checks/s",
self.nb_checks, self.nb_checks / elapsed_time)
if self.nb_checks_dropped > 0 \
or self.nb_broks_dropped > 0 or self.nb_actions_dropped > 0:
logger.warning("We dropped %d checks, %d broks and %d actions",
self.nb_checks_dropped, self.nb_broks_dropped, self.nb_actions_dropped)
statsmgr.gauge('activity.broks_dropped', self.nb_broks_dropped)
statsmgr.gauge('activity.checks_dropped', self.nb_checks_dropped)
statsmgr.gauge('activity.actions_dropped', self.nb_actions_dropped)
self.nb_checks_dropped = self.nb_broks_dropped = self.nb_actions_dropped = 0 | [
"Main scheduler function::\n\n * Load retention\n * Call 'pre_scheduler_mod_start' hook point\n * Start modules\n * Schedule first checks\n * Init connection with pollers/reactionners\n * Run main loop\n\n * Do recurrent works\n * Push/Get actions to passive satellites\n * Update stats\n * Call 'scheduler_tick' hook point\n\n * Save retention (on quit)\n\n :return: None\n "
] |
Please provide a description of the function:def add(self, elt):
# external commands may be received as a dictionary when pushed from the WebUI
if isinstance(elt, dict) and 'my_type' in elt and elt['my_type'] == "externalcommand":
if 'cmd_line' not in elt:
logger.debug("Received a bad formated external command: %s. "
"No cmd_line!", elt)
return
logger.debug("Received a dictionary external command: %s", elt)
if 'creation_timestamp' not in elt:
elt['creation_timestamp'] = None
elt = ExternalCommand(elt['cmd_line'], elt['creation_timestamp'])
if isinstance(elt, Brok):
# For brok, we tag the brok with our instance_id
elt.instance_id = self.instance_id
if elt.type == 'monitoring_log':
# The brok is a monitoring event
with self.events_lock:
self.events.append(elt)
statsmgr.counter('events', 1)
else:
with self.broks_lock:
self.broks.append(elt)
statsmgr.counter('broks.added', 1)
elif isinstance(elt, ExternalCommand):
logger.debug("Queuing an external command: %s", str(ExternalCommand.__dict__))
self.unprocessed_external_commands.append(elt)
statsmgr.counter('external-commands.added', 1) | [
"Generic function to add objects to the daemon internal lists.\n Manage Broks, External commands\n\n :param elt: object to add\n :type elt: alignak.AlignakObject\n :return: None\n "
] |
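
A sketch of the dictionary form accepted for external commands, as a WebUI would push it (the `receiver` instance and the field values are illustrative; PROCESS_HOST_CHECK_RESULT is a standard external command):

payload = {
    'my_type': 'externalcommand',
    'cmd_line': '[1550000000] PROCESS_HOST_CHECK_RESULT;server1;0;Host is alive',
    'creation_timestamp': 1550000000,
}
receiver.add(payload)   # converted to an ExternalCommand and queued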
Please provide a description of the function:def setup_new_conf(self):
# Execute the base class treatment...
super(Receiver, self).setup_new_conf()
# ...then our own specific treatment!
with self.conf_lock:
# self_conf is our own configuration from the alignak environment
# self_conf = self.cur_conf['self_conf']
logger.debug("Got config: %s", self.cur_conf)
# Configure and start our modules
if not self.have_modules:
try:
self.modules = unserialize(self.cur_conf['modules'], no_load=True)
except AlignakClassLookupException as exp: # pragma: no cover, simple protection
logger.error('Cannot un-serialize modules configuration '
'received from arbiter: %s', exp)
if self.modules:
logger.info("I received some modules configuration: %s", self.modules)
self.have_modules = True
self.do_load_modules(self.modules)
# and start external modules too
self.modules_manager.start_external_instances()
else:
logger.info("I do not have modules")
# Now create the external commands manager
# We are a receiver: our role is to get and dispatch commands to the schedulers
global_conf = self.cur_conf.get('global_conf', None)
if not global_conf:
logger.error("Received a configuration without any global_conf! "
"This may hide a configuration problem with the "
"realms and the manage_sub_realms of the satellites!")
global_conf = {
'accept_passive_unknown_check_results': False,
'log_external_commands': True
}
self.external_commands_manager = \
ExternalCommandManager(None, 'receiver', self,
global_conf.get(
'accept_passive_unknown_check_results', False),
global_conf.get(
'log_external_commands', False))
# Initialize connection with all our satellites
logger.info("Initializing connection with my satellites:")
my_satellites = self.get_links_of_type(s_type='')
for satellite in list(my_satellites.values()):
logger.info("- : %s/%s", satellite.type, satellite.name)
if not self.daemon_connection_init(satellite):
logger.error("Satellite connection failed: %s", satellite)
# Now I have a configuration!
self.have_conf = True | [
"Receiver custom setup_new_conf method\n\n This function calls the base satellite treatment and manages the configuration needed\n for a receiver daemon:\n - get and configure its satellites\n - configure the modules\n\n :return: None\n "
] |
Please provide a description of the function:def get_external_commands_from_arbiters(self):
for arbiter_link_uuid in self.arbiters:
link = self.arbiters[arbiter_link_uuid]
if not link.active:
logger.debug("The arbiter '%s' is not active, it is not possible to get "
"its external commands!", link.name)
continue
try:
logger.debug("Getting external commands from: %s", link.name)
external_commands = link.get_external_commands()
if external_commands:
logger.debug("Got %d commands from: %s", len(external_commands), link.name)
else:
# Simple protection against None value
external_commands = []
for external_command in external_commands:
self.add(external_command)
except LinkError:
logger.warning("Arbiter connection failed, I could not get external commands!")
except Exception as exp: # pylint: disable=broad-except
logger.error("Arbiter connection failed, I could not get external commands!")
logger.exception("Exception: %s", exp) | [
"Get external commands from our arbiters\n\n As of now, only the arbiter are requested to provide their external commands that\n the receiver will push to all the known schedulers to make them being executed.\n\n :return: None\n "
] |
Please provide a description of the function:def push_external_commands_to_schedulers(self):
if not self.unprocessed_external_commands:
return
# Those are the global external commands
commands_to_process = self.unprocessed_external_commands
self.unprocessed_external_commands = []
logger.debug("Commands: %s", commands_to_process)
# Now get all external commands and put them into the good schedulers
logger.debug("Commands to process: %d commands", len(commands_to_process))
for ext_cmd in commands_to_process:
cmd = self.external_commands_manager.resolve_command(ext_cmd)
logger.debug("Resolved command: %s, result: %s", ext_cmd.cmd_line, cmd)
if cmd and cmd['global']:
# Send global command to all our schedulers
for scheduler_link_uuid in self.schedulers:
self.schedulers[scheduler_link_uuid].pushed_commands.append(ext_cmd)
# Now for all active schedulers, send the commands
count_pushed_commands = 0
count_failed_commands = 0
for scheduler_link_uuid in self.schedulers:
link = self.schedulers[scheduler_link_uuid]
if not link.active:
logger.debug("The scheduler '%s' is not active, it is not possible to push "
"external commands to its connection!", link.name)
continue
# If there are some commands for this scheduler...
commands = [ext_cmd.cmd_line for ext_cmd in link.pushed_commands]
if not commands:
logger.debug("The scheduler '%s' has no commands.", link.name)
continue
logger.debug("Sending %d commands to scheduler %s", len(commands), link.name)
sent = []
try:
sent = link.push_external_commands(commands)
except LinkError:
logger.warning("Scheduler connection failed, I could not push external commands!")
# Whether we sent the commands or not, clean the scheduler list
link.pushed_commands = []
# If we could not send them, keep the commands for a next try
if sent:
statsmgr.gauge('external-commands.pushed.%s' % link.name, len(commands))
count_pushed_commands = count_pushed_commands + len(commands)
else:
count_failed_commands = count_failed_commands + len(commands)
statsmgr.gauge('external-commands.failed.%s' % link.name, len(commands))
# Keep the unsent commands... for a next try
self.external_commands.extend(commands)
statsmgr.gauge('external-commands.pushed.all', count_pushed_commands)
statsmgr.gauge('external-commands.failed.all', count_failed_commands) | [
"Push received external commands to the schedulers\n\n :return: None\n "
] |
Please provide a description of the function:def do_loop_turn(self):
# Begin to clean modules
self.check_and_del_zombie_modules()
# Maybe the arbiter pushed a new configuration...
if self.watch_for_new_conf(timeout=0.05):
logger.info("I got a new configuration...")
# Manage the new configuration
self.setup_new_conf()
# Maybe external modules raised 'objects'
# we should get them
_t0 = time.time()
self.get_objects_from_from_queues()
statsmgr.timer('core.get-objects-from-queues', time.time() - _t0)
# Get external commands from the arbiters...
_t0 = time.time()
self.get_external_commands_from_arbiters()
statsmgr.timer('external-commands.got.time', time.time() - _t0)
statsmgr.gauge('external-commands.got.count', len(self.unprocessed_external_commands))
_t0 = time.time()
self.push_external_commands_to_schedulers()
statsmgr.timer('external-commands.pushed.time', time.time() - _t0)
# Say to modules it's a new tick :)
_t0 = time.time()
self.hook_point('tick')
statsmgr.timer('hook.tick', time.time() - _t0) | [
"Receiver daemon main loop\n\n :return: None\n "
] |
Please provide a description of the function:def get_daemon_stats(self, details=False):
# Call the base Daemon one
res = super(Receiver, self).get_daemon_stats(details=details)
res.update({'name': self.name, 'type': self.type})
counters = res['counters']
counters['external-commands'] = len(self.external_commands)
counters['external-commands-unprocessed'] = len(self.unprocessed_external_commands)
return res | [
"Increase the stats provided by the Daemon base class\n\n :return: stats dictionary\n :rtype: dict\n "
] |
Please provide a description of the function:def get_return_from(self, check):
for prop in ['exit_status', 'output', 'long_output', 'check_time', 'execution_time',
'perf_data', 'u_time', 's_time']:
setattr(self, prop, getattr(check, prop)) | [
"Update check data from action (notification for instance)\n\n :param check: action to get data from\n :type check: alignak.action.Action\n :return: None\n "
] |
Please provide a description of the function:def serialize(self):
res = super(Check, self).serialize()
if 'depend_on' in res:
del res['depend_on']
if 'depend_on_me' in res:
del res['depend_on_me']
return res | [
"This function serializes into a simple dict object.\n\n The only usage is to send to poller, and it does not need to have the\n depend_on and depend_on_me properties.\n\n :return: json representation of a Check\n :rtype: dict\n "
] |
Please provide a description of the function:def serialize(self):
# uuid is not in *_properties
res = {
'uuid': self.uuid
}
for prop in self.__class__.properties:
if not hasattr(self, prop):
continue
res[prop] = getattr(self, prop)
if isinstance(self.__class__.properties[prop], SetProp):
res[prop] = list(getattr(self, prop))
return res | [
"This function serializes into a simple dictionary object.\n\n It is used when transferring data to other daemons over the network (http)\n\n Here is the generic function that simply export attributes declared in the\n properties dictionary of the object.\n\n Note that a SetProp property will be serialized as a list.\n\n :return: Dictionary containing key and value from properties\n :rtype: dict\n "
] |
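
A self-contained sketch of the SetProp handling: sets are not JSON-serializable, so they are exported as lists (stub classes, not the real property machinery):

class SetProp:
    """Stub marker class standing in for the real property type"""

class StubItem:
    properties = {'tags': SetProp()}

    def __init__(self):
        self.uuid = 'abc'
        self.tags = {'linux', 'prod'}

    def serialize(self):
        res = {'uuid': self.uuid}
        for prop in self.__class__.properties:
            if not hasattr(self, prop):
                continue
            res[prop] = getattr(self, prop)
            if isinstance(self.__class__.properties[prop], SetProp):
                res[prop] = list(getattr(self, prop))
        return res

# StubItem().serialize() -> {'uuid': 'abc', 'tags': ['linux', 'prod']} (order may vary)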
Please provide a description of the function:def fill_default(self):
for prop, entry in self.__class__.properties.items():
if hasattr(self, prop):
continue
if not hasattr(entry, 'default') or entry.default is NONE_OBJECT:
continue
if hasattr(entry.default, '__iter__'):
setattr(self, prop, copy(entry.default))
else:
setattr(self, prop, entry.default) | [
"\n Define the object properties with a default value when the property is not yet defined\n\n :return: None\n "
] |
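
The copy() on iterable defaults is what prevents all objects from sharing one mutable default value; a tiny demonstration of why it matters:

from copy import copy

default = []            # a shared, mutable default value

a = copy(default)       # what fill_default does for iterables
b = default             # what a plain assignment would do

a.append('x')
b.append('y')
# default is now ['y']: the plain assignment aliased it,
# while the copied value left it untouched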
Please provide a description of the function:def convert_conf_for_unreachable(params):
if params is None:
return
for prop in ['flap_detection_options', 'notification_options',
'snapshot_criteria', 'stalking_options']:
if prop in params:
params[prop] = [p.replace('u', 'x') for p in params[prop]]
if 'initial_state' in params and \
(params['initial_state'] == 'u' or params['initial_state'] == ['u']):
params['initial_state'] = 'x'
if 'freshness_state' in params and \
(params['freshness_state'] == 'u' or params['freshness_state'] == ['u']):
params['freshness_state'] = 'x' | [
"\n The 'u' state for UNREACHABLE has been rewritten in 'x' in:\n * flap_detection_options\n * notification_options\n * snapshot_criteria\n\n So convert value from config file to keep compatibility with Nagios\n\n :param params: parameters of the host before put in properties\n :type params: dict\n :return: None\n "
] |
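
A worked example, runnable against the function above:

params = {
    'notification_options': ['d', 'u', 'r'],
    'initial_state': 'u',
}
convert_conf_for_unreachable(params)
# params -> {'notification_options': ['d', 'x', 'r'], 'initial_state': 'x'}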
Please provide a description of the function:def fill_predictive_missing_parameters(self):
if hasattr(self, 'host_name') and not hasattr(self, 'address'):
self.address = self.host_name
if hasattr(self, 'host_name') and not hasattr(self, 'alias'):
self.alias = self.host_name
if self.initial_state == 'd':
self.state = 'DOWN'
elif self.initial_state == 'x':
self.state = 'UNREACHABLE' | [
"Fill address with host_name if not already set\n and define state with initial_state\n\n :return: None\n "
] |
Please provide a description of the function:def is_correct(self):
state = True
# Internal checks before executing inherited function...
cls = self.__class__
if hasattr(self, 'host_name'):
for char in cls.illegal_object_name_chars:
if char in self.host_name:
self.add_error("[%s::%s] host_name contains an illegal character: %s"
% (self.my_type, self.get_name(), char))
state = False
# Fred: do not alert about missing check_command for a host... this is because 1/ it is
# very verbose if hosts are not checked and 2/ because it is the Nagios default behavior
# if not self.check_command:
# self.add_warning("[%s::%s] has no defined check command"
# % (self.my_type, self.get_name()))
if self.notifications_enabled and not self.contacts:
self.add_warning("[%s::%s] notifications are enabled but no contacts nor "
"contact_groups property is defined for this host"
% (self.my_type, self.get_name()))
return super(Host, self).is_correct() and state | [
"Check if this object configuration is correct ::\n\n * Check our own specific properties\n * Call our parent class is_correct checker\n\n :return: True if the configuration is correct, otherwise False\n :rtype: bool\n "
] |
Please provide a description of the function:def get_name(self):
if not self.is_tpl():
try:
return self.host_name
except AttributeError: # ouch, no hostname
return 'UNNAMEDHOST'
else:
try:
return self.name
except AttributeError: # ouch, no name for this template
return 'UNNAMEDHOSTTEMPLATE' | [
"Get the host name.\n Try several attributes before returning UNNAMED*\n\n :return: The name of the host\n :rtype: str\n "
] |
Please provide a description of the function:def get_groupnames(self, hostgroups):
group_names = []
for hostgroup_id in self.hostgroups:
hostgroup = hostgroups[hostgroup_id]
group_names.append(hostgroup.get_name())
return ','.join(sorted(group_names)) | [
"Get names of the host's hostgroups\n\n :return: comma separated names of hostgroups alphabetically sorted\n :rtype: str\n "
] |
Please provide a description of the function:def get_groupaliases(self, hostgroups):
group_aliases = []
for hostgroup_id in self.hostgroups:
hostgroup = hostgroups[hostgroup_id]
group_aliases.append(hostgroup.alias)
return ','.join(sorted(group_aliases)) | [
"Get aliases of the host's hostgroups\n\n :return: comma separated aliases of hostgroups alphabetically sorted\n :rtype: str\n "
] |
Please provide a description of the function:def is_excluded_for_sdesc(self, sdesc, is_tpl=False):
if not is_tpl and self.service_includes:
return sdesc not in self.service_includes
if self.service_excludes:
return sdesc in self.service_excludes
return False | [
" Check whether this host should have the passed service *description*\n be \"excluded\" or \"not included\".\n\n :param sdesc: service description\n :type sdesc:\n :param is_tpl: True if service is template, otherwise False\n :type is_tpl: bool\n :return: True if service description excluded, otherwise False\n :rtype: bool\n "
] |
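
The precedence can be summed up as: an explicit service_includes whitelist wins over service_excludes, and templates bypass the whitelist. A standalone re-statement with a few worked cases:

def is_excluded(includes, excludes, sdesc, is_tpl=False):
    # standalone re-statement of the method above
    if not is_tpl and includes:
        return sdesc not in includes
    if excludes:
        return sdesc in excludes
    return False

assert is_excluded(['Load'], [], 'Disk') is True        # not whitelisted
assert is_excluded(['Load'], [], 'Load') is False       # whitelisted
assert is_excluded([], ['Load'], 'Load') is True        # blacklisted
assert is_excluded(['Load'], ['Load'], 'Load', is_tpl=True) is True  # templates skip the whitelist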
Please provide a description of the function:def set_state_from_exit_status(self, status, notif_period, hosts, services):
now = time.time()
# we should store the proper last state in last_state:
# if the state was only changed by a problem/impact, the real
# old state is self.state_before_impact (it's the TRUE state
# in fact); otherwise we can take the current state.
# And only if the impact state change is enabled
cls = self.__class__
if (cls.enable_problem_impacts_states_change and
self.is_impact and not self.state_changed_since_impact):
self.last_state = self.state_before_impact
else:
self.last_state = self.state
# There is no case for exit status 1 because it should have been managed by the caller
# like the schedulingitem::consume method.
if status == 0:
self.state = u'UP'
self.state_id = 0
self.last_time_up = int(self.last_state_update)
# self.last_time_up = self.last_state_update
state_code = 'o'
elif status in (2, 3):
self.state = u'DOWN'
self.state_id = 1
self.last_time_down = int(self.last_state_update)
# self.last_time_down = self.last_state_update
state_code = 'd'
elif status == 4:
self.state = u'UNREACHABLE'
self.state_id = 4
self.last_time_unreachable = int(self.last_state_update)
# self.last_time_unreachable = self.last_state_update
state_code = 'x'
else:
self.state = u'DOWN' # exit code UNDETERMINED
self.state_id = 1
self.last_time_down = int(self.last_state_update)
# self.last_time_down = self.last_state_update
state_code = 'd'
if state_code in self.flap_detection_options:
self.add_flapping_change(self.state != self.last_state)
# Now we add a value, we update the is_flapping prop
self.update_flapping(notif_period, hosts, services)
if self.state != self.last_state and \
not (self.state == "DOWN" and self.last_state == "UNREACHABLE"):
self.last_state_change = self.last_state_update
self.duration_sec = now - self.last_state_change | [
"Set the state in UP, DOWN, or UNREACHABLE according to the status of a check result.\n\n :param status: integer between 0 and 3 (but not 1)\n :type status: int\n :return: None\n "
] |
Please provide a description of the function:def is_state(self, status):
if status == self.state:
return True
# Now low status
if status == 'o' and self.state == u'UP':
return True
if status == 'd' and self.state == u'DOWN':
return True
if status in ['u', 'x'] and self.state == u'UNREACHABLE':
return True
return False | [
"Return if status match the current host status\n\n :param status: status to compare ( \"o\", \"d\", \"x\"). Usually comes from config files\n :type status: str\n :return: True if status <=> self.status, otherwise False\n :rtype: bool\n "
] |
Please provide a description of the function:def last_time_non_ok_or_up(self):
non_ok_times = [x for x in [self.last_time_down]
if x > self.last_time_up]
if not non_ok_times:
last_time_non_ok = 0 # todo: program_start would be better?
else:
last_time_non_ok = min(non_ok_times)
return last_time_non_ok | [
"Get the last time the host was in a non-OK state\n\n :return: self.last_time_down if self.last_time_down > self.last_time_up, 0 otherwise\n :rtype: int\n "
] |
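The same selection as a standalone helper, assuming the two timestamps are plain integers (the names are invented for the example):

def last_time_non_ok_or_up(last_time_down, last_time_up):
    # Keep last_time_down only when it is more recent than last_time_up
    non_ok_times = [t for t in [last_time_down] if t > last_time_up]
    return min(non_ok_times) if non_ok_times else 0

assert last_time_non_ok_or_up(last_time_down=200, last_time_up=100) == 200
assert last_time_non_ok_or_up(last_time_down=50, last_time_up=100) == 0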
Please provide a description of the function:def raise_check_result(self):
    if not self.__class__.log_active_checks:
        return
    log_level = 'info'
    if self.state == 'DOWN':
        log_level = 'error'
    elif self.state == 'UNREACHABLE':
        log_level = 'warning'
    brok = make_monitoring_log(
        log_level, 'ACTIVE HOST CHECK: %s;%s;%d;%s' % (self.get_name(), self.state,
                                                       self.attempt, self.output)
    )
    self.broks.append(brok) | [
"Raise ACTIVE CHECK RESULT entry\n Example : \"ACTIVE HOST CHECK: server;DOWN;HARD;1;I don't know what to say...\"\n\n :return: None\n "
] |
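The log line built above, reproduced for sample values (the values themselves are invented):

line = 'ACTIVE HOST CHECK: %s;%s;%d;%s' % ('server', 'DOWN', 1, "I don't know what to say...")
assert line == "ACTIVE HOST CHECK: server;DOWN;1;I don't know what to say..."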
Please provide a description of the function:def raise_alert_log_entry(self):
    if self.__class__.log_alerts:
        log_level = 'info'
        if self.state == 'DOWN':
            log_level = 'error'
        elif self.state == 'UNREACHABLE':
            log_level = 'warning'
        brok = make_monitoring_log(
            log_level, 'HOST ALERT: %s;%s;%s;%d;%s' % (
                self.get_name(), self.state, self.state_type, self.attempt, self.output
            )
        )
        self.broks.append(brok)
    if 'ALIGNAK_LOG_ALERTS' in os.environ:
        if os.environ['ALIGNAK_LOG_ALERTS'] == 'WARNING':
            logger.warning('HOST ALERT: %s;%s;%s;%d;%s', self.get_name(), self.state,
                           self.state_type, self.attempt, self.output)
        else:
            logger.info('HOST ALERT: %s;%s;%s;%d;%s', self.get_name(), self.state,
                        self.state_type, self.attempt, self.output) | [
"Raise HOST ALERT entry\n Format is : \"HOST ALERT: *get_name()*;*state*;*state_type*;*attempt*;*output*\"\n Example : \"HOST ALERT: server;DOWN;HARD;1;I don't know what to say...\"\n\n :return: None\n "
] |
Please provide a description of the function:def raise_initial_state(self):
    if not self.__class__.log_initial_states:
        return
    log_level = 'info'
    if self.state == 'DOWN':
        log_level = 'error'
    elif self.state == 'UNREACHABLE':
        log_level = 'warning'
    brok = make_monitoring_log(
        log_level, 'CURRENT HOST STATE: %s;%s;%s;%d;%s' % (
            self.get_name(), self.state, self.state_type, self.attempt, self.output
        )
    )
    self.broks.append(brok) | [
"Raise CURRENT HOST ALERT entry (info level)\n Format is : \"CURRENT HOST STATE: *get_name()*;*state*;*state_type*;*attempt*;*output*\"\n Example : \"CURRENT HOST STATE: server;DOWN;HARD;1;I don't know what to say...\"\n\n :return: None\n "
] |
Please provide a description of the function:def raise_snapshot_log_entry(self, command):
    if not self.__class__.log_snapshots:
        return
    log_level = 'info'
    if self.state == 'UNREACHABLE':
        log_level = 'warning'
    elif self.state == 'DOWN':
        log_level = 'error'
    brok = make_monitoring_log(
        log_level, "HOST SNAPSHOT: %s;%s;%s;%s;%s" % (
            self.get_name(), self.state, self.state_type, self.attempt, command.get_name()
        )
    )
    self.broks.append(brok) | [
"Raise HOST SNAPSHOT entry (critical level)\n Format is : \"HOST SNAPSHOT: *self.get_name()*;*state*;*state_type*;*attempt*;\n *command.get_name()*\"\n Example : \"HOST SNAPSHOT: server;UP;HARD;1;notify-by-rss\"\n\n :param command: Snapshot command launched\n :type command: alignak.objects.command.Command\n :return: None\n "
] |
Please provide a description of the function:def raise_flapping_start_log_entry(self, change_ratio, threshold):
    if not self.__class__.log_flappings:
        return
    brok = make_monitoring_log(
        'info',
        "HOST FLAPPING ALERT: %s;STARTED; Host appears to have started "
        "flapping (%.1f%% change >= %.1f%% threshold)"
        % (self.get_name(), change_ratio, threshold)
    )
    self.broks.append(brok) | [
"Raise HOST FLAPPING ALERT START entry (critical level)\n Format is : \"HOST FLAPPING ALERT: *self.get_name()*;STARTED;\n Host appears to have started\n flapping (*change_ratio*% change >= *threshold*% threshold)\"\n Example : \"HOST FLAPPING ALERT: server;STARTED;\n Host appears to have started\n flapping (50.6% change >= 50.0% threshold)\"\n\n :param change_ratio: percent of changing state\n :type change_ratio: float\n :param threshold: threshold (percent) to trigger this log entry\n :type threshold: float\n :return: None\n "
] |
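The exact message produced above, evaluated for the sample values used in the summary (nothing else assumed):

msg = ("HOST FLAPPING ALERT: %s;STARTED; Host appears to have started "
       "flapping (%.1f%% change >= %.1f%% threshold)"
       % ("server", 50.6, 50.0))
assert msg == ("HOST FLAPPING ALERT: server;STARTED; Host appears to have started "
               "flapping (50.6% change >= 50.0% threshold)")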
Please provide a description of the function:def raise_acknowledge_log_entry(self):
    if not self.__class__.log_acknowledgements:
        return
    brok = make_monitoring_log(
        'info', "HOST ACKNOWLEDGE ALERT: %s;STARTED; "
                "Host problem has been acknowledged" % self.get_name()
    )
    self.broks.append(brok) | [
"Raise HOST ACKNOWLEDGE ALERT entry (critical level)\n\n :return: None\n "
] |
Please provide a description of the function:def raise_enter_downtime_log_entry(self):
    if not self.__class__.log_downtimes:
        return
    brok = make_monitoring_log(
        'info', "HOST DOWNTIME ALERT: %s;STARTED; "
                "Host has entered a period of scheduled downtime" % self.get_name()
    )
    self.broks.append(brok) | [
"Raise HOST DOWNTIME ALERT entry (critical level)\n Format is : \"HOST DOWNTIME ALERT: *get_name()*;STARTED;\n Host has entered a period of scheduled downtime\"\n Example : \"HOST DOWNTIME ALERT: test_host_0;STARTED;\n Host has entered a period of scheduled downtime\"\n\n :return: None\n "
] |
Please provide a description of the function:def manage_stalking(self, check):
    need_stalk = False
    if check.status == u'waitconsume':
        if check.exit_status == 0 and 'o' in self.stalking_options:
            need_stalk = True
        elif check.exit_status in (1, 2) and 'd' in self.stalking_options:
            need_stalk = True
        # Only stalk when the plugin output actually changed
        if check.output == self.output:
            need_stalk = False
    if need_stalk:
        logger.info("Stalking %s: %s", self.get_name(), self.output) | [
"Check if the host need stalking or not (immediate recheck)\n If one stalking_options matches the exit_status ('o' <=> 0 ...) then stalk is needed\n Raise a log entry (info level) if stalk is needed\n\n :param check: finished check (check.status == 'waitconsume')\n :type check: alignak.check.Check\n :return: None\n "
] |
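A hedged reduction of the stalking decision, with the check collapsed to plain values; the helper name and the letter-code table are spelled out from the branches above:

def needs_stalking(exit_status, new_output, old_output, stalking_options):
    # letter codes from the branches above: 0 -> 'o', 1 and 2 -> 'd'
    code = {0: 'o', 1: 'd', 2: 'd'}.get(exit_status)
    if code is None or code not in stalking_options:
        return False
    # stalk only when the plugin output actually changed
    return new_output != old_output

assert needs_stalking(2, 'CPU 95%', 'CPU 10%', ['o', 'd']) is True
assert needs_stalking(2, 'CPU 95%', 'CPU 95%', ['o', 'd']) is False
assert needs_stalking(0, 'OK', 'WAS KO', ['d']) is False  # 'o' not configured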
Please provide a description of the function:def notification_is_blocked_by_contact(self, notifways, timeperiods, notif, contact):
    return not contact.want_host_notification(notifways, timeperiods,
                                              self.last_chk, self.state, notif.type,
                                              self.business_impact, notif.command_call) | [
"Check if the notification is blocked by this contact.\n\n :param notif: notification created earlier\n :type notif: alignak.notification.Notification\n :param contact: contact we want to notify\n :type notif: alignak.objects.contact.Contact\n :return: True if the notification is blocked, False otherwise\n :rtype: bool\n "
] |
Please provide a description of the function:def get_duration(self):
    mins, secs = divmod(self.duration_sec, 60)
    hours, mins = divmod(mins, 60)
    return "%02dh %02dm %02ds" % (hours, mins, secs) | [
"Get duration formatted\n Format is : \"HHh MMm SSs\"\n Example : \"10h 20m 40s\"\n\n :return: Formatted duration\n :rtype: str\n "
] |
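The chained divmod calls split a plain second count into hours, minutes and seconds; a standalone check (the function name is invented):

def format_duration(duration_sec):
    mins, secs = divmod(duration_sec, 60)
    hours, mins = divmod(mins, 60)
    return "%02dh %02dm %02ds" % (hours, mins, secs)

# 37240 s = 10 h + 20 min + 40 s, matching the docstring example
assert format_duration(37240) == "10h 20m 40s"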
Please provide a description of the function:def _tot_services_by_state(self, services, state):
    return str(sum(1 for s in self.services
                   if services[s].state_id == state)) | [
"Get the number of service in the specified state\n\n :param state: state to filter service\n :type state:\n :return: number of service with s.state_id == state\n :rtype: int\n "
] |
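Usage sketch with the services collection mocked as a plain dict; FakeService and the uuids are invented for the example:

class FakeService(object):
    def __init__(self, state_id):
        self.state_id = state_id

services = {'s1': FakeService(0), 's2': FakeService(2), 's3': FakeService(2)}
host_service_ids = ['s1', 's2', 's3']  # what self.services holds: service uuids

# Same counting expression as above; note the result is a string
total_in_state_2 = str(sum(1 for s in host_service_ids
                           if services[s].state_id == 2))
assert total_in_state_2 == '2'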
Please provide a description of the function:def get_status(self, hosts, services):
    if self.got_business_rule:
        mapping = {
            0: "UP",
            1: "DOWN",
            4: "UNREACHABLE",
        }
        return mapping.get(self.business_rule.get_state(hosts, services), "n/a")
    return self.state | [
"Get the status of this host\n\n :return: \"UP\", \"DOWN\", \"UNREACHABLE\" or \"n/a\" based on host state_id or business_rule state\n :rtype: str\n "
] |
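Illustration of the business-rule branch with the rule mocked; FakeRule and its fixed return value are invented:

mapping = {0: "UP", 1: "DOWN", 4: "UNREACHABLE"}

class FakeRule(object):
    def get_state(self, hosts, services):
        return 4  # pretend the correlated state is UNREACHABLE

assert mapping.get(FakeRule().get_state({}, {}), "n/a") == "UNREACHABLE"
assert mapping.get(2, "n/a") == "n/a"  # unknown ids fall back to "n/a"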