Code
stringlengths 103
85.9k
| Summary
sequencelengths 0
94
|
---|---|
def register_service_into_servicegroups(service, servicegroups):
    """Register a service into the service groups declared in its
    `servicegroups` attribute.

    :param service: the service to register
    :param servicegroups: the servicegroups container (provides add_member)
    :return: None
    """
    if hasattr(service, 'service_description'):
        sname = service.service_description
        shname = getattr(service, 'host_name', '')
        if hasattr(service, 'servicegroups'):
            # Todo: See if we can remove this if
            # The attribute may be a list or a comma separated string
            if isinstance(service.servicegroups, list):
                sgs = service.servicegroups
            else:
                sgs = service.servicegroups.split(',')
            for servicegroup in sgs:
                servicegroups.add_member([shname, sname], servicegroup.strip())
def register_service_dependencies(service, servicedependencies):
    """Register a service dependencies.

    The `service_dependencies` attribute is a flat list alternating host names
    and service descriptions; each (host, description) pair becomes one
    service dependency.

    :param service: the service to register
    :param servicedependencies: the servicedependencies container
    :return: None
    """
    # We explode service_dependencies into Servicedependency
    # We just create serviceDep with goods values (as STRING!),
    # the link pass will be done after
    sdeps = [d.strip() for d in getattr(service, "service_dependencies", [])]
    # even indexes are for hosts, odd indexes are for service_description
    hname = ''
    for i, elt in enumerate(sdeps):
        if i % 2 == 0:  # host
            hname = elt
        else:  # description
            desc = elt
            # we can register it (service) (depend on) -> (hname, desc)
            # If we do not have enough data for service, it's no use
            if hasattr(service, 'service_description') and hasattr(service, 'host_name'):
                # An empty host name means "same host as the service"
                if hname == '':
                    hname = service.host_name
                servicedependencies.add_service_dependency(
                    service.host_name, service.service_description, hname, desc)
def explode(self, hosts, hostgroups, contactgroups, servicegroups, servicedependencies):
    """Explode services, from host, hostgroups, contactgroups, servicegroups
    and dependencies.

    :param hosts: the hosts container
    :param hostgroups: the hostgroups container
    :param contactgroups: the contactgroups container
    :param servicegroups: the servicegroups container
    :param servicedependencies: the servicedependencies container
    :return: None
    """
    # pylint: disable=too-many-locals
    # Then for every service create a copy of the service with just the host;
    # because we are adding services, we can't just loop over self.items,
    # so iterate a snapshot of the keys.
    itemkeys = list(self.items.keys())
    for s_id in itemkeys:
        serv = self.items[s_id]
        # take all hosts from our hostgroup_name into our host_name property
        self.explode_host_groups_into_hosts(serv, hosts, hostgroups)
        # take all contacts from our contact_groups into our contact property
        self.explode_contact_groups_into_contacts(serv, contactgroups)
        hnames = getattr(serv, "host_name", '')
        hnames = list(set([n.strip() for n in hnames.split(',') if n.strip()]))
        # We will duplicate if we have multiple host_name
        # or if we are a template (so a clean service)
        if len(hnames) == 1:
            self.index_item(serv)
        else:
            if len(hnames) >= 2:
                self.explode_services_from_hosts(hosts, serv, hnames)
            # Delete expanded source service, even if some errors exist
            self.remove_item(serv)

    for s_id in self.templates:
        template = self.templates[s_id]
        self.explode_contact_groups_into_contacts(template, contactgroups)
        self.explode_services_from_templates(hosts, template)

    # Explode services that have a duplicate_foreach clause
    duplicates = [serv.uuid for serv in self if getattr(serv, 'duplicate_foreach', '')]
    for s_id in duplicates:
        serv = self.items[s_id]
        self.explode_services_duplicates(hosts, serv)
        if not serv.configuration_errors:
            self.remove_item(serv)

    # Drop services excluded by their host
    to_remove = []
    for service in self:
        host = hosts.find_by_name(service.host_name)
        if host and host.is_excluded_for(service):
            to_remove.append(service)
    for service in to_remove:
        self.remove_item(service)

    # The servicegroups property needs to be filled to get the information,
    # and then just register to this service_group
    for serv in self:
        self.register_service_into_servicegroups(serv, servicegroups)
        self.register_service_dependencies(serv, servicedependencies)
def is_eligible(self, timestamp, status, notif_number, in_notif_time, interval, escal_period):
    """Check if the escalation is eligible (notification is escalated or not).

    Escalation is NOT eligible if ONE of the following conditions is fulfilled:

    * escalation is not time based and the notification number is not in range
      [first_notification;last_notification] (last_notification == 0 means infinity)
    * escalation is time based and the notification time is not in range
      [first_notification_time;last_notification_time] (0 means infinity)
    * status does not match escalation_options ('WARNING' <=> 'w' ...)
    * escalation_period is not legit for this time (now usually)

    :param timestamp: timestamp to check if the timeperiod is valid
    :type timestamp: int
    :param status: item status (one of the short_states keys)
    :type status: str
    :param notif_number: current notification number
    :type notif_number: int
    :param in_notif_time: current notification time
    :type in_notif_time: int
    :param interval: time interval length
    :type interval: int
    :param escal_period: the escalation timeperiod, or None
    :return: True if no blocking condition has been fulfilled, otherwise False
    :rtype: bool
    """
    # pylint: disable=too-many-return-statements
    short_states = {
        u'WARNING': 'w', u'UNKNOWN': 'u', u'CRITICAL': 'c',
        u'RECOVERY': 'r', u'FLAPPING': 'f', u'DOWNTIME': 's',
        u'DOWN': 'd', u'UNREACHABLE': 'x', u'OK': 'o', u'UP': 'o'
    }

    # If we are not time based, we check notification numbers:
    if not self.time_based:
        # Begin with the easy cases
        if notif_number < self.first_notification:
            return False
        # self.last_notification == 0 means no end
        if self.last_notification and notif_number > self.last_notification:
            return False
    # Else we are time based, we must check for the good value
    else:
        # Begin with the easy cases
        if in_notif_time < self.first_notification_time * interval:
            return False
        if self.last_notification_time and \
                in_notif_time > self.last_notification_time * interval:
            return False

    # If our status is not good, we bail out too
    if status in short_states and short_states[status] not in self.escalation_options:
        return False

    # Maybe the time is not in our escalation_period
    if escal_period is not None and not escal_period.is_time_valid(timestamp):
        return False

    # Ok, I do not see why not escalade. So it's True :)
    return True
def get_next_notif_time(self, t_wished, status, creation_time, interval, escal_period):
    """Get the next notification time for the escalation.
    Only legit for time based escalation.

    :param t_wished: time we would like to send a new notification (usually now)
    :type t_wished: int
    :param status: status of the host or service
    :type status: str
    :param creation_time: time the notification was created
    :type creation_time: int
    :param interval: time interval length
    :type interval: int
    :param escal_period: the escalation timeperiod, or None
    :return: timestamp for the next notification or None
    :rtype: int | None
    """
    # NOTE: UNREACHABLE maps to 'x' to stay consistent with is_eligible();
    # this table previously used 'u', which collided with UNKNOWN.
    short_states = {u'WARNING': 'w', u'UNKNOWN': 'u', u'CRITICAL': 'c',
                    u'RECOVERY': 'r', u'FLAPPING': 'f', u'DOWNTIME': 's',
                    u'DOWN': 'd', u'UNREACHABLE': 'x', u'OK': 'o', u'UP': 'o'}

    # If we are not time based, we bail out!
    if not self.time_based:
        return None

    # Check if we are valid
    if status in short_states and short_states[status] not in self.escalation_options:
        return None

    # Look for the min of our future validity
    start = self.first_notification_time * interval + creation_time

    # If we are after the classic next time, we are not asking for a smaller interval
    if start > t_wished:
        return None

    # Maybe the time we found is not a valid one....
    if escal_period is not None and not escal_period.is_time_valid(start):
        return None

    # Ok so I ask for my start as a possibility for the next notification time
    return start
def is_correct(self):
    """Check if this object configuration is correct ::

    * Check our own specific properties
    * Call our parent class is_correct checker

    :return: True if the configuration is correct, otherwise False
    :rtype: bool
    """
    state = True

    # Internal checks before executing inherited function...
    # If we got the _time parameters, we are time based. Unless, we are not :)
    if hasattr(self, 'first_notification_time') or hasattr(self, 'last_notification_time'):
        self.time_based = True

    # Ok now we manage special cases...
    if not hasattr(self, 'contacts') and not hasattr(self, 'contact_groups'):
        self.add_error('%s: I do not have contacts nor contact_groups' % (self.get_name()))
        state = False

    # If time_based or not, we do not check all properties
    if self.time_based:
        if not hasattr(self, 'first_notification_time'):
            self.add_error('%s: I do not have first_notification_time' % (self.get_name()))
            state = False
        if not hasattr(self, 'last_notification_time'):
            self.add_error('%s: I do not have last_notification_time' % (self.get_name()))
            state = False
    else:  # we check classical properties
        if not hasattr(self, 'first_notification'):
            self.add_error('%s: I do not have first_notification' % (self.get_name()))
            state = False
        if not hasattr(self, 'last_notification'):
            self.add_error('%s: I do not have last_notification' % (self.get_name()))
            state = False

    # Change the special_properties definition according to time_based...
    # the parent checker relies on special_properties, so swap it temporarily
    save_special_properties = self.special_properties
    if self.time_based:
        self.special_properties = self.special_properties_time_based

    state_parent = super(Escalation, self).is_correct()

    if self.time_based:
        self.special_properties = save_special_properties

    return state_parent and state
def linkify(self, timeperiods, contacts, services, hosts):
    """Create links between objects::

    * escalation -> timeperiods
    * escalation -> contacts
    * escalation -> services
    * escalation -> hosts

    :param timeperiods: timeperiods to link
    :param contacts: contacts to link
    :param services: services to link
    :param hosts: hosts to link
    :return: None
    """
    self.linkify_with_timeperiods(timeperiods, 'escalation_period')
    self.linkify_with_contacts(contacts)
    self.linkify_es_by_s(services)
    self.linkify_es_by_h(hosts)
def linkify_es_by_s(self, services):
    """Add each escalation uuid into the matching service's `escalations`
    attribute.

    A '*' service description matches every service of the host.

    :param services: service list, used to look for a specific service
    :return: None
    """
    for escalation in self:
        # If no host, no hope of having a service
        if not hasattr(escalation, 'host_name'):
            continue
        es_hname, sdesc = escalation.host_name, escalation.service_description
        if not es_hname.strip() or not sdesc.strip():
            continue
        for hname in strip_and_uniq(es_hname.split(',')):
            if sdesc.strip() == '*':
                slist = services.find_srvs_by_hostname(hname)
                if slist is not None:
                    slist = [services[serv] for serv in slist]
                    for serv in slist:
                        serv.escalations.append(escalation.uuid)
            else:
                for sname in strip_and_uniq(sdesc.split(',')):
                    serv = services.find_srv_by_name_and_hostname(hname, sname)
                    if serv is not None:
                        serv.escalations.append(escalation.uuid)
def linkify_es_by_h(self, hosts):
    """Add each host escalation uuid into the matching host's `escalations`
    attribute.

    Escalations that define a service_description are service escalations
    and are skipped here.

    :param hosts: host list, used to look for a specific host
    :return: None
    """
    for escal in self:
        # If no host, no hope of having a service
        if (not hasattr(escal, 'host_name') or escal.host_name.strip() == '' or
                (hasattr(escal, 'service_description')
                 and escal.service_description.strip() != '')):
            continue
        # I must be NOT a escalation on for service
        for hname in strip_and_uniq(escal.host_name.split(',')):
            host = hosts.find_by_name(hname)
            if host is not None:
                host.escalations.append(escal.uuid)
def explode(self, hosts, hostgroups, contactgroups):
    """Loop over all escalations and explode hostgroups into hosts
    and contactgroups into contacts.

    Calls Item.explode_host_groups_into_hosts and
    Item.explode_contact_groups_into_contacts.

    :param hosts: host list to explode
    :param hostgroups: hostgroup list to explode
    :param contactgroups: contactgroup list to explode
    :return: None
    """
    for i in self:
        # take all hosts from our hostgroup_name into our host_name property
        self.explode_host_groups_into_hosts(i, hosts, hostgroups)
        # take all contacts from our contact_groups into our contact property
        self.explode_contact_groups_into_contacts(i, contactgroups)
def get_hosts_by_explosion(self, hostgroups):
    """Get the hosts of this group, recursively adding the hosts of this
    group's hostgroup members.

    :param hostgroups: the Hostgroups container, used to resolve member names
    :return: list of hosts of this group
    :rtype: list
    """
    # pylint: disable=access-member-before-definition
    # First we tag the hg so it will not be exploded
    # if a son of it already called it
    self.already_exploded = True

    # Now the recursive part: rec_tag is set to False for every HG we explode,
    # so if it is True here, there is a loop in the HG calls... not GOOD!
    if self.rec_tag:
        logger.error("[hostgroup::%s] got a loop in hostgroup definition", self.get_name())
        return self.get_hosts()

    # Ok, not a loop, we tag it and continue
    self.rec_tag = True

    hg_mbrs = self.get_hostgroup_members()
    for hg_mbr in hg_mbrs:
        hostgroup = hostgroups.find_by_name(hg_mbr.strip())
        if hostgroup is not None:
            value = hostgroup.get_hosts_by_explosion(hostgroups)
            if value is not None:
                self.add_members(value)

    return self.get_hosts()
def add_member(self, host_name, hostgroup_name):
    """Add a host string to a hostgroup member;
    if the host group does not exist, create it.

    :param host_name: host name
    :type host_name: str
    :param hostgroup_name: hostgroup name
    :type hostgroup_name: str
    :return: None
    """
    hostgroup = self.find_by_name(hostgroup_name)
    if not hostgroup:
        hostgroup = Hostgroup({'hostgroup_name': hostgroup_name,
                               'alias': hostgroup_name,
                               'members': host_name})
        self.add(hostgroup)
    else:
        hostgroup.add_members(host_name)
def get_members_of_group(self, gname):
    """Get all members of a group which name is given in parameter.

    :param gname: name of the group
    :type gname: str
    :return: list of the hosts in the group, or [] if the group is unknown
    :rtype: list
    """
    hostgroup = self.find_by_name(gname)
    if hostgroup:
        return hostgroup.get_hosts()
    return []
def linkify(self, hosts=None, realms=None, forced_realms_hostgroups=True):
    """Link hostgroups with hosts and realms.

    :param hosts: all Hosts
    :param realms: all Realms
    :param forced_realms_hostgroups: whether hostgroup realms are enforced on hosts
    :return: None
    """
    self.linkify_hostgroups_hosts(hosts)
    self.linkify_hostgroups_realms_hosts(realms, hosts, forced_realms_hostgroups)
def linkify_hostgroups_hosts(self, hosts):
    """For each hostgroup, resolve member host names to host identifiers and
    replace the names by the found identifiers. Unknown names are recorded as
    unknown members; '*' expands to all known hosts.

    :param hosts: the Hosts container
    :return: None
    """
    for hostgroup in self:
        members = hostgroup.get_hosts()
        # The new members identifiers list
        new_members = []
        for member in members:
            # member is a host name
            member = member.strip()
            if not member:  # void entry, skip this
                continue

            if member == '*':
                # All the hosts identifiers list
                new_members.extend(list(hosts.items.keys()))
            else:
                host = hosts.find_by_name(member)
                if host is not None:
                    new_members.append(host.uuid)
                    # Keep the back-reference from host to hostgroup
                    if hostgroup.uuid not in host.hostgroups:
                        host.hostgroups.append(hostgroup.uuid)
                else:
                    hostgroup.add_unknown_members(member)

        # Make members unique
        new_members = list(set(new_members))

        # We find the id, we replace the names
        hostgroup.replace_members(new_members)
def linkify_hostgroups_realms_hosts(self, realms, hosts, forced_realms_hostgroups=True):
    """Check and enforce the realm consistency between hostgroups and their hosts.

    The hostgroup/realm link is already done in the configuration parsing.
    This function raises alerts if a hostgroup and its hosts do not belong
    to the same realm. A hostgroup left on the default realm may inherit the
    realm of its hosts when all those hosts agree.

    :param realms: the Realms container
    :param hosts: the Hosts container
    :param forced_realms_hostgroups: if True, realm mismatches are errors;
        if False, they are only logged
    :return: None
    """
    # pylint: disable=too-many-locals, too-many-nested-blocks, too-many-branches
    logger.info("Hostgroups / hosts / realms relation")
    for hostgroup in self:
        hostgroup_realm_name = hostgroup.realm
        if hostgroup.realm not in realms:
            # The realm attribute is still a name: resolve it to a uuid
            realm = realms.find_by_name(hostgroup.realm)
            if not realm:
                continue
            hostgroup.realm = realm.uuid
        else:
            hostgroup_realm_name = realms[hostgroup.realm].get_name()

        logger.info("- hg: %s in the realm: %s ",
                    hostgroup.get_name(),
                    hostgroup_realm_name + (" (*)" if hostgroup.got_default_realm else ''))

        hostgroup_hosts_errors = []
        hostgroup_new_realm_name = None
        hostgroup_new_realm_failed = False
        for host_uuid in hostgroup:
            if host_uuid not in hosts:
                continue
            host = hosts[host_uuid]

            host_realm_name = host.realm
            if host.realm not in realms:
                host_realm = realms.find_by_name(host.realm)
                if not host_realm:
                    # Host realm is unknown, an error will be raised elsewhere!
                    continue
            else:
                host_realm_name = realms[host.realm].get_name()

            logger.info(" host %s is in the realm: %s",
                        host.get_name(),
                        host_realm_name + (" (*)" if host.got_default_realm else ''))
            if host.got_default_realm:
                # If the host got a default realm it means that no realm is specifically
                # declared for this host. Thus it can inherit its realm from the one of its
                # hostgroup :)
                logger.debug("- apply the realm %s to the host %s from a hostgroup rule (%s)",
                             hostgroup_realm_name, host.get_name(), hostgroup.get_name())
                host.realm = hostgroup.realm
            else:
                # If the host has a realm that is specifically declared then it must the same
                # as its hostgroup one!
                if host.realm != hostgroup.realm:
                    # If the hostgroup had a specified realm
                    if not hostgroup.got_default_realm:
                        # raise an error !
                        hostgroup.add_error(
                            "host %s (realm: %s) is not in the same realm than its "
                            "hostgroup %s (realm: %s)"
                            % (host.get_name(), host_realm_name,
                               hostgroup.get_name(), hostgroup_realm_name))
                    else:
                        # The hosts group had no realm set, it got the default All realm
                        if forced_realms_hostgroups:
                            # Temporary log an error...
                            hostgroup_hosts_errors.append(
                                "host %s (realm: %s) is not in the same realm than its "
                                "hostgroup %s (realm: %s)"
                                % (host.get_name(), host_realm_name,
                                   hostgroup.get_name(), hostgroup_realm_name))
                            if not hostgroup_new_realm_name or \
                                    hostgroup_new_realm_name == host_realm_name:
                                # Potential new host group realm
                                hostgroup_new_realm_name = host_realm_name
                            else:
                                # It still exists a candidate realm for the hostgroup,
                                # raise an error !
                                hostgroup.add_error("hostgroup %s got the default realm but "
                                                    "it has some hosts that are from different "
                                                    "realms: %s and %s. The defined realm "
                                                    "cannot be adjusted!"
                                                    % (hostgroup.get_name(),
                                                       hostgroup_new_realm_name,
                                                       host_realm_name))
                                hostgroup_new_realm_failed = True
                                break
                        else:
                            # I tolerate some hosts from different realms in an hostgroup
                            # that is in the default realm
                            # Temporary log an error...
                            hostgroup_hosts_errors.append(
                                "host %s (realm: %s) is not in the same realm as its "
                                "hostgroup %s (realm: %s)"
                                % (host.get_name(), host_realm_name,
                                   hostgroup.get_name(), hostgroup_realm_name))

        if not forced_realms_hostgroups:
            for error in hostgroup_hosts_errors:
                # hostgroup.add_warning(error)
                logger.info(error)
        else:
            if hostgroup_new_realm_name is None:
                # Do not change the hostgroup realm, it is not possible,
                # so raise the host individual errors!
                for error in hostgroup_hosts_errors:
                    hostgroup.add_error(error)
            elif hostgroup_new_realm_name:
                if not hostgroup_new_realm_failed:
                    # Change the hostgroup realm to suit its hosts
                    hostgroup.add_warning("hostgroup %s gets the realm of its hosts: %s"
                                          % (hostgroup.get_name(), hostgroup_new_realm_name))
                    hostgroup_new_realm = realms.find_by_name(hostgroup_new_realm_name)
                    hostgroup.realm = hostgroup_new_realm.uuid
def explode(self):
    """Fill members with hostgroup_members.

    Each hostgroup is exploded once; recursion loops are detected through
    the temporary `rec_tag` attribute.

    :return: None
    """
    # We do not want a same hostgroup to be exploded again and again
    # so we tag it
    for tmp_hg in list(self.items.values()):
        tmp_hg.already_exploded = False

    for hostgroup in list(self.items.values()):
        if hostgroup.already_exploded:
            continue
        # get_hosts_by_explosion is a recursive
        # function, so we must tag hg so we do not loop
        for tmp_hg in list(self.items.values()):
            tmp_hg.rec_tag = False
        hostgroup.get_hosts_by_explosion(self)

    # We clean the tags
    for tmp_hg in list(self.items.values()):
        if hasattr(tmp_hg, 'rec_tag'):
            del tmp_hg.rec_tag
        del tmp_hg.already_exploded
def run(self):
    """Wrapper to start the CherryPy server.

    This function raises a PortNotFree exception if any socket error is raised.

    :return: None
    :raises PortNotFree: when the HTTP server cannot bind its socket
    """
    def _started_callback():
        """Callback function when CherryPy Engine is started"""
        cherrypy.log("CherryPy engine started and listening...")

    self.cherrypy_thread = None
    try:
        cherrypy.log("Starting CherryPy engine on %s" % (self.uri))
        self.cherrypy_thread = cherrypy.engine.start_with_callback(_started_callback)
        # Will block until the engine is stopped
        cherrypy.engine.block()
        cherrypy.log("Exited from the engine block")
    except socket.error as exp:
        raise PortNotFree("Error: Sorry, the HTTP server did not started correctly: error: %s"
                          % (str(exp)))
def stop(self):  # pylint: disable=no-self-use
    """Wrapper to stop the CherryPy server.

    :return: None
    """
    cherrypy.log("Stopping CherryPy engine (current state: %s)..." % cherrypy.engine.state)
    try:
        cherrypy.engine.exit()
    except RuntimeWarning:
        pass
    except SystemExit:
        cherrypy.log('SystemExit raised: shutting down bus')
    cherrypy.log("Stopped")
def create_queues(self, manager=None):
    """Create the shared queues that will be used by the alignak daemon
    process and this module process.
    Clear the queues if they were already set before recreating new ones.

    Note:
        If manager is None, then we are running the unit tests for the modules
        and we must create some queues for the external modules without a
        SyncManager.

    :param manager: Manager() object
    :type manager: None | object
    :return: None
    """
    self.clear_queues(manager)
    # If no Manager() object, go with classic Queue()
    if not manager:
        self.from_q = Queue()
        self.to_q = Queue()
    else:
        self.from_q = manager.Queue()
        self.to_q = manager.Queue()
def clear_queues(self, manager):
    """Release the resources associated to the queues of this instance.

    :param manager: Manager() object
    :type manager: None | object
    :return: None
    """
    for queue in (self.to_q, self.from_q):
        if queue is None:
            continue
        # If we got no manager, we directly call the clean
        if not manager:
            try:
                queue.close()
                queue.join_thread()
            except AttributeError:
                pass
        # else:
        #     q._callmethod('close')
        #     q._callmethod('join_thread')
    self.to_q = self.from_q = None
def start_module(self):
    """Wrapper for the _main function.
    Catch, log and re-raise any exception occurring in the main function.

    :return: None
    :raises Exception: re-raised wrapper around any error from _main
    """
    try:
        self._main()
    except Exception as exp:
        # Log the full traceback before re-raising a wrapped exception
        logger.exception('%s', traceback.format_exc())
        raise Exception(exp)
def start(self, http_daemon=None):  # pylint: disable=unused-argument
    """Actually (re)start the process if the module is external.
    Try first to stop the process, then create a new Process instance
    with target start_module, and finally start the process.

    :param http_daemon: not used here but can be used in other modules
    :type http_daemon: None | object
    :return: None
    """
    if not self.is_external:
        return

    if self.process:
        self.stop_process()

    logger.info("Starting external process for module %s...", self.name)
    proc = Process(target=self.start_module, args=(), group=None)

    # Under windows we should not call start() on an object that got its process
    # as an object, so we remove it and we set it in a earlier start
    try:
        del self.properties['process']
    except KeyError:
        pass

    proc.start()
    # We save the process data AFTER the fork()
    self.process = proc
    self.properties['process'] = proc
    logger.info("%s is now started (pid=%d)", self.name, proc.pid)
def kill(self):
    """Sometimes terminate() is not enough, we must "help"
    external modules to die...

    :return: None
    """
    logger.info("Killing external module (pid=%d) for module %s...",
                self.process.pid, self.name)
    # terminate() sends SIGTERM on POSIX and calls TerminateProcess on
    # Windows, so a single call covers both platforms (the previous
    # os.name == 'nt' branch was identical to the else branch).
    self.process.terminate()
    # Wait for KILL_TIME seconds before killing the process abruptly
    self.process.join(timeout=KILL_TIME)
    # You do not let me another choice guy...
    if self.process.is_alive():
        logger.warning("%s is still living %d seconds after a normal kill, "
                       "I help it to die", self.name, KILL_TIME)
        os.kill(self.process.pid, signal.SIGKILL)
        self.process.join(1)
        if self.process.is_alive():
            logger.error("%s still living after brutal kill, I leave it.", self.name)
    logger.info("External module killed")
def stop_process(self):
    """Request the module process to stop and release it.

    Does nothing if no process is attached to this module.

    :return: None
    """
    if not self.process:
        return
    logger.info("I'm stopping module %r (pid=%d)", self.name, self.process.pid)
    self.kill()
    # Clean inner process reference
    self.process = None
def manage_brok(self, brok):
    """Request the module to manage the given brok.
    There are a lot of different possible broks to manage. The list is defined
    in the Brok class.

    An internal module may redefine this function or, easier, define only the
    function for the brok it is interested in. Hence a module interested in
    the `service_check_result` broks only needs to define a function named
    `manage_service_check_result_brok`.

    :param brok: the brok to manage
    :return: the handler result, or False when no handler exists for this brok type
    """
    manage = getattr(self, 'manage_' + brok.type + '_brok', None)
    if not manage:
        return False

    # Be sure the brok is prepared before calling the function
    brok.prepare()
    return manage(brok)
def manage_signal(self, sig, frame):  # pylint: disable=unused-argument
    """Generic function to handle signals.

    Sets the interrupted attribute to True so the module main loop stops.

    :param sig: signal sent
    :param frame: frame before catching signal
    :return: None
    """
    logger.info("received a signal: %s", SIGNALS_TO_NAMES_DICT[sig])
    if sig == signal.SIGHUP:
        # if SIGHUP, reload configuration in arbiter
        logger.info("Modules are not able to reload their configuration. "
                    "Stopping the module...")

    logger.info("Request to stop the module")
    self.interrupted = True
def set_signal_handler(self, sigs=None):
    """Set the signal handler to manage_signal (defined in this class).

    By default handlers are set for:
    - signal.SIGTERM, signal.SIGINT
    - signal.SIGUSR1, signal.SIGUSR2
    - signal.SIGHUP

    :param sigs: signals to handle, defaults to the list above
    :return: None
    :raises Exception: on Windows when pywin32 is not installed
    """
    if sigs is None:
        sigs = (signal.SIGTERM, signal.SIGINT, signal.SIGUSR1, signal.SIGUSR2, signal.SIGHUP)

    func = self.manage_signal
    if os.name == "nt":  # pragma: no cover, no Windows implementation currently
        try:
            import win32api
            win32api.SetConsoleCtrlHandler(func, True)
        except ImportError:
            version = ".".join([str(i) for i in os.sys.version_info[:2]])
            raise Exception("pywin32 not installed for Python " + version)
    else:
        for sig in sigs:
            signal.signal(sig, func)
Please provide a description of the function:def _main(self):
"""Module process entry point - only used by external modules.

Sets the process title and signal handlers, then blocks in the
module `main()` function. When `main()` returns or raises, the
module is stopped and the process exits.

:return: None
"""
self.set_proctitle(self.name)
self.set_signal_handler()
logger.info("process for module %s is now running (pid=%d)", self.name, os.getpid())
# Will block here!
try:
self.main()
except (IOError, EOFError):
# Broken pipe / end of stream: the daemon side went away, exit silently
pass
# logger.warning('[%s] EOF exception: %s', self.name, traceback.format_exc())
except Exception as exp: # pylint: disable=broad-except
logger.exception('main function exception: %s', exp)
self.do_stop()
logger.info("process for module %s is now exiting (pid=%d)", self.name, os.getpid())
exit()
"module \"main\" method. Only used by external modules.\n\n :return: None\n "
] |
def no_block_read(output):
    """Try to read a file descriptor in a non blocking mode.

    When fcntl is available (unix only) the descriptor is switched to
    non-blocking mode before reading, so a full 64K PIPE buffer cannot
    deadlock us. Read errors are swallowed and an empty string returned.

    :param output: file or socket to read from
    :type output: file
    :return: data read from the descriptor (possibly empty)
    :rtype: str
    """
    if not fcntl:
        # No fcntl on this platform: give up, nothing read
        return ""
    fd = output.fileno()
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    try:
        return output.read()
    except Exception:  # pylint: disable=broad-except
        # Best effort: nothing available to read right now
        return ""
"Try to read a file descriptor in a non blocking mode\n\n If the fcntl is available (unix only) we try to read in a\n asynchronous mode, so we won't block the PIPE at 64K buffer\n (deadlock...)\n\n :param output: file or socket to read from\n :type output: file\n :return: data read from fd\n :rtype: str\n "
] |
def get_local_environnement(self):
    """Build the environment for this action: a copy of os.environ
    overridden with the action's own `env` variables.

    os.environ itself is never modified: a copy.copy() of it would
    still write through to the real process environment and leak into
    every other check.

    :return: environment variables mapping for the action
    :rtype: dict
    """
    merged = os.environ.copy()
    merged.update(self.env)
    return merged
"\n Mix the environment and the environment variables into a new local\n environment dictionary\n\n Note: We cannot just update the global os.environ because this\n would effect all other checks.\n\n :return: local environment variables\n :rtype: dict\n "
] |
Please provide a description of the function:def execute(self):
"""Start this action command in a subprocess.

Resets the launch bookkeeping (status, check/poll times, wait time),
builds the local environment and the stdout/stderr accumulators, then
delegates the actual process creation to `_execute()`.

:return: reference to the started process (whatever `_execute` returns)
"""
self.status = ACT_STATUS_LAUNCHED
self.check_time = time.time()
self.wait_time = 0.0001
self.last_poll = self.check_time
# Get a local env variables with our additional values
self.local_env = self.get_local_environnement()
# Initialize stdout and stderr.
self.stdoutdata = ''
self.stderrdata = ''
logger.debug("Launch command: '%s', ref: %s, timeout: %s",
self.command, self.ref, self.timeout)
if self.log_actions:
# NOTE(review): assumes ALIGNAK_LOG_ACTIONS is set whenever log_actions
# is True - a bare os.environ[...] lookup would raise KeyError otherwise
if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING':
logger.warning("Launch command: '%s'", self.command)
else:
logger.info("Launch command: '%s'", self.command)
return self._execute()
"Start this action command in a subprocess.\n\n :raise: ActionError\n 'toomanyopenfiles' if too many opened files on the system\n 'no_process_launched' if arguments parsing failed\n 'process_launch_failed': if the process launch failed\n\n :return: reference to the started process\n :rtype: psutil.Process\n "
] |
Please provide a description of the function:def get_outputs(self, out, max_plugins_output_length):
"""Split a plugin raw output into output, long_output and perf_data.

The first line before a '|' is the output; after it, the perfdata.
Following lines are long output until a '|' is met, after which all
remaining text is appended to the perfdata. Escaped pipes (\\|) are
protected during the split and restored afterwards.

Updates the `output`, `perf_data` and `long_output` attributes.

:param out: raw output of a check
:type out: str
:param max_plugins_output_length: maximum output length kept
:type max_plugins_output_length: int
:return: None
"""
# Squeeze all output after max_plugins_output_length
out = out[:max_plugins_output_length]
# manage escaped pipes
out = out.replace(r'\|', '___PROTECT_PIPE___')
# Then cuts by lines
elts = out.split('\n')
# For perf data
elts_line1 = elts[0].split('|')
# First line before | is output, strip it
self.output = elts_line1[0].strip().replace('___PROTECT_PIPE___', '|')
# NOTE(review): Python 2 leftover - str has no decode() in Python 3,
# the AttributeError branch makes this a no-op there
try:
self.output = self.output.decode('utf8', 'ignore')
except UnicodeEncodeError:
pass
except AttributeError:
pass
# Init perfdata as empty
self.perf_data = ''
# After | it is perfdata, strip it
if len(elts_line1) > 1:
self.perf_data = elts_line1[1].strip().replace('___PROTECT_PIPE___', '|')
# Now manage others lines. Before the | it's long_output
# And after it's all perf_data, \n joined
long_output = []
in_perfdata = False
for line in elts[1:]:
# if already in perfdata, direct append
if in_perfdata:
self.perf_data += ' ' + line.strip().replace('___PROTECT_PIPE___', '|')
else: # not already in perf_data, search for the | part :)
elts = line.split('|', 1)
# The first part will always be long_output
long_output.append(elts[0].strip().replace('___PROTECT_PIPE___', '|'))
if len(elts) > 1:
in_perfdata = True
self.perf_data += ' ' + elts[1].strip().replace('___PROTECT_PIPE___', '|')
# long_output is all non output and performance data, joined with \n
self.long_output = '\n'.join(long_output)
# Get sure the performance data are stripped
self.perf_data = self.perf_data.strip()
logger.debug("Command result for '%s': %d, %s", self.command, self.exit_status, self.output)
if self.log_actions:
if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING':
logger.warning("Check result for '%s': %d, %s",
self.command, self.exit_status, self.output)
if self.perf_data:
logger.warning("Performance data for '%s': %s", self.command, self.perf_data)
else:
logger.info("Check result for '%s': %d, %s",
self.command, self.exit_status, self.output)
if self.perf_data:
logger.info("Performance data for '%s': %s", self.command, self.perf_data)
"Get check outputs from single output (split perfdata etc).\n\n Updates output, perf_data and long_output attributes.\n\n :param out: output data of a check\n :type out: str\n :param max_output: max plugin data length\n :type max_output: int\n :return: None\n "
] |
Please provide a description of the function:def check_finished(self, max_plugins_output_length):
"""Handle the action process: poll it, manage timeout, and when it is
finished collect stdout/stderr, the exit code and the CPU times.

While the process is still running, streams are drained in non
blocking mode and the poll wait time grows (TCP-like slow start,
capped at 0.5 s); past `self.timeout` the process is killed and the
action status becomes timeout. Once finished, the exit code and
outputs are normalized and `get_outputs` parses the result.

:param max_plugins_output_length: max plugin data length
:type max_plugins_output_length: int
:return: None
"""
# pylint: disable=too-many-branches
self.last_poll = time.time()
_, _, child_utime, child_stime, _ = os.times()
# Not yet finished...
if self.process.poll() is None:
# We must wait, but checks are variable in time so we do not wait the same
# for a little check or a long ping. So we do like TCP: slow start with a very
# shot time (0.0001 s) increased *2 but do not wait more than 0.5 s.
self.wait_time = min(self.wait_time * 2, 0.5)
now = time.time()
# This log is really spamming... uncomment if you really need this information :)
# logger.debug("%s - Process pid=%d is still alive", now, self.process.pid)
# Get standard outputs in non blocking mode from the process streams
stdout = no_block_read(self.process.stdout)
stderr = no_block_read(self.process.stderr)
try:
self.stdoutdata += stdout.decode("utf-8")
self.stderrdata += stderr.decode("utf-8")
except AttributeError:
pass
if (now - self.check_time) > self.timeout:
logger.warning("Process pid=%d spent too much time: %.2f seconds",
self.process.pid, now - self.check_time)
self._in_timeout = True
self._kill()
self.status = ACT_STATUS_TIMEOUT
self.execution_time = now - self.check_time
self.exit_status = 3
if self.log_actions:
if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING':
logger.warning("Action '%s' exited on timeout (%d s)",
self.command, self.timeout)
else:
logger.info("Action '%s' exited on timeout (%d s)",
self.command, self.timeout)
# Do not keep the process object
del self.process
# Replace stdout with stderr if stdout is empty
self.stdoutdata = self.stdoutdata.strip()
if not self.stdoutdata:
self.stdoutdata = self.stderrdata
# Now grep what we want in the output
self.get_outputs(self.stdoutdata, max_plugins_output_length)
# We can clean the useless properties now
del self.stdoutdata
del self.stderrdata
# Get the user and system time
_, _, n_child_utime, n_child_stime, _ = os.times()
self.u_time = n_child_utime - child_utime
self.s_time = n_child_stime - child_stime
return
return
# The process exited: collect its result
logger.debug("Process pid=%d exited with %d", self.process.pid, self.process.returncode)
if fcntl:
# Get standard outputs in non blocking mode from the process streams
stdout = no_block_read(self.process.stdout)
stderr = no_block_read(self.process.stderr)
else:
# Get standard outputs from the communicate function
(stdout, stderr) = self.process.communicate()
try:
self.stdoutdata += stdout.decode("utf-8")
except (UnicodeDecodeError, AttributeError):
self.stdoutdata += stdout
try:
self.stderrdata += stderr.decode("utf-8")
except (UnicodeDecodeError, AttributeError):
self.stderrdata += stderr
self.exit_status = self.process.returncode
if self.log_actions:
if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING':
logger.warning("Action '%s' exited with code %d", self.command, self.exit_status)
else:
logger.info("Action '%s' exited with code %d",
self.command, self.exit_status)
# We do not need the process now
del self.process
# check for bad syntax in command line:
if (self.stderrdata.find('sh: -c: line 0: unexpected EOF') >= 0 or
(self.stderrdata.find('sh: -c: ') >= 0 and
self.stderrdata.find(': Syntax') >= 0 or
self.stderrdata.find('Syntax error: Unterminated quoted string') >= 0)):
logger.warning("Bad syntax in command line!")
# Very, very ugly. But subprocess._handle_exitstatus does
# not see a difference between a regular "exit 1" and a
# bailing out shell. Strange, because strace clearly shows
# a difference. (exit_group(1) vs. exit_group(257))
self.stdoutdata = self.stdoutdata + self.stderrdata
self.exit_status = 3
# Make sure that exit code is a valid exit code
if self.exit_status not in VALID_EXIT_STATUS:
self.exit_status = 3
# Replace stdout with stderr if stdout is empty
self.stdoutdata = self.stdoutdata.strip()
if not self.stdoutdata:
self.stdoutdata = self.stderrdata
# Now grep what we want in the output
self.get_outputs(self.stdoutdata, max_plugins_output_length)
# We can clean the useless properties now
del self.stdoutdata
del self.stderrdata
self.status = ACT_STATUS_DONE
self.execution_time = time.time() - self.check_time
# Also get the system and user times
_, _, n_child_utime, n_child_stime, _ = os.times()
self.u_time = n_child_utime - child_utime
self.s_time = n_child_stime - child_stime
"Handle action if it is finished (get stdout, stderr, exit code...)\n\n :param max_plugins_output_length: max plugin data length\n :type max_plugins_output_length: int\n :return: None\n "
] |
def copy_shell__(self, new_i):
    """Copy onto ``new_i`` every attribute listed in ONLY_COPY_PROP
    and return it.

    :param new_i: target object receiving the attributes
    :type new_i: object
    :return: the target object, updated
    :rtype: object
    """
    for attr_name in ONLY_COPY_PROP:
        setattr(new_i, attr_name, getattr(self, attr_name))
    return new_i
"Create all attributes listed in 'ONLY_COPY_PROP' and return `self` with these attributes.\n\n :param new_i: object to\n :type new_i: object\n :return: object with new properties added\n :rtype: object\n "
] |
def get_contacts_by_explosion(self, contactgroups):
    """Get the contacts of this group, exploding the sub contact groups
    recursively.

    The group tags itself with `already_exploded` so a son referring to
    it will not explode it again; `rec_tag` (reset on every group before
    the recursion starts) detects loops in the group definitions.

    :param contactgroups: all the known contact groups
    :type contactgroups: alignak.objects.contactgroup.Contactgroups
    :return: the group members, or '' when the group has none
    """
    # pylint: disable=access-member-before-definition
    # Tag myself so I will not be exploded again by one of my sons
    self.already_exploded = True
    # rec_tag was reset before the recursion started: finding it set
    # means there is a loop in the contactgroup definitions
    if self.rec_tag:
        logger.error("[contactgroup::%s] got a loop in contactgroup definition",
                     self.get_name())
        return getattr(self, 'members', '')
    self.rec_tag = True
    for member_name in self.get_contactgroup_members():
        group = contactgroups.find_by_name(member_name.strip())
        if group is None:
            continue
        sub_members = group.get_contacts_by_explosion(contactgroups)
        if sub_members is not None:
            self.add_members(sub_members)
    return getattr(self, 'members', '')
"\n Get contacts of this group\n\n :param contactgroups: Contactgroups object, use to look for a specific one\n :type contactgroups: alignak.objects.contactgroup.Contactgroups\n :return: list of contact of this group\n :rtype: list[alignak.objects.contact.Contact]\n "
] |
def add_member(self, contact_name, contactgroup_name):
    """Add a contact (by name) to a contact group (by name).

    When no group with that name exists yet, a new Contactgroup is
    created with this contact as its only member; otherwise the contact
    is appended to the existing group members.

    :param contact_name: contact name
    :type contact_name: str
    :param contactgroup_name: contact group name
    :type contactgroup_name: str
    :return: None
    """
    group = self.find_by_name(contactgroup_name)
    if group:
        group.add_members(contact_name)
    else:
        # Unknown group: create it on the fly with this single member
        self.add_contactgroup(Contactgroup({'contactgroup_name': contactgroup_name,
                                            'alias': contactgroup_name,
                                            'members': contact_name}))
"Add a contact string to a contact member\n if the contact group do not exist, create it\n\n :param contact_name: contact name\n :type contact_name: str\n :param contactgroup_name: contact group name\n :type contactgroup_name: str\n :return: None\n "
] |
def get_members_of_group(self, gname):
    """Return the contacts of the group named ``gname``.

    :param gname: name of the group
    :type gname: str
    :return: the group contacts, or an empty list when the group is unknown
    :rtype: list[alignak.objects.contact.Contact]
    """
    group = self.find_by_name(gname)
    if not group:
        return []
    return group.get_contacts()
"Get all members of a group which name is given in parameter\n\n :param gname: name of the group\n :type gname: str\n :return: list of contacts in the group\n :rtype: list[alignak.objects.contact.Contact]\n "
] |
def linkify_contactgroups_contacts(self, contacts):
    """Replace the contact names of each group with the contact uuids.

    Empty entries are skipped; names that do not match any known contact
    are recorded as unknown members of the group. The resulting uuid
    list is made unique before replacing the members.

    :param contacts: known contacts to link with
    :type contacts: alignak.objects.contact.Contacts
    :return: None
    """
    for group in self:
        resolved = set()
        for name in group.get_contacts():
            # Strip early so surrounding spaces never matter
            name = name.strip()
            if not name:
                # Void entry, skip it
                continue
            contact = contacts.find_by_name(name)
            if contact is None:
                # Missing contact: keep track of it as unknown
                group.add_unknown_members(name)
            else:
                resolved.add(contact.uuid)
        # Names are replaced by the unique uuids
        group.replace_members(list(resolved))
"Link the contacts with contactgroups\n\n :param contacts: realms object to link with\n :type contacts: alignak.objects.contact.Contacts\n :return: None\n "
] |
def explode(self):
    """Fill the groups members with their contactgroup_members, recursively.

    Each group is tagged so it is never exploded twice, and a recursion
    tag is reset before each explosion so definition loops can be
    detected. All temporary tags are removed afterwards.

    :return: None
    """
    # Tag every group as not-yet-exploded
    for group in list(self.items.values()):
        group.already_exploded = False
    for group in list(self.items.values()):
        if group.already_exploded:
            continue
        # get_contacts_by_explosion is recursive: reset the recursion
        # tag on every group so a loop can be detected
        for other in list(self.items.values()):
            other.rec_tag = False
        group.get_contacts_by_explosion(self)
    # Clean the temporary tags
    for group in list(self.items.values()):
        if hasattr(group, 'rec_tag'):
            del group.rec_tag
        del group.already_exploded
"\n Fill members with contactgroup_members\n\n :return:None\n "
] |
def get_return_from(self, e_handler):
    """Copy the execution result attributes from the given event handler.

    Copied attributes: exit_status, output, long_output, check_time,
    execution_time and perf_data.

    :param e_handler: event handler to copy the results from
    :type e_handler: alignak.eventhandler.EventHandler
    :return: None
    """
    copied = ('exit_status', 'output', 'long_output', 'check_time',
              'execution_time', 'perf_data')
    for attr_name in copied:
        setattr(self, attr_name, getattr(e_handler, attr_name))
"Setter of the following attributes::\n\n * exit_status\n * output\n * long_output\n * check_time\n * execution_time\n * perf_data\n\n :param e_handler: event handler to get data from\n :type e_handler: alignak.eventhandler.EventHandler\n :return: None\n "
] |
def add_flapping_change(self, sample):
    """Record a state-change sample for flap detection.

    The sample is appended to the flapping history, which is kept at
    most ``cls.flap_history`` entries long (oldest samples dropped).
    Nothing is recorded when flap detection is disabled on this item
    or globally.

    :param sample: True when the state changed, False otherwise
    :type sample: bool
    :return: None
    """
    cls = self.__class__
    # Flap detection may be disabled per-item or globally: bail out
    if not (self.flap_detection_enabled and cls.enable_flap_detection):
        return
    self.flapping_changes.append(sample)
    # Keep only the last cls.flap_history samples
    while len(self.flapping_changes) > cls.flap_history:
        self.flapping_changes.pop(0)
"Add a flapping sample and keep cls.flap_history samples\n\n :param sample: Sample to add\n :type sample: bool\n :return: None\n "
] |
Please provide a description of the function:def update_flapping(self, notif_period, hosts, services):
"""Compute the weighted percent of state change from the flapping
samples and switch the flapping state of the host/service.

Recent samples weigh more than old ones (linear ramp from 0.8 to
1.2). The state only toggles when the history is full: flapping
stops below the low threshold and starts above the high one, each
transition raising a log, a notification and an update status brok.

:param notif_period: notification period of this host/service
:type notif_period: alignak.objects.timeperiod.Timeperiod
:param hosts: hosts objects, used to create notifications
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to create notifications
:type services: alignak.objects.service.Services
:return: None
"""
flap_history = self.__class__.flap_history
# We compute the flapping change in %
res = 0.0
i = 0
for has_changed in self.flapping_changes:
i += 1
if has_changed:
# Linear weight: oldest sample counts 0.8, newest 1.2
res += i * (1.2 - 0.8) / flap_history + 0.8
res = res / flap_history
res *= 100
# We can update our value
self.percent_state_change = res
# Look if we are full in our states, because if not
# the value is not accurate
is_full = len(self.flapping_changes) >= flap_history
# Now we get the low_flap_threshold and high_flap_threshold values
# They can be from self, or class
(low_flap_threshold, high_flap_threshold) = (self.low_flap_threshold,
self.high_flap_threshold)
# TODO: no more useful because a default value is defined, but is it really correct?
if low_flap_threshold == -1: # pragma: no cover, never used
cls = self.__class__
low_flap_threshold = cls.global_low_flap_threshold
if high_flap_threshold == -1: # pragma: no cover, never used
cls = self.__class__
high_flap_threshold = cls.global_high_flap_threshold
# Now we check is flapping change, but only if we got enough
# states to look at the value accuracy
if self.is_flapping and res < low_flap_threshold and is_full:
self.is_flapping = False
# We also raise a log entry
self.raise_flapping_stop_log_entry(res, low_flap_threshold)
# and a notification
self.remove_in_progress_notifications(master=True)
self.create_notifications('FLAPPINGSTOP', notif_period, hosts, services)
# And update our status for modules
self.broks.append(self.get_update_status_brok())
if not self.is_flapping and res >= high_flap_threshold and is_full:
self.is_flapping = True
# We also raise a log entry
self.raise_flapping_start_log_entry(res, high_flap_threshold)
# and a notification
self.remove_in_progress_notifications(master=True)
self.create_notifications('FLAPPINGSTART', notif_period, hosts, services)
# And update our status for modules
self.broks.append(self.get_update_status_brok())
"Compute the sample list (self.flapping_changes) and determine\n whether the host/service is flapping or not\n\n :param notif_period: notification period object for this host/service\n :type notif_period: alignak.object.timeperiod.Timeperiod\n :param hosts: Hosts objects, used to create notification if necessary\n :type hosts: alignak.objects.host.Hosts\n :param services: Services objects, used to create notification if necessary\n :type services: alignak.objects.service.Services\n :return: None\n :rtype: Nonetype\n "
] |
def add_attempt(self):
    """Count one more check attempt, capped at max_check_attempts.

    :return: None
    """
    self.attempt = min(self.attempt + 1, self.max_check_attempts)
"Add an attempt when a object is a non-ok state\n\n :return: None\n "
] |
Please provide a description of the function:def do_check_freshness(self, hosts, services, timeperiods, macromodulations, checkmodulations,
checks, when):
"""Check the freshness of this host/service and, when the freshness
period expired, launch a check flagged as a freshness expiry check.

Only applies to items with a freshness threshold that are not
currently being checked and not already freshness-expired, and only
inside the item check period. The forged check result state comes
from the configured `freshness_state` letter (host: o/d/u/x,
service: o/w/c/u/x, anything else mapping to 3/UNKNOWN).

:param hosts: hosts objects, used to launch the check
:param services: services objects, used to launch the check
:param timeperiods: timeperiods objects, used to get check_period
:param macromodulations: macro modulations applied to the command
:param checkmodulations: check modulations applied to the command
:param checks: checks dict, used to get checks_in_progress
:param when: current time, as a timestamp
:return: the forged check, or None
:rtype: None | alignak.check.Check
"""
# pylint: disable=too-many-nested-blocks, too-many-branches
now = when
# Before, check if class (host or service) have check_freshness OK
# Then check if item want freshness, then check freshness
cls = self.__class__
if not self.in_checking and self.freshness_threshold and not self.freshness_expired:
# logger.debug("Checking freshness for %s, last state update: %s, now: %s.",
# self.get_full_name(), self.last_state_update, now)
if os.getenv('ALIGNAK_LOG_CHECKS', None):
logger.info("--ALC-- -> checking freshness for: %s", self.get_full_name())
# If we never checked this item, we begin the freshness period
if not self.last_state_update:
self.last_state_update = int(now)
if self.last_state_update < now - \
(self.freshness_threshold + cls.additional_freshness_latency):
timeperiod = timeperiods[self.check_period]
if timeperiod is None or timeperiod.is_time_valid(now):
# Create a new check for the scheduler
chk = self.launch_check(now, hosts, services, timeperiods,
macromodulations, checkmodulations, checks)
if not chk:
logger.warning("No raised freshness check for: %s", self)
return None
chk.freshness_expiry_check = True
chk.check_time = time.time()
chk.output = "Freshness period expired: %s" % (
datetime.utcfromtimestamp(int(chk.check_time)).strftime(
"%Y-%m-%d %H:%M:%S %Z"))
# Map the configured freshness_state letter to an exit status
if self.my_type == 'host':
if self.freshness_state == 'o':
chk.exit_status = 0
elif self.freshness_state == 'd':
chk.exit_status = 2
elif self.freshness_state in ['u', 'x']:
chk.exit_status = 4
else:
chk.exit_status = 3
else:
if self.freshness_state == 'o':
chk.exit_status = 0
elif self.freshness_state == 'w':
chk.exit_status = 1
elif self.freshness_state == 'c':
chk.exit_status = 2
elif self.freshness_state == 'u':
chk.exit_status = 3
elif self.freshness_state == 'x':
chk.exit_status = 4
else:
chk.exit_status = 3
return chk
else:
logger.debug("Ignored freshness check for %s, because "
"we are not in the check period.", self.get_full_name())
return None
"Check freshness and schedule a check now if necessary.\n\n This function is called by the scheduler if Alignak is configured to check the freshness.\n\n It is called for hosts that have the freshness check enabled if they are only\n passively checked.\n\n It is called for services that have the freshness check enabled if they are only\n passively checked and if their depending host is not in a freshness expired state\n (freshness_expiry = True).\n\n A log is raised when the freshess expiry is detected and the item is set as\n freshness_expiry.\n\n :param hosts: hosts objects, used to launch checks\n :type hosts: alignak.objects.host.Hosts\n :param services: services objects, used launch checks\n :type services: alignak.objects.service.Services\n :param timeperiods: Timeperiods objects, used to get check_period\n :type timeperiods: alignak.objects.timeperiod.Timeperiods\n :param macromodulations: Macro modulations objects, used in commands (notif, check)\n :type macromodulations: alignak.objects.macromodulation.Macromodulations\n :param checkmodulations: Checkmodulations objects, used to change check command if necessary\n :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations\n :param checks: checks dict, used to get checks_in_progress for the object\n :type checks: dict\n :return: A check or None\n :rtype: None | object\n "
] |
def set_myself_as_problem(self, hosts, services, timeperiods, bi_modulations):
    # pylint: disable=too-many-locals
    """Declare myself as a problem and warn all the hosts/services that
    depend on me (`act_depend_of_me`) so they register themselves as
    my impacts.

    Impacted items may propagate the problem to their own dependents,
    so impacts are collected recursively. When the impacts list
    changed, the business impact value is updated and an update status
    brok is raised.

    :param hosts: hosts objects, used to resolve the impacted items
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects, used to resolve the impacted items
    :type services: alignak.objects.service.Services
    :param timeperiods: timeperiods objects, used for the dependency periods
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :param bi_modulations: business impact modulations objects
    :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
    :return: None
    """
    now = time.time()
    self.is_problem = True
    # We should warn the potential impacts of our problem and they
    # should be cool to register themselves, so I get my impacts list
    impacts = list(self.impacts)
    for (impact_id, status, timeperiod_id, _) in self.act_depend_of_me:
        # Resolve the impacted item
        if impact_id in hosts:
            impact = hosts[impact_id]
        elif impact_id in services:
            impact = services[impact_id]
        else:
            logger.warning("Problem with my impacts: %s", self)
            # Bug fix: the impact could not be resolved - do not fall
            # through with an unbound (or stale) `impact` reference
            continue
        timeperiod = timeperiods[timeperiod_id]
        for stat in status:
            if not self.is_state(stat):
                continue
            # Bail out if the dependency period does not apply now
            if timeperiod is None or timeperiod.is_time_valid(now):
                new_impacts = impact.register_a_problem(self, hosts, services, timeperiods,
                                                        bi_modulations)
                impacts.extend(new_impacts)
    # Only update impacts and create a new brok if impacts changed
    s_impacts = set(impacts)
    if s_impacts == set(self.impacts):
        return
    self.impacts = list(s_impacts)
    # We can update our business_impact value now
    self.update_business_impact_value(hosts, services, timeperiods, bi_modulations)
    # And we register a new brok for the status update
    self.broks.append(self.get_update_status_brok())
" Raise all impact from my error. I'm setting myself\n as a problem, and I register myself as this in all\n hosts/services that depend_on_me. So they are now my\n impacts\n\n :param hosts: hosts objects, used to get impacts\n :type hosts: alignak.objects.host.Hosts\n :param services: services objects, used to get impacts\n :type services: alignak.objects.service.Services\n :param timeperiods: Timeperiods objects, used to get act_depend_of_me timeperiod\n :type timeperiods: alignak.objects.timeperiod.Timeperiods\n :param bi_modulations: business impact modulations objects\n :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations\n :return: None\n "
] |
Please provide a description of the function:def update_business_impact_value(self, hosts, services, timeperiods, bi_modulations):
"""Update the `business_impact` value of this host/service.

The configured value is saved once into `my_own_business_impact`.
The first currently-active business impact modulation, if any, sets
the value. When the item has impacts, the value is raised to the
maximum business impact of those impacts. Without impacts and
outside any modulation period, the saved configured value is
restored.

:param hosts: hosts objects, used to look up the impacts
:param services: services objects, used to look up the impacts
:param timeperiods: timeperiods objects, used to get modulation_period
:param bi_modulations: business impact modulations objects
:return: None
"""
# First save our business_impact if not already do
if self.my_own_business_impact == -1:
self.my_own_business_impact = self.business_impact
# We look at our crit modulations. If one apply, we take apply it
# and it's done
in_modulation = False
for bi_modulation_id in self.business_impact_modulations:
bi_modulation = bi_modulations[bi_modulation_id]
now = time.time()
period = timeperiods[bi_modulation.modulation_period]
if period is None or period.is_time_valid(now):
self.business_impact = bi_modulation.business_impact
in_modulation = True
# We apply the first available, that's all
break
# If we truly have impacts, we get the max business_impact
# if it's huge than ourselves
if self.impacts:
bp_impacts = [hosts[elem].business_impact for elem in self.impacts if elem in hosts]
bp_impacts.extend([services[elem].business_impact for elem in self.impacts
if elem in services])
self.business_impact = max(self.business_impact, max(bp_impacts))
return
# If we are not a problem, we setup our own_crit if we are not in a
# modulation period
if self.my_own_business_impact != -1 and not in_modulation:
self.business_impact = self.my_own_business_impact
"We update our 'business_impact' value with the max of\n the impacts business_impact if we got impacts. And save our 'configuration'\n business_impact if we do not have do it before\n If we do not have impacts, we revert our value\n\n :param hosts: hosts objects, used to get impacts\n :type hosts: alignak.objects.host.Hosts\n :param services: services objects, used to get impacts\n :type services: alignak.objects.service.Services\n :param timeperiods: Timeperiods objects, used to get modulation_period\n :type timeperiods: alignak.objects.timeperiod.Timeperiods\n :param bi_modulations: business impact modulations objects\n :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations\n :return: None\n TODO: SchedulingItem object should not handle other schedulingitem obj.\n We should call obj.register* on both obj.\n This is 'Java' style\n "
] |
Please provide a description of the function:def no_more_a_problem(self, hosts, services, timeperiods, bi_modulations):
"""Unregister this object as a problem: warn all its impacts that
the problem is gone, drop the impacts list and recompute the
business impact value. When the object actually was a problem, an
update status brok is raised.

:param hosts: hosts objects, used to resolve the impacts
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to resolve the impacts
:type services: alignak.objects.service.Services
:param timeperiods: timeperiods objects, used by update_business_impact_value
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param bi_modulations: business impact modulations objects
:return: None
"""
was_pb = self.is_problem
if self.is_problem:
self.is_problem = False
# we warn impacts that we are no more a problem
for impact_id in self.impacts:
if impact_id in hosts:
impact = hosts[impact_id]
else:
impact = services[impact_id]
impact.unregister_a_problem(self)
# we can just drop our impacts list
self.impacts = []
# We update our business_impact value, it's not a huge thing :)
self.update_business_impact_value(hosts, services, timeperiods, bi_modulations)
# If we were a problem, we say to everyone
# our new status, with good business_impact value
if was_pb:
# And we register a new broks for update status
self.broks.append(self.get_update_status_brok())
"Remove this objects as an impact for other schedulingitem.\n\n :param hosts: hosts objects, used to get impacts\n :type hosts: alignak.objects.host.Hosts\n :param services: services objects, used to get impacts\n :type services: alignak.objects.service.Services\n :param timeperiods: Timeperiods objects, used for update_business_impact_value\n :type timeperiods: alignak.objects.timeperiod.Timeperiods\n :param bi_modulations: business impact modulation are used when setting myself as problem\n :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations\n :return: None\n TODO: SchedulingItem object should not handle other schedulingitem obj.\n We should call obj.register* on both obj.\n This is 'Java' style\n "
] |
Please provide a description of the function:def register_a_problem(self, prob, hosts, services, timeperiods, bi_modulations):
"""Register the given problem as one of my source problems and mark
myself as an impact. Called recursively by the problem's potential
impacts, so each of them updates its own source_problems list.

If I was a problem myself, I am not anymore (the fault is now my
parent's). The problem is then propagated to the items that depend
on me, within their dependency periods, and all the collected
impacts are returned.

:param prob: the problem to register
:type prob: alignak.objects.schedulingitem.SchedulingItem
:param hosts: hosts objects, used to resolve act_depend_of_me entries
:param services: services objects, used to resolve act_depend_of_me entries
:param timeperiods: timeperiods objects, used for the dependency periods
:param bi_modulations: business impact modulations objects
:return: list of the host/service uuids that are impacts
:rtype: list
"""
# pylint: disable=too-many-locals
# Maybe we already have this problem? If so, bailout too
if prob.uuid in self.source_problems:
return []
now = time.time()
was_an_impact = self.is_impact
# Our father already look if he impacts us. So if we are here,
# it's that we really are impacted
self.is_impact = True
impacts = []
# Ok, if we are impacted, we can add it in our
# problem list
# TODO: remove this unused check
if self.is_impact:
logger.debug("I am impacted: %s", self)
# Maybe I was a problem myself, now I can say: not my fault!
if self.is_problem:
self.no_more_a_problem(hosts, services, timeperiods, bi_modulations)
# Ok, we are now impacted, we should take the good state
# but only when we just go to the impacted state
if not was_an_impact:
self.set_impact_state()
# Ok now we can be a simple impact
impacts.append(self.uuid)
if prob.uuid not in self.source_problems:
self.source_problems.append(prob.uuid)
# we should send this problem to all potential impacted that
# depend on us
for (impacted_item_id, status, timeperiod_id, _) in self.act_depend_of_me:
# Check if the status is ok for impact
if impacted_item_id in hosts:
impact = hosts[impacted_item_id]
else:
impact = services[impacted_item_id]
timeperiod = timeperiods[timeperiod_id]
for stat in status:
if self.is_state(stat):
# now check if we should bailout because of a
# not good timeperiod for dep
if timeperiod is None or timeperiod.is_time_valid(now):
new_impacts = impact.register_a_problem(prob, hosts,
services, timeperiods,
bi_modulations)
impacts.extend(new_impacts)
# And we register a new broks for update status
self.broks.append(self.get_update_status_brok())
# now we return all impacts (can be void of course)
return impacts
"Call recursively by potentials impacts so they\n update their source_problems list. But do not\n go below if the problem is not a real one for me\n like If I've got multiple parents for examples\n\n :param prob: problem to register\n :type prob: alignak.objects.schedulingitem.SchedulingItem\n :param hosts: hosts objects, used to get object in act_depend_of_me\n :type hosts: alignak.objects.host.Hosts\n :param services: services objects, used to get object in act_depend_of_me\n :type services: alignak.objects.service.Services\n :param timeperiods: Timeperiods objects, used for all kind of timeperiod (notif, check)\n :type timeperiods: alignak.objects.timeperiod.Timeperiods\n :param bi_modulations: business impact modulation are used when setting myself as problem\n :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations\n :return: list of host/service that are impacts\n :rtype: list[alignak.objects.schedulingitem.SchedulingItem]\n TODO: SchedulingItem object should not handle other schedulingitem obj.\n We should call obj.register* on both obj.\n This is 'Java' style\n "
] |
Please provide a description of the function:def unregister_a_problem(self, prob):
"""Remove the given problem from my source problems list and, when
no source problem remains, leave the 'impact' state. An update
status brok is raised.

:param prob: problem to remove
:type prob: alignak.objects.schedulingitem.SchedulingItem
:return: None
"""
self.source_problems.remove(prob.uuid)
# For know if we are still an impact, maybe our dependencies
# are not aware of the remove of the impact state because it's not ordered
# so we can just look at if we still have some problem in our list
if not self.source_problems:
self.is_impact = False
# No more an impact, we can unset the impact state
self.unset_impact_state()
# And we register a new broks for update status
self.broks.append(self.get_update_status_brok())
"Remove the problem from our problems list\n and check if we are still 'impacted'\n\n :param prob: problem to remove\n :type prob: alignak.objects.schedulingitem.SchedulingItem\n :return: None\n "
] |
def is_enable_action_dependent(self, hosts, services):
    """Tell whether actions (notifications) remain enabled with regard
    to my dependencies.

    An action stays enabled when at least one dependency either lists
    'n' (none) in its failure criteria, or is NOT in any of the states
    of its criteria (i.e. that parent is considered up).

    :param hosts: hosts objects, used to resolve act_depend_of entries
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects, used to resolve act_depend_of entries
    :type services: alignak.objects.service.Services
    :return: True if the action is enabled, False otherwise
    :rtype: bool
    """
    for (dep_id, status, _, _) in self.act_depend_of:
        if 'n' in status:
            # 'n' (none) criteria: this dependency never inhibits actions
            return True
        dep = hosts[dep_id] if dep_id in hosts else services[dep_id]
        # The parent is considered down when it matches any criteria state
        parent_is_down = any(dep.is_state(state) for state in status)
        if not parent_is_down:
            return True
    return False
"\n Check if dependencies states match dependencies statuses\n This basically means that a dependency is in a bad state and\n it can explain this object state.\n\n :param hosts: hosts objects, used to get object in act_depend_of\n :type hosts: alignak.objects.host.Hosts\n :param services: services objects, used to get object in act_depend_of\n :type services: alignak.objects.service.Services\n :return: True if all dependencies matches the status, false otherwise\n :rtype: bool\n "
] |
def check_and_set_unreachability(self, hosts, services):
    """Set this object as unreachable when every parent it depends on is down.

    todo: this function do not care about execution_failure_criteria!

    :param hosts: hosts objects, used to resolve dependency items
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects, used to resolve dependency items
    :type services: alignak.objects.service.Services
    :return: None
    """
    down_states = ['d', 'DOWN', 'c', 'CRITICAL', 'u', 'UNKNOWN', 'x', 'UNREACHABLE']
    for (dep_id, _, _, _) in self.act_depend_of:
        dep = hosts[dep_id] if dep_id in hosts else services[dep_id]
        if dep.state not in down_states:
            # At least one parent is alive: we stay reachable
            return
    # Every parent is down
    self.set_unreachable()
def do_i_raise_dependency(self, status, inherit_parents, hosts, services, timeperiods):
    """Check if this object or one of its check dependencies matches the status.

    :param status: state list where dependency matters (notification failure criteria)
    :type status: list
    :param inherit_parents: recurse over parents
    :type inherit_parents: bool
    :param hosts: hosts objects, used to raise dependency check
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects, used to raise dependency check
    :type services: alignak.objects.service.Services
    :param timeperiods: Timeperiods objects, used for the dependency periods
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :return: True if one state matched the status list, otherwise False
    :rtype: bool
    """
    # pylint: disable=too-many-locals
    # Do I raise dep?
    for stat in status:
        if self.is_state(stat):
            return True

    # If we do not inherit parent, we have no reason to be blocking
    if not inherit_parents:
        return False

    # Ok, I do not raise dep, but my dep maybe raise me
    now = time.time()
    for (dep_id, dep_status, _, timeperiod_id, inh_parent) in self.chk_depend_of:
        if dep_id in hosts:
            dep = hosts[dep_id]
        else:
            dep = services[dep_id]
        timeperiod = timeperiods[timeperiod_id]
        # Recurse into the dependency; a None timeperiod means 24x7
        if dep.do_i_raise_dependency(dep_status, inh_parent, hosts, services, timeperiods):
            if timeperiod is None or timeperiod.is_time_valid(now):
                return True

    # No, I really do not raise...
    return False
def is_no_check_dependent(self, hosts, services, timeperiods):
    """Check whether one of our check dependencies currently raises a dependency.

    :param hosts: hosts objects, used to raise dependency check
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects, used to raise dependency check
    :type services: alignak.objects.service.Services
    :param timeperiods: Timeperiods objects, used for the dependency periods
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :return: True if this object has a check dependency raised, False otherwise
    :rtype: bool
    """
    now = time.time()
    for (dep_id, status, _, timeperiod_id, inh_parent) in self.chk_depend_of:
        timeperiod = timeperiods[timeperiod_id]
        # A None timeperiod means "always valid" (24x7)
        if timeperiod is not None and not timeperiod.is_time_valid(now):
            continue
        dep = hosts[dep_id] if dep_id in hosts else services[dep_id]
        if dep.do_i_raise_dependency(status, inh_parent, hosts, services, timeperiods):
            return True
    return False
def raise_dependencies_check(self, ref_check, hosts, services, timeperiods, macromodulations,
                             checkmodulations, checks):
    """Get checks that we depend on if EVERY following condition is met::

    * timeperiod is valid
    * dep.last_state_update < now - cls.cached_check_horizon (check of dependency is "old")

    :param ref_check: Check we want to get dependency from
    :type ref_check: alignak.check.Check
    :param hosts: hosts objects, used for almost every operation
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects, used for almost every operation
    :type services: alignak.objects.service.Services
    :param timeperiods: Timeperiods objects, used for all kind of timeperiod
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :param macromodulations: Macro modulations objects, used in commands
    :type macromodulations: alignak.objects.macromodulation.Macromodulations
    :param checkmodulations: Checkmodulations objects, used to change check command
    :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
    :param checks: checks dict, used to get checks_in_progress for the object
    :type checks: dict
    :return: new checks created and checks already in progress
    :rtype: dict
    """
    # pylint: disable=too-many-locals, too-many-nested-blocks
    now = time.time()
    cls = self.__class__
    new_checks = []
    checking_checks = []
    for (dep_id, _, timeperiod_id, _) in self.act_depend_of:
        if dep_id in hosts:
            dep_item = hosts[dep_id]
        else:
            dep_item = services[dep_id]
        timeperiod = timeperiods[timeperiod_id]
        # If the dep_item timeperiod is not valid, do not raise the dep,
        # None=everytime
        if timeperiod is None or timeperiod.is_time_valid(now):
            # if the update is 'fresh', do not raise dep,
            # cached_check_horizon = cached_service_check_horizon for service
            if dep_item.last_state_update < now - cls.cached_check_horizon:
                # Do not launch the check if it depends on a passive check of if a check
                # is yet planned
                if dep_item.active_checks_enabled:
                    if not dep_item.in_checking:
                        newchk = dep_item.launch_check(now, hosts, services, timeperiods,
                                                       macromodulations, checkmodulations,
                                                       checks, ref_check, dependent=True)
                        if newchk is not None:
                            new_checks.append(newchk)
                    else:
                        # A check is already running: register ourselves on it
                        if dep_item.checks_in_progress:
                            check_uuid = dep_item.checks_in_progress[0]
                            checks[check_uuid].depend_on_me.append(ref_check)
                            checking_checks.append(check_uuid)
    return {'new': new_checks, 'checking': checking_checks}
def schedule(self, hosts, services, timeperiods, macromodulations, checkmodulations,
             checks, force=False, force_time=None):
    """Main scheduling function.

    If a check is in progress, or active checks are disabled, do not schedule
    a check. The check interval changes with the HARD state::

    * SOFT: retry_interval
    * HARD: check_interval

    The first scheduling is evenly distributed, so all checks are not
    launched at the same time.

    :param hosts: hosts objects, used for almost every operation
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects, used for almost every operation
    :type services: alignak.objects.service.Services
    :param timeperiods: Timeperiods objects, used for all kind of timeperiod
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :param macromodulations: Macro modulations objects, used in commands
    :type macromodulations: alignak.objects.macromodulation.Macromodulations
    :param checkmodulations: Checkmodulations objects, used to change check command
    :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
    :param checks: checks dict, used to get checks_in_progress for the object
    :type checks: dict
    :param force: tell if we forced this object to schedule a check
    :type force: bool
    :param force_time: time we would like the check to be scheduled
    :type force_time: None | int
    :return: None or the check launched by launch_check
    """
    # pylint: disable=too-many-branches, too-many-arguments, too-many-locals
    # next_chk is already set, do not change
    # unless we force the check or the time
    if self.in_checking and not (force or force_time):
        return None

    cls = self.__class__
    # if no active check and no force, no check
    if (not self.active_checks_enabled or not cls.execute_checks) and not force:
        logger.debug("No check for %s", self.get_full_name())
        return None

    now = time.time()
    current_next_check = self.next_chk

    # If check_interval is 0, we should not add a check for a service
    # but suppose a 5 min check interval for an host
    if self.check_interval == 0 and not force:
        if cls.my_type == 'service':
            return None

        self.check_interval = 300 / cls.interval_length

    # Interval change is in a HARD state or not
    # If the retry is 0, take the normal value
    if self.state_type == 'HARD' or self.retry_interval == 0:
        interval = self.check_interval * cls.interval_length
    else:
        interval = self.retry_interval * cls.interval_length

    # Determine when a new check (randomize and distribute next check time)
    # or recurring check should happen.
    if self.next_chk == 0:
        # At the start, we cannot have an interval more than cls.max_check_spread
        # Global service_max_check_spread or host_max_check_spread in configuration
        # is set as max_check_spread in the objects.
        interval = min(interval, cls.max_check_spread * cls.interval_length)
        time_add = interval * random.uniform(0.0, 1.0)
    else:
        time_add = interval

    # Do the actual Scheduling now
    # If not force_time, try to schedule
    if force_time is None:
        check_period = None
        if getattr(self, 'check_period', None) is not None:
            check_period = timeperiods[self.check_period]

        # Do not calculate next_chk based on current time, but
        # based on the last check execution time.
        # Important for consistency of data for trending.
        if self.next_chk == 0 or self.next_chk is None:
            self.next_chk = now

        # If the neck_chk is already in the future, do not touch it.
        # But if == 0, means was 0 in fact, schedule it too
        if self.next_chk <= now:
            # maybe we do not have a check_period, if so, take always good (24x7)
            if check_period:
                self.next_chk = check_period.get_next_valid_time_from_t(
                    self.next_chk + time_add)
            else:
                self.next_chk = int(self.next_chk + time_add)

        # Maybe we load next_chk from retention and the
        # value of the next_chk is still in the past even after adding an interval
        if self.next_chk < now:
            interval = min(interval, cls.max_check_spread * cls.interval_length)
            time_add = interval * random.uniform(0.0, 1.0)

            # if we got a check period, use it, if now, use now
            if check_period:
                self.next_chk = check_period.get_next_valid_time_from_t(now + time_add)
            else:
                self.next_chk = int(now + time_add)
        # else: keep the self.next_chk value in the future
    else:
        self.next_chk = int(force_time)

    # If next time is None, do not go
    if self.next_chk is None:
        # Nagios do not raise it, I'm wondering if we should
        return None

    logger.debug("-> schedule: %s / %s (interval: %d, added: %d)",
                 self.get_full_name(),
                 datetime.utcfromtimestamp(self.next_chk).strftime('%Y-%m-%d %H:%M:%S'),
                 interval, time_add)
    if current_next_check != self.next_chk and os.getenv('ALIGNAK_LOG_CHECKS', None):
        logger.info("--ALC-- -> scheduled the next check for %s "
                    "at %s (interval: %d, added: %d)",
                    self.get_full_name(),
                    datetime.utcfromtimestamp(self.next_chk).strftime('%Y-%m-%d %H:%M:%S'),
                    interval, time_add)

    # Get the command to launch, and put it in queue
    return self.launch_check(self.next_chk, hosts, services, timeperiods, macromodulations,
                             checkmodulations, checks, force=force)
def compensate_system_time_change(self, difference):  # pragma: no cover,
    # not with unit tests
    """Shift time-related properties after a system clock change.

    :param difference: seconds between the new time and the old time (may be negative)
    :type difference: int
    :return: None
    """
    # We only need to adjust a few timestamp attributes
    for attribute in ('last_notification', 'last_state_change', 'last_hard_state_change'):
        current = getattr(self, attribute)
        # Clamp at 0: do not go below 1970 :)
        setattr(self, attribute, max(0, current + difference))
def disable_active_checks(self, checks):
    """Disable active checks and fake-complete every in-flight check.

    Each check in progress is flagged as waiting to be consumed and filled
    with the current object state so the scheduler can process it.

    :param checks: Checks object, to update all checks in progress
    :type checks: alignak.objects.check.Checks
    :return: None
    """
    self.active_checks_enabled = False
    for chk_id in self.checks_in_progress:
        in_flight = checks[chk_id]
        in_flight.status = ACT_STATUS_WAIT_CONSUME
        # Mirror the current object state into the check result
        in_flight.exit_status = self.state_id
        in_flight.output = self.output
        in_flight.check_time = time.time()
        in_flight.execution_time = 0
        in_flight.perf_data = self.perf_data
def remove_in_progress_check(self, check):
    """Forget a consumed check and refresh the in_checking state.

    :param check: check to remove from the in-progress list
    :type check: alignak.objects.check.Check
    :return: None
    """
    # The check is consumed; it may already be gone from the list
    try:
        self.checks_in_progress.remove(check)
    except ValueError:
        pass
    self.update_in_checking()
def remove_in_progress_notification(self, notification):
    """Remove a notification and mark it as a zombie.

    :param notification: the notification to remove
    :type notification: alignak.notification.Notification
    :return: None
    """
    # Unknown notification: nothing to do
    if notification.uuid not in self.notifications_in_progress:
        return
    notification.status = ACT_STATUS_ZOMBIE
    del self.notifications_in_progress[notification.uuid]
def remove_in_progress_notifications(self, master=True):
    """Remove all notifications from notifications_in_progress.

    Preserves some specific notification types (downtime, custom, ack).

    :param master: remove master notifications only (those without a contact)
                   if True (default value)
    :type master: bool
    :return: None
    """
    # Notification types that must survive a cleanup
    preserved = [u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED',
                 u'CUSTOM', u'ACKNOWLEDGEMENT']
    for notification in list(self.notifications_in_progress.values()):
        # Master mode: ignore child (contact) notifications
        if master and notification.contact:
            continue
        if notification.type in preserved:
            continue
        self.remove_in_progress_notification(notification)
def get_event_handlers(self, hosts, macromodulations, timeperiods, ext_cmd=False):
    """Raise event handlers if NONE of the following conditions is met::

    * ext_cmd is False and event handlers are disabled (globally or locally)
    * ext_cmd is False and the object is in a scheduled downtime while event
      handlers are not allowed during downtimes
    * self.event_handler and cls.global_event_handler are None

    :param hosts: hosts objects, used to get data for macros
    :type hosts: alignak.objects.host.Hosts
    :param macromodulations: Macro modulations objects, used in commands
    :type macromodulations: alignak.objects.macromodulation.Macromodulations
    :param timeperiods: Timeperiods objects, used for macros evaluation
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :param ext_cmd: tells if this function was called when handling an external command
    :type ext_cmd: bool
    :return: None
    """
    cls = self.__class__

    # The external command always pass
    # if not, only if we enable them (auto launch)
    if not ext_cmd and (not self.event_handler_enabled or not cls.enable_event_handlers):
        logger.debug("Event handler is disabled for %s", self.get_full_name())
        return

    # If we do not force and we are in downtime, bailout
    # if the no_event_handlers_during_downtimes is set in the configuration
    if not ext_cmd and self.in_scheduled_downtime and cls.no_event_handlers_during_downtimes:
        logger.debug("Event handler will not be launched. "
                     "The item %s is in a scheduled downtime", self.get_full_name())
        return

    # A local event handler takes precedence over the global one
    if self.event_handler is not None:
        event_handler = self.event_handler
    elif cls.global_event_handler is not None:
        event_handler = cls.global_event_handler
    else:
        return

    macroresolver = MacroResolver()
    data = self.get_data_for_event_handler(hosts)
    cmd = macroresolver.resolve_command(event_handler, data, macromodulations, timeperiods)

    event_h = EventHandler({
        'command': cmd,
        'timeout': cls.event_handler_timeout,
        'ref': self.uuid,
        'reactionner_tag': event_handler.reactionner_tag
    })
    self.raise_event_handler_log_entry(event_handler)

    # ok we can put it in our temp action queue
    self.actions.append(event_h)
def get_snapshot(self, hosts, macromodulations, timeperiods):  # pragma: no cover, not yet!
    """Raise snapshot event handlers if NONE of the following conditions is met::

    * snapshot_command is None
    * snapshot_enabled is disabled
    * snapshot_criteria does not match the current state
    * last_snapshot > now - snapshot_interval * interval_length
      (previous snapshot too recent)
    * snapshot_period is not valid

    :param hosts: hosts objects, used to get data for macros
    :type hosts: alignak.objects.host.Hosts
    :param macromodulations: Macro modulations objects, used in commands
    :type macromodulations: alignak.objects.macromodulation.Macromodulations
    :param timeperiods: Timeperiods objects, used for snapshot period and macros
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :return: None
    """
    # We should have a snapshot_command, to be enabled and of course
    # in the good time and state :D
    if self.snapshot_command is None:
        return

    if not self.snapshot_enabled:
        return

    # look at if one state is matching the criteria
    boolmap = [self.is_state(s) for s in self.snapshot_criteria]
    if True not in boolmap:
        return

    # Time based checks now, we should be in the period and not too far
    # from the last_snapshot
    now = int(time.time())
    cls = self.__class__
    if self.last_snapshot > now - self.snapshot_interval * cls.interval_length:  # too close
        return

    # no period means 24x7 :)
    timeperiod = timeperiods[self.snapshot_period]
    if timeperiod is not None and not timeperiod.is_time_valid(now):
        return

    cls = self.__class__
    macroresolver = MacroResolver()
    data = self.get_data_for_event_handler(hosts)
    cmd = macroresolver.resolve_command(self.snapshot_command, data, macromodulations,
                                        timeperiods)
    reac_tag = self.snapshot_command.reactionner_tag
    event_h = EventHandler({
        'command': cmd,
        'timeout': cls.event_handler_timeout,
        'ref': self.uuid,
        'reactionner_tag': reac_tag,
        'is_snapshot': True
    })
    self.raise_snapshot_log_entry(self.snapshot_command)

    # we save the time we launch the snap
    self.last_snapshot = now

    # ok we can put it in our temp action queue
    self.actions.append(event_h)
def check_for_flexible_downtime(self, timeperiods, hosts, services):
    """Enter in a downtime if necessary and raise start notification.

    When a non-OK state occurs we try to raise a flexible downtime.

    :param timeperiods: Timeperiods objects, used for downtime period
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :param hosts: hosts objects, used to enter downtime
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects, used to enter downtime
    :type services: alignak.objects.service.Services
    :return: None
    """
    status_updated = False
    for downtime_id in self.downtimes:
        downtime = self.downtimes[downtime_id]
        # Activate flexible downtimes (do not activate triggered downtimes)
        # Note: only activate if we are between downtime start and end time!
        if downtime.fixed or downtime.is_in_effect:
            continue
        if downtime.start_time <= self.last_chk and downtime.end_time >= self.last_chk \
                and self.state_id != 0 and downtime.trigger_id in ['', '0']:
            # returns downtimestart notifications
            self.broks.extend(downtime.enter(timeperiods, hosts, services))
            status_updated = True
    if status_updated is True:
        self.broks.append(self.get_update_status_brok())
def update_hard_unknown_phase_state(self):
    """Update in_hard_unknown_reach_phase attribute and
    was_in_hard_unknown_reach_phase.

    UNKNOWN during a HARD state are not so important, and they should
    not raise notifications about it.

    :return: None
    """
    self.was_in_hard_unknown_reach_phase = self.in_hard_unknown_reach_phase

    # We do not care about SOFT state at all
    # and we are sure we are no more in such a phase
    if self.state_type != 'HARD' or self.last_state_type != 'HARD':
        self.in_hard_unknown_reach_phase = False

    # So if we are not in already in such a phase, we check for
    # a start or not. So here we are sure to be in a HARD/HARD following
    # state
    if not self.in_hard_unknown_reach_phase:
        if self.state == 'UNKNOWN' and self.last_state != 'UNKNOWN' \
                or self.state == 'UNREACHABLE' and self.last_state != 'UNREACHABLE':
            self.in_hard_unknown_reach_phase = True
            # We also backup with which state we was before enter this phase
            self.state_before_hard_unknown_reach_phase = self.last_state
            return
    else:
        # if we were already in such a phase, look for its end
        if self.state != 'UNKNOWN' and self.state != 'UNREACHABLE':
            self.in_hard_unknown_reach_phase = False

    # If we just exit the phase, look if we exit with a different state
    # than we enter or not. If so, lie and say we were not in such phase
    # because we need so to raise a new notif
    if not self.in_hard_unknown_reach_phase and self.was_in_hard_unknown_reach_phase:
        if self.state != self.state_before_hard_unknown_reach_phase:
            self.was_in_hard_unknown_reach_phase = False
def consume_result(self, chk, notification_period, hosts,
                   services, timeperiods, macromodulations, checkmodulations, bi_modulations,
                   res_modulations, checks, raise_log):
    """Consume a check return and send actions in return;
    main function of reaction to checks, like raising notifications.

    Special cases::

    * is_flapping: immediate notif when problem
    * is_in_scheduled_downtime: no notification
    * is_volatile: notif immediately (service only)

    Basically go through all cases (combination of last_state, current_state,
    attempt number) and do the necessary actions (add attempt, raise
    notification, change state type).

    :param chk: check to handle
    :type chk: alignak.objects.check.Check
    :param notification_period: notification period for this host/service
    :type notification_period: alignak.objects.timeperiod.Timeperiod
    :param hosts: hosts objects, used for almost every operation
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects, used for almost every operation
    :type services: alignak.objects.service.Services
    :param timeperiods: Timeperiods objects, used for all kind of timeperiod
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :param macromodulations: Macro modulations objects, used in commands
    :type macromodulations: alignak.objects.macromodulation.Macromodulations
    :param checkmodulations: Checkmodulations objects, used to change check command
    :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
    :param bi_modulations: business impact modulations, used when setting myself as problem
    :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
    :param res_modulations: result modulations, used to change the output of a check
    :type res_modulations: alignak.object.resultmodulation.Resultmodulations
    :param checks: checks dict, used to get checks_in_progress for the object
    :type checks: dict
    :param raise_log: tell if the check result log must be raised
    :type raise_log: bool
    :return: dependent checks
    :rtype: list[alignak.check.Check]
    """
    # pylint: disable=too-many-locals, too-many-arguments
    # pylint: disable=too-many-branches, too-many-statements
    ok_up = self.__class__.ok_up  # OK for service, UP for host
    now = int(time.time())
    if not chk.freshness_expiry_check:
        self.freshness_expired = False

    if 'ALIGNAK_LOG_ACTIONS' in os.environ:
        if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING':
            logger.warning("Got check result: %d for %s",
                           chk.exit_status, self.get_full_name())
        else:
            logger.info("Got check result: %d for %s",
                        chk.exit_status, self.get_full_name())

    if os.getenv('ALIGNAK_LOG_CHECKS', None):
        level = ['info', 'warning', 'error', 'critical'][min(chk.exit_status, 3)]
        func = getattr(logger, level)
        func("--ALC-- check result for %s, exit: %d, output: %s",
             self.get_full_name(), chk.exit_status, chk.output)

    # ============ MANAGE THE CHECK ============ #
    # Check is not OK, waiting to consume the results but it has some dependencies
    # We put this check in waitdep state, and we create the checks of dependent items
    # and nothing else ;)
    if chk.exit_status != 0 and chk.status == ACT_STATUS_WAIT_CONSUME and self.act_depend_of:
        chk.status = ACT_STATUS_WAIT_DEPEND
        # Make sure the check know about his dep
        # C is my check, and he wants dependencies
        deps_checks = self.raise_dependencies_check(chk, hosts, services, timeperiods,
                                                    macromodulations, checkmodulations,
                                                    checks)
        # Get checks_id of dep
        for check in deps_checks['new']:
            chk.depend_on.append(check.uuid)
        for check_uuid in deps_checks['checking']:
            chk.depend_on.append(check_uuid)
        # we must wait dependent check checked and consumed
        return deps_checks['new']

    # We check for stalking if necessary
    # so if check is here
    self.manage_stalking(chk)

    # ============ UPDATE ITEM INFORMATION ============ #
    # Latency can be <0 is we get a check from the retention file
    # so if <0, set 0
    try:
        self.latency = max(0, chk.check_time - chk.t_to_go)
    except TypeError:  # pragma: no cover, simple protection
        pass

    # Ok, the first check is done
    self.has_been_checked = 1

    # Now get data from check
    self.execution_time = chk.execution_time
    self.u_time = chk.u_time
    self.s_time = chk.s_time
    self.last_chk = int(chk.check_time)
    self.output = chk.output
    self.long_output = chk.long_output
    if self.__class__.process_performance_data and self.process_perf_data:
        self.last_perf_data = self.perf_data
        self.perf_data = chk.perf_data

    # Before setting state, modulate them
    for resultmod_id in self.resultmodulations:
        resultmod = res_modulations[resultmod_id]
        if resultmod is not None:
            chk.exit_status = resultmod.module_return(chk.exit_status, timeperiods)

    if not chk.freshness_expiry_check:
        # Only update the last state date if not in freshness expiry
        self.last_state_update = now

    # An host WARNING result (1) is promoted to DOWN (2)
    if chk.exit_status == 1 and self.__class__.my_type == 'host':
        chk.exit_status = 2

    self.set_state_from_exit_status(chk.exit_status, notification_period, hosts, services)

    self.last_state_type = self.state_type
    self.return_code = chk.exit_status

    # Raise the log only when the item information are up-to-date :/
    if raise_log:
        self.raise_check_result()

    # we change the state, do whatever we are or not in
    # an impact mode, we can put it
    self.state_changed_since_impact = True

    # The check is consumed, update the in_checking properties
    self.remove_in_progress_check(chk.uuid)

    # Used to know if a notification is raised or not
    enable_action = True

    # This check was waiting for a check of items it depends
    if chk.status == ACT_STATUS_WAIT_DEPEND:
        # Check dependencies
        enable_action = self.is_enable_action_dependent(hosts, services)
        # If all dependencies not ok, define item as UNREACHABLE
        self.check_and_set_unreachability(hosts, services)

    if chk.status in [ACT_STATUS_WAIT_CONSUME, ACT_STATUS_WAIT_DEPEND]:
        # check waiting consume or waiting result of dependencies
        if chk.depend_on_me != []:
            # one or more checks wait this check (dependency)
            chk.status = ACT_STATUS_WAITING_ME
        else:
            # the check go in zombie state to be removed later
            chk.status = ACT_STATUS_ZOMBIE

    # from UP/OK/PENDING
    # to UP/OK
    if chk.exit_status == 0 and self.last_state in (ok_up, 'PENDING'):
        self.unacknowledge_problem()
        # action in return can be notification or other checks (dependencies)
        if (self.state_type == 'SOFT') and self.last_state != 'PENDING':
            if self.is_max_attempts() and self.state_type == 'SOFT':
                self.state_type = 'HARD'
            else:
                self.state_type = 'SOFT'
        else:
            self.attempt = 1
            self.state_type = 'HARD'

    # from WARNING/CRITICAL/UNKNOWN/UNREACHABLE/DOWN
    # to UP/OK
    elif chk.exit_status == 0 and self.last_state not in (ok_up, 'PENDING'):
        self.unacknowledge_problem()
        if self.state_type == 'SOFT':
            # previous check in SOFT
            if not chk.is_dependent():
                self.add_attempt()
            self.raise_alert_log_entry()
            # Eventhandler gets OK;SOFT;++attempt, no notification needed
            self.get_event_handlers(hosts, macromodulations, timeperiods)
            # Now we are UP/OK HARD
            self.state_type = 'HARD'
            self.attempt = 1
        elif self.state_type == 'HARD':
            # previous check in HARD
            self.raise_alert_log_entry()
            # Eventhandler and notifications get OK;HARD;maxattempts
            # Ok, so current notifications are not needed, we 'zombie' them
            self.remove_in_progress_notifications(master=True)
            if enable_action:
                self.create_notifications('RECOVERY', notification_period, hosts, services)
            self.get_event_handlers(hosts, macromodulations, timeperiods)
            # We stay in HARD
            self.attempt = 1

            # I'm no more a problem if I was one
            self.no_more_a_problem(hosts, services, timeperiods, bi_modulations)

    # Volatile part
    # Only for service
    elif chk.exit_status != 0 and getattr(self, 'is_volatile', False):
        # There are no repeated attempts, so the first non-ok results
        # in a hard state
        self.attempt = 1
        self.state_type = 'HARD'
        # status != 0 so add a log entry (before actions that can also raise log
        # it is smarter to log error before notification)
        self.raise_alert_log_entry()
        self.check_for_flexible_downtime(timeperiods, hosts, services)
        self.remove_in_progress_notifications(master=True)
        if enable_action:
            self.create_notifications('PROBLEM', notification_period, hosts, services)
        # Ok, event handlers here too
        self.get_event_handlers(hosts, macromodulations, timeperiods)

        # PROBLEM/IMPACT
        # I'm a problem only if I'm the root problem,
        if enable_action:
            self.set_myself_as_problem(hosts, services, timeperiods, bi_modulations)

    # from UP/OK
    # to WARNING/CRITICAL/UNKNOWN/UNREACHABLE/DOWN
    elif chk.exit_status != 0 and self.last_state in (ok_up, 'PENDING'):
        self.attempt = 1
        if self.is_max_attempts():
            # Now we are in HARD
            self.state_type = 'HARD'
            self.raise_alert_log_entry()
            self.remove_in_progress_notifications(master=True)
            self.check_for_flexible_downtime(timeperiods, hosts, services)
            if enable_action:
                self.create_notifications('PROBLEM', notification_period, hosts, services)
            # Oh? This is the typical go for a event handler :)
            self.get_event_handlers(hosts, macromodulations, timeperiods)

            # PROBLEM/IMPACT
            # I'm a problem only if I'm the root problem,
            if enable_action:
                self.set_myself_as_problem(hosts, services, timeperiods, bi_modulations)
        else:
            # This is the first NON-OK result. Initiate the SOFT-sequence
            # Also launch the event handler, he might fix it.
            self.state_type = 'SOFT'
            if self.is_max_attempts():
                self.state_type = 'HARD'
            self.raise_alert_log_entry()
            self.get_event_handlers(hosts, macromodulations, timeperiods)

    # from WARNING/CRITICAL/UNKNOWN/UNREACHABLE/DOWN
    # to WARNING/CRITICAL/UNKNOWN/UNREACHABLE/DOWN
    elif chk.exit_status != 0 and self.last_state != ok_up:
        if self.state_type == 'SOFT':
            if not chk.is_dependent():
                self.add_attempt()
            # Cases where go:
            # * warning soft => critical hard
            # * warning soft => critical soft
            if self.state != self.last_state:
                self.unacknowledge_problem_if_not_sticky()
            if self.is_max_attempts():
                # Ok here is when we just go to the hard state
                self.state_type = 'HARD'
                self.raise_alert_log_entry()
                self.remove_in_progress_notifications(master=True)
                self.check_for_flexible_downtime(timeperiods, hosts, services)
                if enable_action:
                    self.create_notifications('PROBLEM', notification_period, hosts, services)
                # So event handlers here too
                self.get_event_handlers(hosts, macromodulations, timeperiods)

                # PROBLEM/IMPACT
                # I'm a problem only if I'm the root problem,
                if enable_action:
                    self.set_myself_as_problem(hosts, services, timeperiods, bi_modulations)
            else:
                self.raise_alert_log_entry()
                # eventhandler is launched each time during the soft state
                self.get_event_handlers(hosts, macromodulations, timeperiods)
        else:
            # Send notifications whenever the state has changed. (W -> C)
            # but not if the current state is UNKNOWN (hard C-> hard U -> hard C should
            # not restart notifications)
            if self.state != self.last_state:
                self.update_hard_unknown_phase_state()
                if not self.in_hard_unknown_reach_phase and not \
                        self.was_in_hard_unknown_reach_phase:
                    self.unacknowledge_problem_if_not_sticky()
                    self.raise_alert_log_entry()
                    self.remove_in_progress_notifications(master=True)
                    if enable_action:
                        self.create_notifications('PROBLEM', notification_period,
                                                  hosts, services)
                    self.get_event_handlers(hosts, macromodulations, timeperiods)

            elif self.in_scheduled_downtime_during_last_check is True:
                # during the last check I was in a downtime. but now
                # the status is still critical and notifications
                # are possible again. send an alert immediately
                self.remove_in_progress_notifications(master=True)
                if enable_action:
                    self.create_notifications('PROBLEM', notification_period,
                                              hosts, services)

            # PROBLEM/IMPACT
            # Forces problem/impact registration even if no state change
            # was detected as we may have a non OK state restored from
            # retention data. This way, we rebuild problem/impact hierarchy.
            # I'm a problem only if I'm the root problem,
            if enable_action:
                self.set_myself_as_problem(hosts, services, timeperiods, bi_modulations)

            # case no notification exist but notifications are enabled (for example, we
            # enable notifications with external command)
            if enable_action and self.notifications_enabled and \
                    self.current_notification_number == 0:
                self.remove_in_progress_notifications(master=True)
                self.create_notifications('PROBLEM', notification_period,
                                          hosts, services)

    self.update_hard_unknown_phase_state()
    # Reset this flag. If it was true, actions were already taken
    self.in_scheduled_downtime_during_last_check = False

    # now is the time to update state_type_id
    # and our last_hard_state
    if self.state_type == 'HARD':
        self.state_type_id = 1
        self.last_hard_state = self.state
        self.last_hard_state_id = self.state_id
    else:
        self.state_type_id = 0

    # Fill last_hard_state_change to now
    # if we just change from SOFT->HARD or
    # in HARD we change of state (Warning->critical, or critical->ok, etc etc)
    if self.state_type == 'HARD' and \
            (self.last_state_type == 'SOFT' or self.last_state != self.state):
        self.last_hard_state_change = int(time.time())

    if self.state_type == 'HARD':
        # If the check is a freshness one, set freshness as expired
        if chk.freshness_expiry_check:
            self.freshness_expired = True
            self.last_hard_state_change = int(time.time())

    # update event/problem-counters
    self.update_event_and_problem_id()

    # Raise a log if freshness check expired
    if chk.freshness_expiry_check:
        if os.getenv('ALIGNAK_LOG_CHECKS', None):
            logger.info("--ALC-- freshness expired for %s, when: %s, last checked: %s",
                        self.get_full_name(),
                        datetime.utcfromtimestamp(
                            self.last_hard_state_change).strftime('%Y-%m-%d %H:%M:%S'),
                        datetime.utcfromtimestamp(
                            self.last_state_update).strftime('%Y-%m-%d %H:%M:%S'))
        self.raise_freshness_log_entry(int(now - self.last_state_update -
                                           self.freshness_threshold))

    self.broks.append(self.get_check_result_brok())

    self.get_perfdata_command(hosts, macromodulations, timeperiods)
    # Also snapshot if needed :)
    self.get_snapshot(hosts, macromodulations, timeperiods)

    return []
"Consume a check return and send action in return\n main function of reaction of checks like raise notifications\n\n Special cases::\n\n * is_flapping: immediate notif when problem\n * is_in_scheduled_downtime: no notification\n * is_volatile: notif immediately (service only)\n\n Basically go through all cases (combination of last_state, current_state, attempt number)\n and do necessary actions (add attempt, raise notification., change state type.)\n\n :param chk: check to handle\n :type chk: alignak.objects.check.Check\n :param notification_period: notification period for this host/service\n :type notification_period: alignak.objects.timeperiod.Timeperiod\n :param hosts: hosts objects, used for almost every operation\n :type hosts: alignak.objects.host.Hosts\n :param services: services objects, used for almost every operation\n :type services: alignak.objects.service.Services\n :param timeperiods: Timeperiods objects, used for all kind of timeperiod (notif, check)\n :type timeperiods: alignak.objects.timeperiod.Timeperiods\n :param macromodulations: Macro modulations objects, used in commands (notif, check)\n :type macromodulations: alignak.objects.macromodulation.Macromodulations\n :param checkmodulations: Checkmodulations objects, used to change check command if necessary\n :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations\n :param bi_modulations: business impact modulation are used when setting myself as problem\n :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations\n :param res_modulations: result modulation are used to change the ouput of a check\n :type res_modulations: alignak.object.resultmodulation.Resultmodulations\n :param checks: checks dict, used to get checks_in_progress for the object\n :type checks: dict\n :return: Dependent checks\n :rtype list[alignak.check.Check]\n "
] |
Please provide a description of the function:def update_event_and_problem_id(self):
ok_up = self.__class__.ok_up # OK for service, UP for host
if (self.state != self.last_state and self.last_state != 'PENDING' or
self.state != ok_up and self.last_state == 'PENDING'):
SchedulingItem.current_event_id += 1
self.last_event_id = self.current_event_id
self.current_event_id = SchedulingItem.current_event_id
# now the problem_id
if self.state != ok_up and self.last_state == 'PENDING':
# broken ever since i can remember
SchedulingItem.current_problem_id += 1
self.last_problem_id = self.current_problem_id
self.current_problem_id = SchedulingItem.current_problem_id
elif ok_up not in (self.state, self.last_state):
# State transitions between non-OK states
# (e.g. WARNING to CRITICAL) do not cause
# this problem id to increase.
pass
elif self.state == ok_up:
# If the service is currently in an OK state,
# this macro will be set to zero (0).
self.last_problem_id = self.current_problem_id
self.current_problem_id = 0
else:
# Every time a service (or host) transitions from
# an OK or UP state to a problem state, a global
# problem ID number is incremented by one (1).
SchedulingItem.current_problem_id += 1
self.last_problem_id = self.current_problem_id
self.current_problem_id = SchedulingItem.current_problem_id | [
"Update current_event_id and current_problem_id\n Those attributes are used for macros (SERVICEPROBLEMID ...)\n\n :return: None\n "
] |
Please provide a description of the function:def prepare_notification_for_sending(self, notif, contact, macromodulations, timeperiods,
host_ref):
if notif.status == ACT_STATUS_POLLED:
self.update_notification_command(notif, contact, macromodulations, timeperiods,
host_ref)
self.notified_contacts.add(contact.get_name())
self.notified_contacts_ids.add(contact.uuid)
self.raise_notification_log_entry(notif, contact, host_ref) | [
"Used by scheduler when a notification is ok to be sent (to reactionner).\n Here we update the command with status of now, and we add the contact to set of\n contact we notified. And we raise the log entry\n\n :param notif: notification to send\n :type notif: alignak.objects.notification.Notification\n :param macromodulations: Macro modulations objects, used in the notification command\n :type macromodulations: alignak.objects.macromodulation.Macromodulations\n :param timeperiods: Timeperiods objects, used to get modulation period\n :type timeperiods: alignak.objects.timeperiod.Timeperiods\n :param host_ref: reference host (used for a service)\n :type host_ref: alignak.object.host.Host\n :return: None\n "
] |
Please provide a description of the function:def update_notification_command(self, notif, contact, macromodulations, timeperiods,
host_ref=None):
cls = self.__class__
macrosolver = MacroResolver()
data = self.get_data_for_notifications(contact, notif, host_ref)
notif.command = macrosolver.resolve_command(notif.command_call, data, macromodulations,
timeperiods)
if cls.enable_environment_macros or notif.enable_environment_macros:
notif.env = macrosolver.get_env_macros(data) | [
"Update the notification command by resolving Macros\n And because we are just launching the notification, we can say\n that this contact has been notified\n\n :param notif: notification to send\n :type notif: alignak.objects.notification.Notification\n :param contact: contact for this host/service\n :type contact: alignak.object.contact.Contact\n :param macromodulations: Macro modulations objects, used in the notification command\n :type macromodulations: alignak.objects.macromodulation.Macromodulations\n :param timeperiods: Timeperiods objects, used to get modulation period\n :type timeperiods: alignak.objects.timeperiod.Timeperiods\n :param host_ref: reference host (used for a service)\n :type host_ref: alignak.object.host.Host\n :return: None\n "
] |
Please provide a description of the function:def is_escalable(self, notification, escalations, timeperiods):
cls = self.__class__
# We search since when we are in notification for escalations
# that are based on time
in_notif_time = time.time() - notification.creation_time
# Check is an escalation match the current_notification_number
for escalation_id in self.escalations:
escalation = escalations[escalation_id]
escalation_period = timeperiods[escalation.escalation_period]
if escalation.is_eligible(notification.t_to_go, self.state, notification.notif_nb,
in_notif_time, cls.interval_length, escalation_period):
return True
return False | [
"Check if a notification can be escalated.\n Basically call is_eligible for each escalation\n\n :param notification: notification we would like to escalate\n :type notification: alignak.objects.notification.Notification\n :param escalations: Esclations objects, used to get escalation objects (period)\n :type escalations: alignak.objects.escalation.Escalations\n :param timeperiods: Timeperiods objects, used to get escalation period\n :type timeperiods: alignak.objects.timeperiod.Timeperiods\n :return: True if notification can be escalated, otherwise False\n :rtype: bool\n "
] |
Please provide a description of the function:def get_next_notification_time(self, notif, escalations, timeperiods):
# pylint: disable=too-many-locals
res = None
now = time.time()
cls = self.__class__
# Look at the minimum notification interval
notification_interval = self.notification_interval
# and then look for currently active notifications, and take notification_interval
# if filled and less than the self value
in_notif_time = time.time() - notif.creation_time
for escalation_id in self.escalations:
escalation = escalations[escalation_id]
escalation_period = timeperiods[escalation.escalation_period]
if escalation.is_eligible(notif.t_to_go, self.state, notif.notif_nb,
in_notif_time, cls.interval_length, escalation_period):
if escalation.notification_interval != -1 and \
escalation.notification_interval < notification_interval:
notification_interval = escalation.notification_interval
# So take the by default time
std_time = notif.t_to_go + notification_interval * cls.interval_length
# Maybe the notification comes from retention data and
# next notification alert is in the past
# if so let use the now value instead
if std_time < now:
std_time = now + notification_interval * cls.interval_length
# standard time is a good one
res = std_time
creation_time = notif.creation_time
in_notif_time = now - notif.creation_time
for escalation_id in self.escalations:
escalation = escalations[escalation_id]
# If the escalation was already raised, we do not look for a new "early start"
if escalation.get_name() not in notif.already_start_escalations:
escalation_period = timeperiods[escalation.escalation_period]
next_t = escalation.get_next_notif_time(std_time, self.state,
creation_time, cls.interval_length,
escalation_period)
# If we got a real result (time base escalation), we add it
if next_t is not None and now < next_t < res:
res = next_t
# And we take the minimum of this result. Can be standard or escalation asked
return res | [
"Get the next notification time for a notification\n Take the standard notification_interval or ask for our escalation\n if one of them need a smaller value to escalade\n\n :param notif: Notification we need time\n :type notif: alignak.objects.notification.Notification\n :param escalations: Esclations objects, used to get escalation objects (interval, period)\n :type escalations: alignak.objects.escalation.Escalations\n :param timeperiods: Timeperiods objects, used to get escalation period\n :type timeperiods: alignak.objects.timeperiod.Timeperiods\n :return: Timestamp of next notification\n :rtype: int\n "
] |
Please provide a description of the function:def get_escalable_contacts(self, notification, escalations, timeperiods):
cls = self.__class__
# We search since when we are in notification for escalations
# that are based on this time
in_notif_time = time.time() - notification.creation_time
contacts = set()
for escalation_id in self.escalations:
escalation = escalations[escalation_id]
escalation_period = timeperiods[escalation.escalation_period]
if escalation.is_eligible(notification.t_to_go, self.state, notification.notif_nb,
in_notif_time, cls.interval_length, escalation_period):
contacts.update(escalation.contacts)
# And we tag this escalations as started now
notification.already_start_escalations.add(escalation.get_name())
return list(contacts) | [
"Get all contacts (uniq) from eligible escalations\n\n :param notification: Notification to get data from (notif number...)\n :type notification: alignak.objects.notification.Notification\n :param escalations: Esclations objects, used to get escalation objects (contact, period)\n :type escalations: alignak.objects.escalation.Escalations\n :param timeperiods: Timeperiods objects, used to get escalation period\n :type timeperiods: alignak.objects.timeperiod.Timeperiods\n\n :return: Contact uuid list that can be notified for escalation\n :rtype: list\n "
] |
Please provide a description of the function:def create_notifications(self, n_type, notification_period, hosts, services,
t_wished=None, author_data=None):
cls = self.__class__
# t_wished==None for the first notification launch after consume
# here we must look at the self.notification_period
if t_wished is None:
t_wished = time.time()
# if first notification, we must add first_notification_delay
if self.current_notification_number == 0 and n_type == 'PROBLEM':
last_time_non_ok_or_up = self.last_time_non_ok_or_up()
if last_time_non_ok_or_up:
# last_time_non_ok_or_up is an integer value - set the next second
t_wished = last_time_non_ok_or_up + 1
t_wished = t_wished + self.first_notification_delay * cls.interval_length
if notification_period is None:
new_t = t_wished
else:
new_t = notification_period.get_next_valid_time_from_t(t_wished)
else:
# We follow our order
new_t = t_wished
if self.is_blocking_notifications(notification_period, hosts, services,
n_type, t_wished) and \
self.first_notification_delay == 0 and self.notification_interval == 0:
# If notifications are blocked on the host/service level somehow
# and repeated notifications are not configured,
# we can silently drop this one
return
if n_type == u'PROBLEM':
# Create the notification with an incremented notification_number.
# The current_notification_number of the item itself will only
# be incremented when this notification (or its children)
# have actually been sent.
next_notif_nb = self.current_notification_number + 1
elif n_type == u'RECOVERY':
# Recovery resets the notification counter to zero
self.current_notification_number = 0
next_notif_nb = self.current_notification_number
else:
# downtime/flap/etc do not change the notification number
next_notif_nb = self.current_notification_number
data = {
'status': u'scheduled',
'type': n_type,
'command': u'VOID',
'ref': self.uuid,
't_to_go': new_t,
'timeout': cls.notification_timeout,
'notif_nb': next_notif_nb,
'host_name': getattr(self, 'host_name', ''),
'service_description': getattr(self, 'service_description', ''),
}
if author_data and n_type in [u'DOWNTIMESTART', u'DOWNTIMEEND']:
data.update(author_data)
notif = Notification(data)
logger.debug("Created a %s notification: %s", self.my_type, n_type)
# Keep a trace in our notifications queue
self.notifications_in_progress[notif.uuid] = notif
# and put it in our queue for the scheduler to pick it up
self.actions.append(notif) | [
"Create a \"master\" notification here, which will later\n (immediately before the reactionner gets it) be split up\n in many \"child\" notifications, one for each contact.\n\n :param n_type: notification type (\"PROBLEM\", \"RECOVERY\" ...)\n :type n_type: str\n :param notification_period: notification period for this host/service\n :type notification_period: alignak.objects.timeperiod.Timeperiod\n :param hosts: hosts objects, used to check if a notif is blocked\n :type hosts: alignak.objects.host.Hosts\n :param services: services objects, used to check if a notif is blocked\n :type services: alignak.objects.service.Services\n :param t_wished: time we want to notify\n :type t_wished: int\n :param author_data: notification author data (eg. for a downtime notification)\n :type author_data: dict (containing author, author_name ad a comment)\n :return: None\n "
] |
Please provide a description of the function:def scatter_notification(self, notif, contacts, notifways, timeperiods, macromodulations,
escalations, host_ref):
# pylint: disable=too-many-locals, too-many-boolean-expressions
if notif.contact:
# only master notifications can be split up
return []
cls = self.__class__
childnotifications = []
escalated = False
notification_contacts = []
if notif.type == u'RECOVERY':
if self.first_notification_delay != 0 and not self.notified_contacts_ids:
# Recovered during first_notification_delay. No notifications
# have been sent yet, so we keep quiet
notification_contacts = []
else:
# The old way. Only send recover notifications to those contacts
# who also got problem notifications
notification_contacts = [c_id for c_id in self.notified_contacts_ids]
self.notified_contacts.clear()
self.notified_contacts_ids.clear()
else:
# Check is an escalation match. If yes, get all contacts from escalations
if self.is_escalable(notif, escalations, timeperiods):
notification_contacts = self.get_escalable_contacts(notif, escalations, timeperiods)
escalated = True
# else take normal contacts
else:
# notif_contacts = [contacts[c_id] for c_id in self.contacts]
notification_contacts = self.contacts
recipients = []
recipients_names = set()
for contact_uuid in notification_contacts:
# We do not want to notify again a contact with notification interval == 0
# if has been already notified except if the item hard state changed!
# This can happen when a service exits a downtime and it is still in
# critical/warning (and not acknowledge)
if notif.type == u'PROBLEM' and self.notification_interval == 0 \
and self.state_type == 'HARD' and self.last_state_type == self.state_type \
and self.state == self.last_state \
and contact_uuid in self.notified_contacts_ids:
# Do not send notification
continue
recipients.append(contact_uuid)
recipients_names.add(contacts[contact_uuid].contact_name)
for contact_uuid in recipients:
contact = contacts[contact_uuid]
# Get the property name for notification commands, like
# service_notification_commands for service
notif_commands = contact.get_notification_commands(notifways, cls.my_type)
for cmd in notif_commands:
data = {
'type': notif.type,
'command': u'VOID',
'command_call': cmd,
'ref': self.uuid,
'contact': contact.uuid,
'contact_name': contact.contact_name,
'recipients': ','.join(recipients_names),
't_to_go': notif.t_to_go,
'escalated': escalated,
'timeout': cls.notification_timeout,
'notif_nb': notif.notif_nb,
'reactionner_tag': cmd.reactionner_tag,
'enable_environment_macros': cmd.enable_environment_macros,
'host_name': getattr(self, 'host_name', ''),
'service_description': getattr(self, 'service_description', ''),
'author': notif.author,
'author_name': notif.author_name,
'author_alias': notif.author_alias,
'author_comment': notif.author_comment
}
child_n = Notification(data)
if not self.notification_is_blocked_by_contact(notifways, timeperiods, child_n,
contact):
# Update the notification with fresh status information
# of the item. Example: during the notification_delay
# the status of a service may have changed from WARNING to CRITICAL
self.update_notification_command(child_n, contact, macromodulations,
timeperiods, host_ref)
self.raise_notification_log_entry(child_n, contact, host_ref)
self.notifications_in_progress[child_n.uuid] = child_n
childnotifications.append(child_n)
if notif.type == u'PROBLEM':
# Remember the contacts. We might need them later in the
# recovery code some lines above
self.notified_contacts_ids.add(contact.uuid)
self.notified_contacts.add(contact.get_name())
return childnotifications | [
"In create_notifications we created a notification master (eg. a template).\n When it's time to hand it over to the reactionner, this master notification needs\n to be split in several child notifications, one for each contact\n\n To be more exact, one for each contact who is willing to accept\n notifications of this type and at this time\n\n :param notif: Notification to scatter\n :type notif: alignak.objects.notification.Notification\n :param contacts: Contacts objects, used to retreive contact for this object\n :type contacts: alignak.objects.contact.Contacts\n :param notifways: Notificationway objects, used to get notific commands\n :type notifways: alignak.object.notificationway.Notificationways\n :param timeperiods: Timeperiods objects, used to check if notif are allowed at this time\n :type timeperiods: alignak.objects.timeperiod.Timeperiods\n :param macromodulations: Macro modulations objects, used in the notification command\n :type macromodulations: alignak.objects.macromodulation.Macromodulations\n :param escalations: Esclations objects, used to get escalated contacts\n :type escalations: alignak.objects.escalation.Escalations\n :param host_ref: reference host (used for a service)\n :type host_ref: alignak.object.host.Host\n\n :return: child notifications\n :rtype: list[alignak.objects.notification.Notification]\n "
] |
Please provide a description of the function:def launch_check(self, timestamp, hosts, services, timeperiods,
macromodulations, checkmodulations, checks, ref_check=None, force=False,
dependent=False):
# pylint: disable=too-many-locals, too-many-arguments
# pylint: disable=too-many-branches, too-many-return-statements
chk = None
cls = self.__class__
# Look if we are in check or not
self.update_in_checking()
# the check is being forced, so we just replace next_chk time by now
if force and self.in_checking:
try:
c_in_progress = checks[self.checks_in_progress[0]]
c_in_progress.t_to_go = time.time()
return c_in_progress
except KeyError:
pass
# If I'm already in checking, Why launch a new check?
# If ref_check_id is not None , this is a dependency_ check
# If none, it might be a forced check, so OK, I do a new
# Dependency check, we have to create a new check that will be launched only once (now)
# Otherwise it will delay the next real check. this can lead to an infinite SOFT state.
if not force and (self.in_checking and ref_check is not None):
c_in_progress = checks[self.checks_in_progress[0]]
# c_in_progress has almost everything we need but we cant copy.deepcopy() it
# we need another c.uuid
data = {
'command': c_in_progress.command,
'timeout': c_in_progress.timeout,
'poller_tag': c_in_progress.poller_tag,
'env': c_in_progress.env,
'module_type': c_in_progress.module_type,
't_to_go': timestamp,
'depend_on_me': [ref_check],
'ref': self.uuid,
'ref_type': self.my_type,
'dependency_check': True,
'internal': self.got_business_rule or c_in_progress.command.startswith('_')
}
chk = Check(data)
self.actions.append(chk)
if os.getenv('ALIGNAK_LOG_CHECKS', None):
logger.info("--ALC-- -> added a check action for %s (%s)",
self.get_full_name(), chk.uuid)
return chk
if force or (not self.is_no_check_dependent(hosts, services, timeperiods)):
if self.my_type == 'service' and not self.check_command:
# This should never happen because of configuration check!
logger.debug("Service check is for a service that has no check command (%s/%s), "
"do not launch the check !", self.host_name, self.service_description)
return None
if self.my_type == 'host' and not self.check_command:
if self.active_checks_enabled:
logger.debug("Host check is for an host that has no check command (%s), "
"do not launch the check !", self.host_name)
return None
# Fred : passive only checked host dependency
if dependent and self.my_type == 'host' and \
self.passive_checks_enabled and not self.active_checks_enabled:
logger.debug("Host check (dependent) is for an host that is only passively "
"checked (%s), do not launch the check !", self.host_name)
return None
# By default env is void
env = {}
poller_tag = u'None'
module_type = None
# By default we will use our default check_command
self.last_check_command = None
check_command = self.check_command
command_line = ''
if check_command:
poller_tag = check_command.poller_tag
module_type = check_command.module_type
# But if a checkway is available, use this one instead.
# Take the first available
for chkmod_id in self.checkmodulations:
chkmod = checkmodulations[chkmod_id]
c_cw = chkmod.get_check_command(timeperiods, timestamp)
if c_cw:
check_command = c_cw
break
# Get the command to launch
macroresolver = MacroResolver()
data = self.get_data_for_checks(hosts)
command_line = macroresolver.resolve_command(check_command, data,
macromodulations, timeperiods)
# remember it, for pure debugging purpose
self.last_check_command = command_line
# And get all environment variables only if needed
if cls.enable_environment_macros or (check_command and
check_command.enable_environment_macros):
env = macroresolver.get_env_macros(data)
# By default we take the global timeout, but we use the command one if it
# is defined (default is -1 for no timeout)
timeout = cls.check_timeout
if check_command and check_command.timeout != -1:
timeout = check_command.timeout
# Make the Check object and put the service in checking
# Make the check inherit poller_tag from the command
# And reactionner_tag too
data = {
'command': command_line,
'timeout': timeout,
'poller_tag': poller_tag,
'env': env,
'module_type': module_type,
't_to_go': timestamp,
'depend_on_me': [ref_check] if ref_check else [],
'ref': self.uuid,
'ref_type': self.my_type,
'internal': self.got_business_rule or command_line.startswith('_')
}
chk = Check(data)
self.checks_in_progress.append(chk.uuid)
self.update_in_checking()
# We need to put this new check in our actions queue
# so scheduler can take it
if chk is not None:
self.actions.append(chk)
if os.getenv('ALIGNAK_LOG_CHECKS', None):
logger.info("--ALC-- -> added a check action for %s (%s)",
self.get_full_name(), chk.uuid)
return chk
# None mean I already take it into account
return None | [
"Launch a check (command)\n\n :param timestamp:\n :type timestamp: int\n :param checkmodulations: Checkmodulations objects, used to change check command if necessary\n :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations\n :param ref_check:\n :type ref_check:\n :param force:\n :type force: bool\n :param dependent:\n :type dependent: bool\n :return: None or alignak.check.Check\n :rtype: None | alignak.check.Check\n "
] |
Please provide a description of the function:def get_perfdata_command(self, hosts, macromodulations, timeperiods):
cls = self.__class__
if not cls.process_performance_data or not self.process_perf_data:
return
if cls.perfdata_command is not None:
macroresolver = MacroResolver()
data = self.get_data_for_event_handler(hosts)
cmd = macroresolver.resolve_command(cls.perfdata_command, data, macromodulations,
timeperiods)
reactionner_tag = cls.perfdata_command.reactionner_tag
event_h = EventHandler({
'command': cmd,
'timeout': cls.perfdata_timeout,
'ref': self.uuid,
'reactionner_tag': reactionner_tag
})
# ok we can put it in our temp action queue
self.actions.append(event_h) | [
"Add event_handler to process performance data if necessary (not disabled)\n\n :param macromodulations: Macro modulations objects, used in commands (notif, check)\n :type macromodulations: alignak.objects.macromodulation.Macromodulations\n :return: None\n "
] |
Please provide a description of the function:def create_business_rules(self, hosts, services, hostgroups, servicegroups,
macromodulations, timeperiods, running=False):
# pylint: disable=too-many-locals
cmdcall = getattr(self, 'check_command', None)
# If we do not have a command, we bailout
if cmdcall is None:
return
# we get our base command, like
# bp_rule!(host,svc & host, svc) -> bp_rule
cmd = cmdcall.call
elts = cmd.split('!')
base_cmd = elts[0]
# If it's bp_rule, we got a rule :)
if base_cmd == 'bp_rule':
self.got_business_rule = True
rule = ''
if len(elts) >= 2:
rule = '!'.join(elts[1:])
# Only (re-)evaluate the business rule if it has never been
# evaluated before, or it contains a macro.
if re.match(r"\$[\w\d_-]+\$", rule) or self.business_rule is None:
macroresolver = MacroResolver()
data = self.get_data_for_checks(hosts)
rule = macroresolver.resolve_simple_macros_in_string(rule, data,
macromodulations,
timeperiods)
prev = getattr(self, "processed_business_rule", "")
if rule == prev:
# Business rule did not changed (no macro was modulated)
return
fact = DependencyNodeFactory(self)
node = fact.eval_cor_pattern(rule, hosts, services,
hostgroups, servicegroups, running)
self.processed_business_rule = rule
self.business_rule = node | [
"Create business rules if necessary (cmd contains bp_rule)\n\n :param hosts: Hosts object to look for objects\n :type hosts: alignak.objects.host.Hosts\n :param services: Services object to look for objects\n :type services: alignak.objects.service.Services\n :param running: flag used in eval_cor_pattern function\n :type running: bool\n :return: None\n "
] |
def get_business_rule_output(self, hosts, services, macromodulations, timeperiods):
    # pylint: disable=too-many-locals, too-many-branches
    """Return a status string for a business-rule based item, formatted using the
    ``business_rule_output_template`` attribute as template.

    The template may embed output formatting for itself, and for its child
    (dependent) items. The child format string is the part enclosed between
    ``$(`` and ``)$``; it is expanded once per child item that is not OK
    (only children in a non-OK state are displayed).

    Any business rule based item or child macro may be used. In addition, the
    $STATUS$, $SHORTSTATUS$ and $FULLNAME$ macros, whose names are common to
    hosts and services, may be used to ease template writing.

    Example: a template looking like
        "$STATUS$ [ $($STATUS$: $HOSTNAME$,$SERVICEDESC$ )$ ]"
    would return
        "CRITICAL [ CRITICAL: host1,srv1 WARNING: host2,srv2 ]"

    :param hosts: Hosts object to look for objects
    :type hosts: alignak.objects.host.Hosts
    :param services: Services object to look for objects
    :type services: alignak.objects.service.Services
    :param macromodulations: Macromodulations object to look for objects
    :type macromodulations: alignak.objects.macromodulation.Macromodulations
    :param timeperiods: Timeperiods object to look for objects
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :return: status for business rules ("" when not a business rule or no template)
    :rtype: str
    """
    got_business_rule = getattr(self, 'got_business_rule', False)
    # Checks that the service is a business rule.
    if got_business_rule is False or self.business_rule is None:
        return ""
    # Checks that the business rule has a format specified.
    output_template = self.business_rule_output_template
    if not output_template:
        return ""
    macroresolver = MacroResolver()

    # Extracts children template strings (the part between $( and )$)
    elts = re.findall(r"\$\((.*)\)\$", output_template)
    if not elts:
        child_template_string = ""
    else:
        child_template_string = elts[0]

    # Processes child services output
    children_output = ""
    ok_count = 0
    # Expands child items format string macros.
    items = self.business_rule.list_all_elements()
    for item_uuid in items:
        if item_uuid in hosts:
            item = hosts[item_uuid]
        elif item_uuid in services:
            item = services[item_uuid]

        # Do not display children in OK state
        # todo: last_hard_state ? why not current state if state type is hard ?
        if item.last_hard_state_id == 0:
            ok_count += 1
            continue
        data = item.get_data_for_checks(hosts)
        children_output += macroresolver.resolve_simple_macros_in_string(child_template_string,
                                                                         data,
                                                                         macromodulations,
                                                                         timeperiods)

    if ok_count == len(items):
        children_output = "all checks were successful."

    # Replaces children output string in the global template
    template_string = re.sub(r"\$\(.*\)\$", children_output, output_template)
    data = self.get_data_for_checks(hosts)
    output = macroresolver.resolve_simple_macros_in_string(template_string, data,
                                                           macromodulations, timeperiods)
    return output.strip()
def fill_data_brok_from(self, data, brok_type):
    """Fill the brok data dict, depending on the brok type.

    :param data: data to fill
    :type data: dict
    :param brok_type: brok type (e.g. 'check_result')
    :type brok_type: str
    :return: None
    """
    super(SchedulingItem, self).fill_data_brok_from(data, brok_type)
    # workaround/easy trick to have the command_name of this
    # SchedulingItem in its check_result brok
    if brok_type == 'check_result':
        data['command_name'] = ''
        if self.check_command:
            data['command_name'] = self.check_command.command.command_name
def acknowledge_problem(self, notification_period, hosts, services, sticky, notify, author,
                        comment, end_time=0):
    # pylint: disable=too-many-arguments
    """Add an acknowledgement to the item. For an host, also acknowledge all
    its services that are currently problems.

    :param notification_period: notification period used for the acknowledgement
        notification
    :param hosts: Hosts object to look for objects
    :param services: Services object to look for objects
    :param sticky: if 2, the acknowledge will remain until the item returns in
        an UP/OK state
    :type sticky: integer
    :param notify: if 1, send an acknowledgement notification
    :type notify: integer
    :param author: name of the author of the acknowledge
    :type author: str
    :param comment: comment (description) of the acknowledge
    :type comment: str
    :param end_time: end (timeout) of this acknowledge in seconds (timestamp)
        (0 to never end)
    :type end_time: int
    :return: the created comment, or None when the item is OK/UP
    :rtype: None | alignak.comment.Comment
    """
    comm = None
    logger.debug("Acknowledge requested for %s %s.", self.my_type, self.get_name())
    if self.state != self.ok_up:
        # case have yet an acknowledge: drop its comment before replacing it
        if self.problem_has_been_acknowledged and self.acknowledgement:
            self.del_comment(getattr(self.acknowledgement, 'comment_id', None))

        if notify:
            self.create_notifications('ACKNOWLEDGEMENT',
                                      notification_period, hosts, services)

        self.problem_has_been_acknowledged = True
        # sticky becomes a boolean from here on
        sticky = sticky == 2

        data = {
            'ref': self.uuid, 'sticky': sticky, 'author': author, 'comment': comment,
            'end_time': end_time, 'notify': notify
        }
        self.acknowledgement = Acknowledge(data)
        if self.my_type == 'host':
            # comment_type 1 is the host comment type
            comment_type = 1
            self.broks.append(self.acknowledgement.get_raise_brok(self.get_name()))
        else:
            # comment_type 2 is the service comment type
            comment_type = 2
            self.broks.append(self.acknowledgement.get_raise_brok(self.host_name,
                                                                  self.get_name()))

        data = {
            'author': author, 'comment': comment, 'comment_type': comment_type, 'entry_type': 4,
            'source': 0, 'expires': False, 'ref': self.uuid
        }
        comm = Comment(data)
        self.acknowledgement.comment_id = comm.uuid
        self.comments[comm.uuid] = comm

        self.broks.append(self.get_update_status_brok())
        self.raise_acknowledge_log_entry()
    else:
        logger.debug("Acknowledge requested for %s %s but element state is OK/UP.",
                     self.my_type, self.get_name())

    # For an host, acknowledge all its services that are problems
    # NOTE(review): at this point `sticky` was rebound to a bool (True/False),
    # so the recursive call's `sticky == 2` test is always False for the
    # services — presumably intended to never be sticky; confirm.
    if self.my_type == 'host':
        for service_uuid in self.services:
            if service_uuid not in services:
                continue
            services[service_uuid].acknowledge_problem(notification_period, hosts, services,
                                                       sticky, notify, author, comment,
                                                       end_time)
    return comm
def check_for_expire_acknowledge(self):
    """Delete the current acknowledgement if its expiry time is past.

    An end_time of 0 means the acknowledgement never expires.

    :return: None
    """
    ack = self.acknowledgement
    if not ack:
        return
    if ack.end_time != 0 and ack.end_time < time.time():
        self.unacknowledge_problem()
def unacknowledge_problem(self):
    """Remove the acknowledgement: reset the flag, raise the expire brok and
    delete the related comment.

    :return: None
    """
    if self.problem_has_been_acknowledged:
        logger.debug("[item::%s] deleting acknowledge of %s",
                     self.get_name(),
                     self.get_full_name())
        self.problem_has_been_acknowledged = False
        if self.my_type == 'host':
            self.broks.append(self.acknowledgement.get_expire_brok(self.get_name()))
        else:
            self.broks.append(self.acknowledgement.get_expire_brok(self.host_name,
                                                                   self.get_name()))

        # delete the comment of the item related with the acknowledge
        if hasattr(self.acknowledgement, 'comment_id') and \
                self.acknowledgement.comment_id in self.comments:
            del self.comments[self.acknowledgement.comment_id]

        # Should not be deleted, a None is Good
        self.acknowledgement = None

        self.broks.append(self.get_update_status_brok())
        self.raise_unacknowledge_log_entry()
def unacknowledge_problem_if_not_sticky(self):
    """Drop the acknowledgement unless it was flagged as sticky.

    :return: None
    """
    ack = getattr(self, 'acknowledgement', None)
    if ack is not None and not ack.sticky:
        self.unacknowledge_problem()
def raise_freshness_log_entry(self, t_stale_by):
    """Raise a freshness alert log entry (warning level).

    Example: "The freshness period of host 'host_name' is expired by 1026s
    (threshold=3600s + 0s). Attempt: 1 / 1. I'm forcing the state to
    freshness state (d / HARD)."

    :param t_stale_by: time in seconds the item has been in a stale state
    :type t_stale_by: int
    :return: None
    """
    logger.warning("The freshness period of %s '%s' is expired by %ss "
                   "(threshold=%ss + %ss). Attempt: %s / %s. "
                   "I'm forcing the state to freshness state (%s / %s).",
                   self.my_type, self.get_full_name(),
                   t_stale_by, self.freshness_threshold,
                   self.additional_freshness_latency,
                   self.attempt, self.max_check_attempts,
                   self.freshness_state, self.state_type)
def set_impact_state(self):
    """We just got an impact, so go UNREACHABLE — but only if the
    configuration enables problem/impact state changes.

    The pre-impact state is saved so it can be restored later.

    :return: None
    """
    if not self.__class__.enable_problem_impacts_states_change:
        return
    logger.debug("%s is impacted and goes UNREACHABLE", self)

    # Track the old state (problem occured before a new check)
    self.state_before_impact = self.state
    self.state_id_before_impact = self.state_id
    # This flag will know if we override the impact state
    self.state_changed_since_impact = False
    # Set unreachable
    self.set_unreachable()
def unset_impact_state(self):
    """Restore the state saved before the impact, unless the state changed
    since then or impact state changes are disabled in the configuration.

    :return: None
    """
    klass = self.__class__
    if not klass.enable_problem_impacts_states_change:
        return
    if self.state_changed_since_impact:
        return
    # Roll back to the pre-impact state
    self.state = self.state_before_impact
    self.state_id = self.state_id_before_impact
def set_unreachable(self):
    """Flag the item UNREACHABLE (state id 4) and timestamp the transition.

    Unreachable means all parents (dependencies) are failed; it is distinct
    from DOWN/CRITICAL.

    :return: None
    """
    now = int(time.time())
    self.state = u'UNREACHABLE'
    self.state_id = 4
    self.last_time_unreachable = now
def is_correct(self):
    # pylint: disable=too-many-branches
    """Check if this object configuration is correct:

    * check our own specific properties (trigger, freshness, event handler,
      check command, business rule, notification interval)
    * call our parent class is_correct checker

    :return: True if the configuration is correct, otherwise False
    :rtype: bool
    """
    state = True

    if hasattr(self, 'trigger') and getattr(self, 'trigger', None):
        self.add_warning("[%s::%s] 'trigger' property is not allowed"
                         % (self.my_type, self.get_name()))

    # If no notif period, set it to None, mean 24x7
    if not hasattr(self, 'notification_period'):
        self.notification_period = None

    # If freshness_threshold is not set, use check interval or retry interval
    if hasattr(self, 'freshness_threshold') and not self.freshness_threshold:
        if getattr(self, 'check_interval', 0):
            self.freshness_threshold = self.check_interval * 60
            # self.add_warning("[%s::%s] using check interval as a freshness threshold: %d s"
            #                  % (self.my_type, self.get_name(), self.freshness_threshold))
        elif getattr(self, 'retry_interval', 0):
            self.freshness_threshold = self.retry_interval * 60
            # self.add_warning("[%s::%s] using retry interval as a freshness threshold: %d s"
            #                  % (self.my_type, self.get_name(), self.freshness_threshold))

    # If we got an event handler, it should be valid
    if getattr(self, 'event_handler', None) and not self.event_handler.is_valid():
        self.add_error("[%s::%s] event_handler '%s' is invalid"
                       % (self.my_type, self.get_name(), self.event_handler.command))
        state = False

    if not hasattr(self, 'check_command'):
        # todo: This should never happen because the default exists as an empty string
        self.add_error("[%s::%s] no property check_command" % (self.my_type, self.get_name()))
        state = False
    # Ok got a command, but maybe it's invalid
    else:
        # if not self.check_command:
        #     self.add_warning("[%s::%s] no check_command, will always be considered as Up"
        #                      % (self.my_type, self.get_name()))
        if self.check_command and not self.check_command.is_valid():
            self.add_error("[%s::%s] check_command '%s' invalid"
                           % (self.my_type, self.get_name(), self.check_command.command))
            state = False
        if self.got_business_rule:
            if not self.business_rule.is_valid():
                self.add_error("[%s::%s] business_rule invalid"
                               % (self.my_type, self.get_name()))
                for bperror in self.business_rule.configuration_errors:
                    self.add_error("[%s::%s]: %s" % (self.my_type, self.get_name(), bperror))
                state = False

    if not hasattr(self, 'notification_interval') \
            and self.notifications_enabled is True:  # pragma: no cover, should never happen
        self.add_error("[%s::%s] no notification_interval but notifications enabled"
                       % (self.my_type, self.get_name()))
        state = False

    # if no check_period, means 24x7, like for services
    if not hasattr(self, 'check_period'):
        self.check_period = None

    # Combine with the parent check: the former code rebound `state` to the
    # parent result, discarding every error detected above.
    return super(SchedulingItem, self).is_correct() and state
def find_by_filter(self, filters, all_items):
    """Return the items of this container that satisfy every filter.

    Each filter is a callable taking the ``all_items`` dict. Before applying
    the filters, the current item is exposed in that dict under the key
    'service' (when it has a ``host`` attribute) or 'host'.

    :param filters: list of filter callables
    :type filters: list
    :param all_items: monitoring items
    :type all_items: dict
    :return: list of matching items
    :rtype: list
    """
    matching = []
    for item in self:
        key = "service" if hasattr(item, "host") else "host"
        all_items[key] = item
        # all() short-circuits at the first failing filter
        if all(flt(all_items) for flt in filters):
            matching.append(item)
    return matching
def add_act_dependency(self, son_id, parent_id, notif_failure_criteria, dep_period,
                       inherits_parents):
    """Add a logical dependency for actions between two hosts or services.

    :param son_id: uuid of son host/service
    :type son_id: str
    :param parent_id: uuid of parent host/service
    :type parent_id: str
    :param notif_failure_criteria: notification failure criteria,
        notification for a dependent host may vary
    :type notif_failure_criteria: list
    :param dep_period: dependency period. Timeperiod for dependency may vary
    :type dep_period: str | None
    :param inherits_parents: if this dep will inherit from parents (timeperiod, status)
    :type inherits_parents: bool
    :return: None
    """
    if son_id not in self:
        # The former code only recorded the error and fell through,
        # raising a NameError on the unbound `son` below.
        self.add_error("Dependency son (%s) unknown, configuration error" % son_id)
        return
    son = self[son_id]
    parent = self[parent_id]

    son.act_depend_of.append((parent_id, notif_failure_criteria, dep_period, inherits_parents))
    parent.act_depend_of_me.append((son_id, notif_failure_criteria, dep_period,
                                    inherits_parents))

    # TODO: Is it necessary? We already have this info in act_depend_* attributes
    son.parent_dependencies.add(parent_id)
    parent.child_dependencies.add(son_id)
def del_act_dependency(self, son_id, parent_id):  # pragma: no cover, not yet tested
    """Remove the actions dependency between two hosts or services.

    TODO: do we really intend to remove dynamically ?

    :param son_id: uuid of son host/service
    :type son_id: str
    :param parent_id: uuid of parent host/service
    :type parent_id: str
    :return: None
    """
    son = self[son_id]
    parent = self[parent_id]

    # Filter out, in place, every dependency tuple of the son that points
    # at the parent...
    son.act_depend_of[:] = [dep for dep in son.act_depend_of if dep[0] != parent_id]
    # ... and every tuple of the parent that points back at the son
    parent.act_depend_of_me[:] = [dep for dep in parent.act_depend_of_me
                                  if dep[0] != son_id]

    # Remove in child/parents dependencies too
    # Me in father list
    parent.child_dependencies.remove(son_id)
    # and father list in mine
    son.parent_dependencies.remove(parent_id)
def add_chk_dependency(self, son_id, parent_id, notif_failure_criteria, dep_period,
                       inherits_parents):
    """Add a logical dependency for checks between two hosts or services.

    :param son_id: uuid of son host/service
    :type son_id: str
    :param parent_id: uuid of parent host/service
    :type parent_id: str
    :param notif_failure_criteria: notification failure criteria,
        notification for a dependent host may vary
    :type notif_failure_criteria: list
    :param dep_period: dependency period. Timeperiod for dependency may vary
    :type dep_period: str
    :param inherits_parents: if this dep will inherit from parents (timeperiod, status)
    :type inherits_parents: bool
    :return: None
    """
    son = self[son_id]
    parent = self[parent_id]

    dep_to_parent = (parent_id, notif_failure_criteria, 'logic_dep', dep_period,
                     inherits_parents)
    dep_to_son = (son_id, notif_failure_criteria, 'logic_dep', dep_period,
                  inherits_parents)
    son.chk_depend_of.append(dep_to_parent)
    parent.chk_depend_of_me.append(dep_to_son)

    # TODO: Is it necessary? We already have this info in act_depend_* attributes
    son.parent_dependencies.add(parent_id)
    parent.child_dependencies.add(son_id)
def create_business_rules(self, hosts, services, hostgroups, servicegroups,
                          macromodulations, timeperiods):
    """Loop on the items and delegate to SchedulingItem.create_business_rules.

    :param hosts: hosts to link to
    :type hosts: alignak.objects.host.Hosts
    :param services: services to link to
    :type services: alignak.objects.service.Services
    :param hostgroups: hostgroups to link to
    :type hostgroups: alignak.objects.hostgroup.Hostgroups
    :param servicegroups: servicegroups to link to
    :type servicegroups: alignak.objects.servicegroup.Servicegroups
    :param macromodulations: macromodulations to link to
    :type macromodulations: alignak.objects.macromodulation.Macromodulations
    :param timeperiods: timeperiods to link to
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :return: None
    """
    for one_item in self:
        one_item.create_business_rules(
            hosts, services, hostgroups, servicegroups, macromodulations, timeperiods)
def get_services_by_explosion(self, servicegroups):
    # pylint: disable=access-member-before-definition
    """Get all services of this servicegroup and add them to the members
    container, recursing through nested servicegroup members.

    :param servicegroups: servicegroups object
    :type servicegroups: alignak.objects.servicegroup.Servicegroups
    :return: empty string or list of members
    :rtype: str or list
    """
    # First we tag the hg so it will not be explode
    # if a son of it already call it
    self.already_exploded = True

    # Now the recursive part
    # rec_tag is set to False every HG we explode
    # so if True here, it must be a loop in HG
    # calls... not GOOD!
    if self.rec_tag:
        logger.error("[servicegroup::%s] got a loop in servicegroup definition",
                     self.get_name())
        if hasattr(self, 'members'):
            return self.members
        return ''

    # Ok, not a loop, we tag it and continue
    self.rec_tag = True
    sg_mbrs = self.get_servicegroup_members()
    for sg_mbr in sg_mbrs:
        servicegroup = servicegroups.find_by_name(sg_mbr.strip())
        if servicegroup is not None:
            value = servicegroup.get_services_by_explosion(servicegroups)
            if value is not None:
                self.add_members(value)

    if hasattr(self, 'members'):
        return self.members
    return ''
def add_member(self, service_name, servicegroup_name):
    """Add a member (service) to the servicegroup named servicegroup_name.
    The servicegroup is created when it does not exist yet.

    :param service_name: member (service) name
    :type service_name: str
    :param servicegroup_name: servicegroup name
    :type servicegroup_name: str
    :return: None
    """
    group = self.find_by_name(servicegroup_name)
    if group:
        group.add_members(service_name)
    else:
        # Unknown group: create it with the service as initial member
        self.add(Servicegroup({'servicegroup_name': servicegroup_name,
                               'alias': servicegroup_name,
                               'members': service_name}))
def get_members_of_group(self, gname):
    """Get all members of the group whose name is given in parameter.

    :param gname: name of the group
    :type gname: str
    :return: list of the services in the group ([] when the group is unknown)
    :rtype: list[alignak.objects.service.Service]
    """
    group = self.find_by_name(gname)
    if not group:
        return []
    return group.get_services()
def linkify_servicegroups_services(self, hosts, services):
    """Link the servicegroups with the services: the members list is a flat
    sequence of [host_name, service_description, ...] couples; replace the
    names with the service uuids and update each service's servicegroups.

    TODO: very slow for high services, so search with host list,
    not service one

    :param hosts: hosts object
    :type hosts: alignak.objects.host.Hosts
    :param services: services object
    :type services: alignak.objects.service.Services
    :return: None
    """
    for servicegroup in self:
        mbrs = servicegroup.get_services()
        # The new member list, in id
        new_mbrs = []
        seek = 0
        host_name = ''
        # A single non-empty member means an host name without a service
        # description: register it as unknown
        if len(mbrs) == 1 and mbrs[0] != '':
            servicegroup.add_unknown_members('%s' % mbrs[0])

        for mbr in mbrs:
            if not mbr:
                continue
            # Even positions are host names, odd ones service descriptions
            if seek % 2 == 0:
                host_name = mbr.strip()
            else:
                service_desc = mbr.strip()
                find = services.find_srv_by_name_and_hostname(host_name, service_desc)
                if find is not None:
                    new_mbrs.append(find.uuid)
                else:
                    host = hosts.find_by_name(host_name)
                    if not (host and host.is_excluded_for_sdesc(service_desc)):
                        servicegroup.add_unknown_members('%s,%s' % (host_name, service_desc))
                    elif host:
                        self.add_warning('servicegroup %r : %s is excluded from the '
                                         'services of the host %s'
                                         % (servicegroup, service_desc, host_name))
            seek += 1

        # Make members uniq
        new_mbrs = list(set(new_mbrs))

        # We find the id, we replace the names
        servicegroup.replace_members(new_mbrs)
        for srv_id in servicegroup.members:
            serv = services[srv_id]
            serv.servicegroups.append(servicegroup.uuid)
            # and make this uniq
            serv.servicegroups = list(set(serv.servicegroups))
def explode(self):
    """Expand the servicegroup members: resolve nested servicegroups and put
    the resulting services in each group's members container.

    :return: None
    """
    # We do not want a same service group to be exploded again and again,
    # so every group is tagged first
    all_groups = list(self.items.values())
    for grp in all_groups:
        grp.already_exploded = False

    for grp in all_groups:
        if grp.already_exploded:
            continue

        # get_services_by_explosion is recursive; the rec_tag flags let it
        # detect loops in the group definitions
        for other in list(self.items.values()):
            other.rec_tag = False
        grp.get_services_by_explosion(self)

    # We clean the temporary tags
    for grp in list(self.items.values()):
        if hasattr(grp, 'rec_tag'):
            del grp.rec_tag
        del grp.already_exploded
def main():
    """Parse the command line arguments and run the arbiter daemon.

    On Windows, multiprocessing RELAUNCHes everything, hence the loop that
    re-creates the daemon until no configuration reload is requested.

    Exits with status 1 on any unhandled exception.

    :return: None
    """
    try:
        args = parse_daemon_args(True)

        # Protect for windows multiprocessing that will RELAUNCH all
        while True:
            daemon = Arbiter(**args.__dict__)
            daemon.main()
            if not daemon.need_config_reload:
                break
            # Release the daemon before re-creating it for the reload
            daemon = None
    except Exception as exp:  # pylint: disable=broad-except
        sys.stderr.write("*** Daemon exited because: %s" % str(exp))
        traceback.print_exc()
        # sys.exit instead of the interactive-only exit() builtin, which is
        # injected by the site module and may be absent (e.g. python -S)
        sys.exit(1)
def setup_logger(logger_configuration_file, log_dir=None, process_name='', log_file=''):
    # pylint: disable=too-many-branches
    """Configure the Alignak logger:

    - get and update the content of the Json configuration file
    - configure the logger with this file

    If a log_dir and process_name are provided, the format and filename in the
    configuration file are updated with the provided values if they contain
    the patterns %(logdir)s and %(daemon)s.

    If no log_dir and process_name are provided, this function will truncate
    the log files defined in the configuration file.

    If a log file name is provided, it will override the default defined log
    file name (the 'daemons' handler).

    At first, this function checks if the logger is still existing and
    initialized to update the handlers and formatters. This mainly happens
    during the unit tests.

    :param logger_configuration_file: Python Json logger configuration file
    :rtype logger_configuration_file: str
    :param log_dir: default log directory to update the defined logging handlers
    :rtype log_dir: str
    :param process_name: process name to update the defined logging formatters
    :rtype process_name: str
    :param log_file: log file name to update the defined log file
    :rtype log_file: str
    :return: None
    """
    logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
    for handler in logger_.handlers:
        if not process_name:
            break
        # Logger is already configured?
        if getattr(handler, '_name', None) == 'daemons':
            # Update the declared formats and file names with the process name
            # This is for unit tests purpose only: alignak_tests will be replaced
            # with the provided process name
            for hdlr in logger_.handlers:
                if 'alignak_tests' in hdlr.formatter._fmt:
                    formatter = logging.Formatter(hdlr.formatter._fmt.replace("alignak_tests",
                                                                              process_name))
                    hdlr.setFormatter(formatter)
                if getattr(hdlr, 'filename', None) and 'alignak_tests' in hdlr.filename:
                    # Note: filename is a plain string; the former
                    # 'hdlr.filename._fmt.replace(...)' raised AttributeError
                    hdlr.filename = hdlr.filename.replace("alignak_tests", process_name)
            break
    else:
        # No 'daemons' handler found (or no handlers at all): load the Json
        # logger configuration file
        if not logger_configuration_file or not os.path.exists(logger_configuration_file):
            print("The logger configuration file does not exist: %s" % logger_configuration_file)
            return

        with open(logger_configuration_file, 'rt') as _file:
            config = json.load(_file)
            truncate = False
            if not process_name and not log_dir:
                # Unit-tests mode: existing log files will be truncated
                truncate = True
            if not process_name:
                process_name = 'alignak_tests'
            if not log_dir:
                log_dir = '/tmp'
            # Update the declared formats with the process name
            for formatter in config['formatters']:
                if 'format' not in config['formatters'][formatter]:
                    continue
                config['formatters'][formatter]['format'] = \
                    config['formatters'][formatter]['format'].replace("%(daemon)s", process_name)
            # Update the declared log file names with the log directory
            for hdlr in config['handlers']:
                if 'filename' not in config['handlers'][hdlr]:
                    continue
                if log_file and hdlr == 'daemons':
                    # Override the 'daemons' handler file name
                    config['handlers'][hdlr]['filename'] = log_file
                else:
                    config['handlers'][hdlr]['filename'] = \
                        config['handlers'][hdlr]['filename'].replace("%(logdir)s", log_dir)
                    config['handlers'][hdlr]['filename'] = \
                        config['handlers'][hdlr]['filename'].replace("%(daemon)s", process_name)
                if truncate and os.path.exists(config['handlers'][hdlr]['filename']):
                    with open(config['handlers'][hdlr]['filename'], "w") as file_log_file:
                        file_log_file.truncate()

        # Configure the logger, any error will raise an exception
        logger_dictConfig(config)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.