Code | Summary
---|---|
Please provide a description of the function:def get_overall_state(self, services):
    overall_state = 0

    if not self.monitored:
        overall_state = 5
    elif self.acknowledged:
        overall_state = 1
    elif self.downtimed:
        overall_state = 2
    elif self.state_type == 'HARD':
        if self.state == 'UNREACHABLE':
            overall_state = 3
        elif self.state == 'DOWN':
            overall_state = 4

    # Only consider the host's services state if all is ok (or almost...)
    if overall_state <= 2:
        for service in self.services:
            if service in services:
                service = services[service]
                # Only for monitored services
                if service.overall_state_id < 5:
                    overall_state = max(overall_state, service.overall_state_id)

    return overall_state | [
"Get the host overall state including the host self status\n and the status of its services\n\n Compute the host overall state identifier, including:\n - the acknowledged state\n - the downtime state\n\n The host overall state is (prioritized):\n - an host not monitored (5)\n - an host down (4)\n - an host unreachable (3)\n - an host downtimed (2)\n - an host acknowledged (1)\n - an host up (0)\n\n If the host overall state is <= 2, then the host overall state is the maximum value\n of the host overall state and all the host services overall states.\n\n The overall state of an host is:\n - 0 if the host is UP and all its services are OK\n - 1 if the host is DOWN or UNREACHABLE and acknowledged or\n at least one of its services is acknowledged and\n no other services are WARNING or CRITICAL\n - 2 if the host is DOWN or UNREACHABLE and in a scheduled downtime or\n at least one of its services is in a scheduled downtime and no\n other services are WARNING or CRITICAL\n - 3 if the host is UNREACHABLE or\n at least one of its services is WARNING\n - 4 if the host is DOWN or\n at least one of its services is CRITICAL\n - 5 if the host is not monitored\n\n :param services: a list of known services\n :type services: alignak.objects.service.Services\n\n :return: the host overall state\n :rtype: int\n "
] |
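
The prioritization above can be exercised outside Alignak. Below is a minimal, self-contained sketch (hypothetical FakeHost/FakeService stand-ins, not the real alignak.objects classes) showing how a single CRITICAL service raises the overall state of an otherwise UP host:

from dataclasses import dataclass, field

@dataclass
class FakeService:
    overall_state_id: int = 0  # 0..4 computed, 5 when not monitored

@dataclass
class FakeHost:
    monitored: bool = True
    acknowledged: bool = False
    downtimed: bool = False
    state_type: str = 'HARD'
    state: str = 'UP'
    services: list = field(default_factory=list)

def overall_state(host, services):
    state = 0
    if not host.monitored:
        state = 5
    elif host.acknowledged:
        state = 1
    elif host.downtimed:
        state = 2
    elif host.state_type == 'HARD':
        if host.state == 'UNREACHABLE':
            state = 3
        elif host.state == 'DOWN':
            state = 4
    # Services only raise the state when the host itself is "almost ok"
    if state <= 2:
        for svc_id in host.services:
            svc = services[svc_id]
            if svc.overall_state_id < 5:
                state = max(state, svc.overall_state_id)
    return state

services = {'svc1': FakeService(overall_state_id=4)}  # one CRITICAL service
host = FakeHost(services=['svc1'])
print(overall_state(host, services))  # -> 4: the critical service wins over an UP host
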
Please provide a description of the function:def linkify(self, timeperiods=None, commands=None, contacts=None,  # pylint: disable=R0913
            realms=None, resultmodulations=None, businessimpactmodulations=None,
            escalations=None, hostgroups=None,
            checkmodulations=None, macromodulations=None):
    self.linkify_with_timeperiods(timeperiods, 'notification_period')
    self.linkify_with_timeperiods(timeperiods, 'check_period')
    self.linkify_with_timeperiods(timeperiods, 'maintenance_period')
    self.linkify_with_timeperiods(timeperiods, 'snapshot_period')
    self.linkify_h_by_h()
    self.linkify_h_by_hg(hostgroups)
    self.linkify_one_command_with_commands(commands, 'check_command')
    self.linkify_one_command_with_commands(commands, 'event_handler')
    self.linkify_one_command_with_commands(commands, 'snapshot_command')
    self.linkify_with_contacts(contacts)
    # No more necessary
    self.linkify_h_by_realms(realms)
    self.linkify_with_resultmodulations(resultmodulations)
    self.linkify_with_business_impact_modulations(businessimpactmodulations)
    # WARNING: not all escalations will be linked here
    # (only the plain escalations, not serviceescalations or hostescalations).
    # The latter are linked in the escalations' own linkify.
    self.linkify_with_escalations(escalations)
    self.linkify_with_checkmodulations(checkmodulations)
    self.linkify_with_macromodulations(macromodulations) | [
"Create link between objects::\n\n * hosts -> timeperiods\n * hosts -> hosts (parents, etc)\n * hosts -> commands (check_command)\n * hosts -> contacts\n\n :param timeperiods: timeperiods to link\n :type timeperiods: alignak.objects.timeperiod.Timeperiods\n :param commands: commands to link\n :type commands: alignak.objects.command.Commands\n :param contacts: contacts to link\n :type contacts: alignak.objects.contact.Contacts\n :param realms: realms to link\n :type realms: alignak.objects.realm.Realms\n :param resultmodulations: resultmodulations to link\n :type resultmodulations: alignak.objects.resultmodulation.Resultmodulations\n :param businessimpactmodulations: businessimpactmodulations to link\n :type businessimpactmodulations:\n alignak.objects.businessimpactmodulation.Businessimpactmodulations\n :param escalations: escalations to link\n :type escalations: alignak.objects.escalation.Escalations\n :param hostgroups: hostgroups to link\n :type hostgroups: alignak.objects.hostgroup.Hostgroups\n :param checkmodulations: checkmodulations to link\n :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations\n :param macromodulations: macromodulations to link\n :type macromodulations: alignak.objects.macromodulation.Macromodulations\n :return: None\n "
] |
Please provide a description of the function:def linkify_h_by_h(self):
    for host in self:
        # The new member list
        new_parents = []
        for parent in getattr(host, 'parents', []):
            parent = parent.strip()
            o_parent = self.find_by_name(parent)
            if o_parent is not None:
                new_parents.append(o_parent.uuid)
            else:
                err = "the parent '%s' for the host '%s' is unknown!" % (parent,
                                                                         host.get_name())
                self.add_error(err)
        # We found the ids; we replace the names with them
        host.parents = new_parents | [
"Link hosts with their parents\n\n :return: None\n "
] |
Please provide a description of the function:def linkify_h_by_realms(self, realms):
    default_realm = realms.get_default()
    for host in self:
        if not getattr(host, 'realm', None):
            # Apply the default realm to the host
            host.realm = default_realm.uuid if default_realm else ''
            host.realm_name = default_realm.get_name() if default_realm else ''
            host.got_default_realm = True

        if host.realm not in realms:
            realm = realms.find_by_name(host.realm)
            if not realm:
                continue
            host.realm = realm.uuid
        else:
            realm = realms[host.realm] | [
"Link hosts with realms\n\n :param realms: realms object to link with\n :type realms: alignak.objects.realm.Realms\n :return: None\n "
] |
Please provide a description of the function:def linkify_h_by_hg(self, hostgroups):
    # Register host in the hostgroups
    for host in self:
        new_hostgroups = []
        if hasattr(host, 'hostgroups') and host.hostgroups != []:
            hgs = [n.strip() for n in host.hostgroups if n.strip()]
            for hg_name in hgs:
                # TODO: should an unknown hostgroup raise an error?
                hostgroup = hostgroups.find_by_name(hg_name)
                if hostgroup is not None:
                    new_hostgroups.append(hostgroup.uuid)
                else:
                    err = ("the hostgroup '%s' of the host '%s' is "
                           "unknown" % (hg_name, host.host_name))
                    host.add_error(err)
        host.hostgroups = new_hostgroups | [
"Link hosts with hostgroups\n\n :param hostgroups: hostgroups object to link with\n :type hostgroups: alignak.objects.hostgroup.Hostgroups\n :return: None\n "
] |
Please provide a description of the function:def explode(self, hostgroups, contactgroups):
    for template in list(self.templates.values()):
        # items::explode_contact_groups_into_contacts
        # take all contacts from our contact_groups into our contact property
        self.explode_contact_groups_into_contacts(template, contactgroups)

    # Register host in the hostgroups
    for host in self:
        # items::explode_contact_groups_into_contacts
        # take all contacts from our contact_groups into our contact property
        self.explode_contact_groups_into_contacts(host, contactgroups)

        if hasattr(host, 'host_name') and hasattr(host, 'hostgroups'):
            hname = host.host_name
            for hostgroup in host.hostgroups:
                hostgroups.add_member(hname, hostgroup.strip()) | [
"Explode hosts with hostgroups, contactgroups::\n\n * Add contact from contactgroups to host contacts\n * Add host into their hostgroups as hostgroup members\n\n :param hostgroups: Hostgroups to explode\n :type hostgroups: alignak.objects.hostgroup.Hostgroups\n :param contactgroups: Contactgorups to explode\n :type contactgroups: alignak.objects.contactgroup.Contactgroups\n :return: None\n "
] |
Please provide a description of the function:def apply_dependencies(self):
    for host in self:
        for parent_id in getattr(host, 'parents', []):
            if parent_id is None:
                continue

            parent = self[parent_id]
            if parent.active_checks_enabled:
                # Add parent in the list
                host.act_depend_of.append((parent_id, ['d', 'x', 's', 'f'], '', True))

                # Add child in the parent
                parent.act_depend_of_me.append((host.uuid, ['d', 'x', 's', 'f'], '', True))

                # And add the parent/child dep filling too, for broking
                parent.child_dependencies.add(host.uuid)
                host.parent_dependencies.add(parent_id) | [
"Loop on hosts and register dependency between parent and son\n\n call Host.fill_parents_dependency()\n\n :return: None\n "
] |
Please provide a description of the function:def find_hosts_that_use_template(self, tpl_name):
    return [h.host_name for h in self if tpl_name in h.tags if hasattr(h, "host_name")] | [
"Find hosts that use the template defined in argument tpl_name\n\n :param tpl_name: the template name we filter or\n :type tpl_name: str\n :return: list of the host_name of the hosts that got the template tpl_name in tags\n :rtype: list[str]\n "
] |
Please provide a description of the function:def is_correct(self):
    state = True

    # Internal checks before executing the inherited function...
    loop = self.no_loop_in_parents("self", "parents")
    if loop:
        self.add_error("Loop detected while checking hosts")
        state = False
        for uuid, item in list(self.items.items()):
            for elem in loop:
                if elem == uuid:
                    self.add_error("Host %s is parent in dependency defined in %s"
                                   % (item.get_name(), item.imported_from))
                elif elem in item.parents:
                    self.add_error("Host %s is child in dependency defined in %s"
                                   % (self[elem].get_name(), self[elem].imported_from))

    return super(Hosts, self).is_correct() and state | [
"Check if the hosts list configuration is correct ::\n\n * check if any loop exists in each host dependencies\n * Call our parent class is_correct checker\n\n :return: True if the configuration is correct, otherwise False\n :rtype: bool\n "
] |
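
The no_loop_in_parents() helper is not shown in this entry; the sketch below only illustrates the general idea it implements — detecting a cycle in the hosts' parents graph with a depth-first search over plain ids (hypothetical function, not Alignak's implementation):

def find_parent_loop(parents_by_host):
    """Return the ids on a detected back edge (a cycle exists), or []."""
    WHITE, GRAY, BLACK = 0, 1, 2
    color = {h: WHITE for h in parents_by_host}
    in_loop = set()

    def visit(node):
        color[node] = GRAY
        for parent in parents_by_host.get(node, []):
            if color.get(parent, WHITE) == GRAY:
                in_loop.update((node, parent))  # back edge: cycle found
            elif color.get(parent, WHITE) == WHITE:
                visit(parent)
        color[node] = BLACK

    for host in parents_by_host:
        if color[host] == WHITE:
            visit(host)
    return list(in_loop)

print(find_parent_loop({'a': ['b'], 'b': ['c'], 'c': ['a']}))  # two ids of the cycle
print(find_parent_loop({'a': ['b'], 'b': []}))                 # []
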
Please provide a description of the function:def is_me(self):  # pragma: no cover, seems not to be used anywhere
    logger.info("And arbiter is launched with the hostname:%s "
                "from an arbiter point of view of addr:%s", self.host_name, socket.getfqdn())
    return self.host_name == socket.getfqdn() or self.host_name == socket.gethostname() | [
"Check if parameter name if same than name of this object\n\n TODO: is it useful?\n\n :return: true if parameter name if same than this name\n :rtype: bool\n "
] |
Please provide a description of the function:def do_not_run(self):
    logger.debug("[%s] do_not_run", self.name)

    try:
        self.con.get('_do_not_run')
        return True
    except HTTPClientConnectionException as exp:  # pragma: no cover, simple protection
        self.add_failed_check_attempt("Connection error when "
                                      "sending do not run: %s" % str(exp))
        self.set_dead()
    except HTTPClientTimeoutException as exp:  # pragma: no cover, simple protection
        self.add_failed_check_attempt("Connection timeout when "
                                      "sending do not run: %s" % str(exp))
    except HTTPClientException as exp:
        self.add_failed_check_attempt("Error when "
                                      "sending do not run: %s" % str(exp))

    return False | [
"Check if satellite running or not\n If not, try to run\n\n :return: true if satellite not running\n :rtype: bool\n "
] |
Please provide a description of the function:def get_broks(self, broker_name):
    logger.debug("Broker %s requests my broks list", broker_name)
    res = []
    if not broker_name:
        return res

    for broker_link in list(self.brokers.values()):
        if broker_name == broker_link.name:
            for brok in sorted(broker_link.broks, key=lambda x: x.creation_time):
                # Only provide broks that have already been sent to our external modules
                if getattr(brok, 'sent_to_externals', False):
                    res.append(brok)
                    brok.got = True
            broker_link.broks = [b for b in broker_link.broks if not getattr(b, 'got', False)]
            logger.debug("Providing %d broks to %s", len(res), broker_name)
            break
    else:
        logger.warning("Got a brok request from an unknown broker: %s", broker_name)

    return res | [
"Send broks to a specific broker\n\n :param broker_name: broker name to send broks\n :type broker_name: str\n :greturn: dict of brok for this broker\n :rtype: dict[alignak.brok.Brok]\n "
] |
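
The tag-then-filter idiom used above (mark handed-out broks with a got flag, then rebuild the retained list) can be shown in isolation; this is a generic sketch, not Alignak code:

class Item:
    def __init__(self, n):
        self.n = n

pending = [Item(1), Item(2), Item(3)]
handed_out = []
for item in pending:
    if item.n != 2:          # some eligibility test
        handed_out.append(item)
        item.got = True      # tag instead of removing while iterating
pending = [i for i in pending if not getattr(i, 'got', False)]
print([i.n for i in handed_out], [i.n for i in pending])  # [1, 3] [2]
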
Please provide a description of the function:def compensate_system_time_change(self, difference):  # pragma: no cover,
    # pylint: disable=too-many-branches
    # not with unit tests
    super(Alignak, self).compensate_system_time_change(difference)

    # We only need to change some values
    self.program_start = max(0, self.program_start + difference)

    if not hasattr(self.sched, "conf"):
        # Race condition where the time changed before we got the conf
        return

    # Then we compensate all hosts/services
    for host in self.sched.hosts:
        host.compensate_system_time_change(difference)
    for serv in self.sched.services:
        serv.compensate_system_time_change(difference)

    # Now all checks
    for chk in list(self.sched.checks.values()):
        # Already launched checks should not be touched
        if chk.status == u'scheduled' and chk.t_to_go is not None:
            t_to_go = chk.t_to_go
            ref = self.sched.find_item_by_id(chk.ref)

            new_t = max(0, t_to_go + difference)
            timeperiod = self.sched.timeperiods[ref.check_period]
            if timeperiod is not None:
                # But it's not so simple: we must match the timeperiod
                new_t = timeperiod.get_next_valid_time_from_t(new_t)

            # But maybe there is no more valid time! Not good :(
            # Flag it as an error, with an error output
            if new_t is None:
                chk.state = u'waitconsume'
                chk.exit_status = 2
                chk.output = '(Error: there is no available check time after time change!)'
                chk.check_time = time.time()
                chk.execution_time = 0
            else:
                chk.t_to_go = new_t
                ref.next_chk = new_t

    # Now all actions
    for act in list(self.sched.actions.values()):
        # Already launched actions should not be touched
        if act.status == u'scheduled':
            t_to_go = act.t_to_go

            # Event handlers do not have a ref
            ref_id = getattr(act, 'ref', None)
            new_t = max(0, t_to_go + difference)

            # Notifications should be checked against the notification_period
            if act.is_a == u'notification':
                ref = self.sched.find_item_by_id(ref_id)
                if ref.notification_period:
                    # But it's not so simple: we must match the timeperiod
                    notification_period = self.sched.timeperiods[ref.notification_period]
                    new_t = notification_period.get_next_valid_time_from_t(new_t)

                # And update the creation_time variable too
                act.creation_time += difference

            # But maybe there is no more valid time! Not good :(
            # Flag it as an error, with an error output
            if new_t is None:
                act.state = 'waitconsume'
                act.exit_status = 2
                act.output = '(Error: there is no available check time after time change!)'
                act.check_time = time.time()
                act.execution_time = 0
            else:
                act.t_to_go = new_t | [
"Compensate a system time change of difference for all hosts/services/checks/notifs\n\n :param difference: difference in seconds\n :type difference: int\n :return: None\n "
] |
Please provide a description of the function:def do_loop_turn(self):
    if not self.first_scheduling:
        # Ok, now all is initialized, we can make the initial broks
        logger.info("First scheduling launched")
        _t0 = time.time()
        # Program start brok
        self.sched.initial_program_status()
        # First scheduling
        self.sched.schedule()
        statsmgr.timer('first_scheduling', time.time() - _t0)
        logger.info("First scheduling done")

        # Connect to our passive satellites if needed
        for satellite in [s for s in list(self.pollers.values()) if s.passive]:
            if not self.daemon_connection_init(satellite):
                logger.error("Passive satellite connection failed: %s", satellite)

        for satellite in [s for s in list(self.reactionners.values()) if s.passive]:
            if not self.daemon_connection_init(satellite):
                logger.error("Passive satellite connection failed: %s", satellite)

        # Ticks are for recurrent function calls like consume, delete zombies, etc.
        self.sched.ticks = 0
        self.first_scheduling = True

    # Each loop turn, execute the daemon specific treatment...
    # ... only if the daemon has a configuration to manage
    if self.sched.pushed_conf:
        # If scheduling is not yet enabled, enable scheduling
        if not self.sched.must_schedule:
            self.sched.start_scheduling()
        self.sched.before_run()
        self.sched.run()
    else:
        logger.warning("#%d - No monitoring configuration to schedule...",
                       self.loop_count) | [
"Scheduler loop turn\n\n Simply run the Alignak scheduler loop\n\n This is called when a configuration got received by the scheduler daemon. As of it,\n check if the first scheduling has been done... and manage this.\n\n :return: None\n "
] |
Please provide a description of the function:def get_managed_configurations(self):
    # for scheduler_link in list(self.schedulers.values()):
    #     res[scheduler_link.instance_id] = {
    #         'hash': scheduler_link.hash,
    #         'push_flavor': scheduler_link.push_flavor,
    #         'managed_conf_id': scheduler_link.managed_conf_id
    #     }
    res = {}
    if self.sched.pushed_conf and self.cur_conf and 'instance_id' in self.cur_conf:
        res[self.cur_conf['instance_id']] = {
            'hash': self.cur_conf['hash'],
            'push_flavor': self.cur_conf['push_flavor'],
            'managed_conf_id': self.cur_conf['managed_conf_id']
        }
    logger.debug("Get managed configuration: %s", res)
    return res | [
"Get the configurations managed by this scheduler\n\n The configuration managed by a scheduler is the self configuration got\n by the scheduler during the dispatching.\n\n :return: a dict of scheduler links with instance_id as key and\n hash, push_flavor and configuration identifier as values\n :rtype: dict\n "
] |
Please provide a description of the function:def setup_new_conf(self):
    # pylint: disable=too-many-statements, too-many-branches, too-many-locals
    # Execute the base class treatment...
    super(Alignak, self).setup_new_conf()

    # ...then our own specific treatment!
    with self.conf_lock:
        # self_conf is our own configuration from the alignak environment
        # self_conf = self.cur_conf['self_conf']
        logger.debug("Got config: %s", self.cur_conf)
        if 'conf_part' not in self.cur_conf:
            self.cur_conf['conf_part'] = None
        conf_part = self.cur_conf['conf_part']

        # Ok now we can save the retention data
        if self.sched.pushed_conf is not None:
            self.sched.update_retention()

        # Get the monitored objects configuration
        t00 = time.time()
        received_conf_part = None
        try:
            received_conf_part = unserialize(conf_part)
            assert received_conf_part is not None
        except AssertionError as exp:
            # This to indicate that no configuration is managed by this scheduler...
            logger.warning("No managed configuration received from arbiter")
        except AlignakClassLookupException as exp:  # pragma: no cover
            # This to indicate that the new configuration is not managed...
            self.new_conf = {
                "_status": "Cannot un-serialize configuration received from arbiter",
                "_error": str(exp)
            }
            logger.error(self.new_conf)
            logger.error("Back trace of the error:\n%s", traceback.format_exc())
            return
        except Exception as exp:  # pylint: disable=broad-except
            # This to indicate that the new configuration is not managed...
            self.new_conf = {
                "_status": "Cannot un-serialize configuration received from arbiter",
                "_error": str(exp)
            }
            logger.error(self.new_conf)
            self.exit_on_exception(exp, str(self.new_conf))

        # if not received_conf_part:
        #     return

        logger.info("Monitored configuration %s received at %d. Un-serialized in %d secs",
                    received_conf_part, t00, time.time() - t00)
        logger.info("Scheduler received configuration : %s", received_conf_part)

        # Now we create our pollers, reactionners and brokers
        for link_type in ['pollers', 'reactionners', 'brokers']:
            if link_type not in self.cur_conf['satellites']:
                logger.error("Missing %s in the configuration!", link_type)
                continue

            my_satellites = getattr(self, link_type, {})
            received_satellites = self.cur_conf['satellites'][link_type]
            for link_uuid in received_satellites:
                rs_conf = received_satellites[link_uuid]
                logger.debug("- received %s - %s: %s", rs_conf['instance_id'],
                             rs_conf['type'], rs_conf['name'])

                # Must look if we already had a configuration and save our broks
                already_got = rs_conf['instance_id'] in my_satellites
                broks = []
                actions = {}
                wait_homerun = {}
                external_commands = {}
                running_id = 0
                if already_got:
                    logger.warning("I already got: %s", rs_conf['instance_id'])
                    # Save some information
                    running_id = my_satellites[link_uuid].running_id
                    (broks, actions,
                     wait_homerun, external_commands) = \
                        my_satellites[link_uuid].get_and_clear_context()
                    # Delete the former link
                    del my_satellites[link_uuid]

                # My new satellite link...
                new_link = SatelliteLink.get_a_satellite_link(link_type[:-1],
                                                              rs_conf)
                my_satellites[new_link.uuid] = new_link
                logger.info("I got a new %s satellite: %s", link_type[:-1], new_link)

                new_link.running_id = running_id
                new_link.external_commands = external_commands
                new_link.broks = broks
                new_link.wait_homerun = wait_homerun
                new_link.actions = actions

                # Replacing the satellite address and port by those defined in satellite_map
                if new_link.name in self.cur_conf['override_conf'].get('satellite_map', {}):
                    override_conf = self.cur_conf['override_conf']
                    overriding = override_conf.get('satellite_map')[new_link.name]
                    logger.warning("Do not override the configuration for: %s, with: %s. "
                                   "Please check whether this is necessary!",
                                   new_link.name, overriding)

        # First mix conf and override_conf to have our definitive conf
        for prop in getattr(self.cur_conf, 'override_conf', []):
            logger.debug("Overridden: %s / %s ", prop, getattr(received_conf_part, prop, None))
            logger.debug("Overriding: %s / %s ", prop, self.cur_conf['override_conf'])
            setattr(received_conf_part, prop, self.cur_conf['override_conf'].get(prop, None))

        # Scheduler modules
        if not self.have_modules:
            try:
                logger.debug("Modules configuration: %s", self.cur_conf['modules'])
                self.modules = unserialize(self.cur_conf['modules'], no_load=True)
            except AlignakClassLookupException as exp:  # pragma: no cover, simple protection
                logger.error('Cannot un-serialize modules configuration '
                             'received from arbiter: %s', exp)
            if self.modules:
                logger.debug("I received some modules configuration: %s", self.modules)
                self.have_modules = True

                self.do_load_modules(self.modules)
                # and start external modules too
                self.modules_manager.start_external_instances()
            else:
                logger.info("I do not have modules")

        if received_conf_part:
            logger.info("Loading configuration...")

            # Propagate the global parameters to the configuration items
            received_conf_part.explode_global_conf()

            # We give the configuration to our scheduler
            self.sched.reset()
            self.sched.load_conf(self.cur_conf['instance_id'],
                                 self.cur_conf['instance_name'],
                                 received_conf_part)

            # Once loaded, the scheduler has an inner pushed_conf object
            logger.info("Loaded: %s", self.sched.pushed_conf)

            # Update the scheduler ticks according to the daemon configuration
            self.sched.update_recurrent_works_tick(self)

            # We must update our pushed configuration macros with correct values
            # from the configuration parameters
            # self.sched.pushed_conf.fill_resource_macros_names_macros()

            # Creating the Macroresolver Class & unique instance
            m_solver = MacroResolver()
            m_solver.init(received_conf_part)

            # Now create the external commands manager
            # We are an applyer: our role is not to dispatch commands, but to apply them
            ecm = ExternalCommandManager(
                received_conf_part, 'applyer', self.sched,
                received_conf_part.accept_passive_unknown_check_results,
                received_conf_part.log_external_commands)

            # Scheduler needs to know about this external command manager to use it if necessary
            self.sched.external_commands_manager = ecm

            # Ok now we can load the retention data
            self.sched.retention_load()

            # Log hosts/services initial states
            self.sched.log_initial_states()

        # Create brok new conf
        brok = Brok({'type': 'new_conf', 'data': {}})
        self.sched.add_brok(brok)

        # Initialize connection with all our satellites
        logger.info("Initializing connection with my satellites:")
        my_satellites = self.get_links_of_type(s_type='')
        for satellite in list(my_satellites.values()):
            logger.info("- : %s/%s", satellite.type, satellite.name)
            if not self.daemon_connection_init(satellite):
                logger.error("Satellite connection failed: %s", satellite)

        if received_conf_part:
            # Enable the scheduling process
            logger.info("Loaded: %s", self.sched.pushed_conf)
            self.sched.start_scheduling()

        # Now I have a configuration!
        self.have_conf = True | [
"Setup new conf received for scheduler\n\n :return: None\n "
] |
Please provide a description of the function:def clean_previous_run(self):
    # Execute the base class treatment...
    super(Alignak, self).clean_previous_run()

    # Clean all lists
    self.pollers.clear()
    self.reactionners.clear()
    self.brokers.clear() | [
"Clean variables from previous configuration\n\n :return: None\n "
] |
Please provide a description of the function:def get_daemon_stats(self, details=False):
    # Call the base Daemon one
    res = super(Alignak, self).get_daemon_stats(details=details)

    res.update({'name': self.name, 'type': self.type, 'monitored_objects': {}})

    counters = res['counters']

    # Satellites counters
    counters['brokers'] = len(self.brokers)
    counters['pollers'] = len(self.pollers)
    counters['reactionners'] = len(self.reactionners)
    counters['receivers'] = len(self.receivers)

    if not self.sched:
        return res

    # # Hosts/services problems counters
    # m_solver = MacroResolver()
    # counters['hosts_problems'] = m_solver._get_total_host_problems()
    # counters['hosts_unhandled_problems'] = m_solver._get_total_host_problems_unhandled()
    # counters['services_problems'] = m_solver._get_total_service_problems()
    # counters['services_unhandled_problems'] = m_solver._get_total_service_problems_unhandled()

    # Get statistics from the scheduler
    scheduler_stats = self.sched.get_scheduler_stats(details=details)
    res['counters'].update(scheduler_stats['counters'])
    scheduler_stats.pop('counters')
    res.update(scheduler_stats)

    return res | [
"Increase the stats provided by the Daemon base class\n\n :return: stats dictionary\n :rtype: dict\n "
] |
Please provide a description of the function:def get_monitoring_problems(self):
    res = {}
    if not self.sched:
        return res

    # Get statistics from the scheduler
    scheduler_stats = self.sched.get_scheduler_stats(details=True)
    if 'livesynthesis' in scheduler_stats:
        res['livesynthesis'] = scheduler_stats['livesynthesis']
    if 'problems' in scheduler_stats:
        res['problems'] = scheduler_stats['problems']

    return res | [
"Get the current scheduler livesynthesis\n\n :return: live synthesis and problems dictionary\n :rtype: dict\n "
] |
Please provide a description of the function:def merge(self, services):
    for extinfo in self:
        if hasattr(extinfo, 'register') and not getattr(extinfo, 'register'):
            # We don't have to merge templates
            continue
        hosts_names = extinfo.get_name().split(",")
        for host_name in hosts_names:
            serv = services.find_srv_by_name_and_hostname(host_name,
                                                          extinfo.service_description)
            if serv is not None:
                # Fusion
                self.merge_extinfo(serv, extinfo) | [
"Merge extended host information into services\n\n :param services: services list, to look for a specific one\n :type services: alignak.objects.service.Services\n :return: None\n "
] |
Please provide a description of the function:def merge_extinfo(service, extinfo):
    properties = ['notes', 'notes_url', 'icon_image', 'icon_image_alt']
    # service properties have precedence over serviceextinfo properties
    for prop in properties:
        if getattr(service, prop) == '' and getattr(extinfo, prop) != '':
            setattr(service, prop, getattr(extinfo, prop)) | [
"Merge extended host information into a service\n\n :param service: the service to edit\n :type service: alignak.objects.service.Service\n :param extinfo: the external info we get data from\n :type extinfo: alignak.objects.serviceextinfo.ServiceExtInfo\n :return: None\n "
] |
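
The precedence rule is easy to demonstrate in isolation: a property is copied from the extended info only when the target left it empty. A minimal sketch with hypothetical bare objects:

class Obj:
    pass

service, extinfo = Obj(), Obj()
service.notes, service.icon_image = '', 'existing.png'
extinfo.notes, extinfo.icon_image = 'from extinfo', 'other.png'

for prop in ('notes', 'icon_image'):
    if getattr(service, prop) == '' and getattr(extinfo, prop) != '':
        setattr(service, prop, getattr(extinfo, prop))

print(service.notes)       # 'from extinfo' - was empty, filled in
print(service.icon_image)  # 'existing.png' - service value kept
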
Please provide a description of the function:def get_command_and_args(self):
    # First protect
    p_call = self.call.replace(r'\!', '___PROTECT_EXCLAMATION___')
    tab = p_call.split('!')
    return tab[0].strip(), [s.replace('___PROTECT_EXCLAMATION___', '!') for s in tab[1:]] | [
"We want to get the command and the args with ! splitting.\n but don't forget to protect against the \\! to avoid splitting on them\n\n Remember: A Nagios-like command is command_name!arg1!arg2!...\n\n :return: None\n "
] |
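
The protect/split/restore trick generalizes to any escaped separator. A self-contained sketch (assuming the same sentinel string as above):

def split_command(call):
    protected = call.replace(r'\!', '___PROTECT_EXCLAMATION___')
    parts = protected.split('!')
    return parts[0].strip(), [p.replace('___PROTECT_EXCLAMATION___', '!')
                              for p in parts[1:]]

print(split_command(r'check_http!-u /index.html!-w 3\!5'))
# -> ('check_http', ['-u /index.html', '-w 3!5'])
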
Please provide a description of the function:def get_a_satellite_link(sat_type, sat_dict):
    cls = get_alignak_class('alignak.objects.%slink.%sLink' % (sat_type, sat_type.capitalize()))
    return cls(params=sat_dict, parsing=False) | [
"Get a SatelliteLink object for a given satellite type and a dictionary\n\n :param sat_type: type of satellite\n :param sat_dict: satellite configuration data\n :return:\n "
] |
Please provide a description of the function:def get_livestate(self):
    livestate = 0
    if self.active:
        if not self.reachable:
            livestate = 1
        elif not self.alive:
            livestate = 2
    else:
        livestate = 3

    livestate_output = "%s/%s is %s" % (self.type, self.name, [
        "up and running.",
        "warning because not reachable.",
        "critical because not responding.",
        "not active by configuration."
    ][livestate])

    return (livestate, livestate_output) | [
"Get the SatelliteLink live state.\n\n The live state is a tuple information containing a state identifier and a message, where:\n state is:\n - 0 for an up and running satellite\n - 1 if the satellite is not reachale\n - 2 if the satellite is dead\n - 3 else (not active)\n\n :return: tuple\n "
] |
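
A standalone sketch of the livestate derivation, with plain booleans standing in for the SatelliteLink attributes (the numeric codes mirror the docstring above):

def livestate(active, reachable, alive):
    if not active:
        return 3           # not active by configuration
    if not reachable:
        return 1           # warning
    if not alive:
        return 2           # critical
    return 0               # up and running

assert livestate(active=True, reachable=True, alive=True) == 0
assert livestate(active=True, reachable=False, alive=True) == 1
assert livestate(active=False, reachable=True, alive=True) == 3
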
Please provide a description of the function:def set_arbiter_satellite_map(self, satellite_map=None):
    self.satellite_map = {
        'address': self.address, 'port': self.port,
        'use_ssl': self.use_ssl, 'hard_ssl_name_check': self.hard_ssl_name_check
    }
    if satellite_map:
        self.satellite_map.update(satellite_map) | [
"\n satellite_map is the satellites map in current context:\n - A SatelliteLink is owned by an Arbiter\n - satellite_map attribute of a SatelliteLink is the map defined\n IN THE satellite configuration but for creating connections,\n we need to have the satellites map from the Arbiter point of view\n\n :return: None\n "
] |
Please provide a description of the function:def get_and_clear_context(self):
    res = (self.broks, self.actions, self.wait_homerun, self.pushed_commands)
    self.broks = []
    self.actions = {}
    self.wait_homerun = {}
    self.pushed_commands = []
    return res | [
"Get and clean all of our broks, actions, external commands and homerun\n\n :return: list of all broks of the satellite link\n :rtype: list\n "
] |
Please provide a description of the function:def prepare_for_conf(self):
    logger.debug("- preparing: %s", self)
    self.cfg = {
        'self_conf': self.give_satellite_cfg(),
        'schedulers': {},
        'arbiters': {}
    }
    logger.debug("- prepared: %s", self.cfg) | [
"Initialize the pushed configuration dictionary\n with the inner properties that are to be propagated to the satellite link.\n\n :return: None\n "
] |
Please provide a description of the function:def give_satellite_cfg(self):
    # All the satellite link class properties that are 'to_send' are stored in a
    # dictionary to be pushed to the satellite when the configuration is dispatched
    res = {}
    properties = self.__class__.properties
    for prop, entry in list(properties.items()):
        if hasattr(self, prop) and entry.to_send:
            res[prop] = getattr(self, prop)
    return res | [
"Get the default information for a satellite.\n\n Overridden by the specific satellites links\n\n :return: dictionary of information common to all the links\n :rtype: dict\n "
] |
Please provide a description of the function:def give_satellite_json(self):
    daemon_properties = ['type', 'name', 'uri', 'spare', 'configuration_sent',
                         'realm_name', 'manage_sub_realms',
                         'active', 'reachable', 'alive', 'passive',
                         'last_check', 'polling_interval', 'max_check_attempts']

    (livestate, livestate_output) = self.get_livestate()
    res = {
        "livestate": livestate,
        "livestate_output": livestate_output
    }
    for sat_prop in daemon_properties:
        res[sat_prop] = getattr(self, sat_prop, 'not_yet_defined')

    return res | [
"Get the json information for a satellite.\n\n This to provide information that will be exposed by a daemon on its HTTP interface.\n\n :return: dictionary of information common to all the links\n :rtype: dict\n "
] |
Please provide a description of the function:def manages(self, cfg_part):
    logger.debug("Do I (%s/%s) manage: %s, my managed configuration(s): %s",
                 self.type, self.name, cfg_part, self.cfg_managed)

    # If we do not yet manage a configuration
    if not self.cfg_managed:
        logger.info("I (%s/%s) do not manage (yet) any configuration!", self.type, self.name)
        return False

    # Check in the schedulers list configurations
    for managed_cfg in list(self.cfg_managed.values()):
        # Does the id and the push flavor match the given configuration part?
        if managed_cfg['managed_conf_id'] == cfg_part.instance_id \
                and managed_cfg['push_flavor'] == cfg_part.push_flavor:
            logger.debug("I do manage this configuration: %s", cfg_part)
            break
    else:
        logger.warning("I (%s/%s) do not manage this configuration: %s",
                       self.type, self.name, cfg_part)
        return False

    return True | [
"Tell if the satellite is managing this configuration part\n\n The managed configuration is formed as a dictionary indexed on the link instance_id:\n {\n u'SchedulerLink_1': {\n u'hash': u'4d08630a3483e1eac7898e7a721bd5d7768c8320',\n u'push_flavor': u'4d08630a3483e1eac7898e7a721bd5d7768c8320',\n u'managed_conf_id': [u'Config_1']\n }\n }\n\n Note that the managed configuration is a string array rather than a simple string...\n no special for this reason, probably due to the serialization when the configuration is\n pushed :/\n\n :param cfg_part: configuration part as prepare by the Dispatcher\n :type cfg_part: Conf\n :return: True if the satellite manages this configuration\n :rtype: bool\n "
] |
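
The loop above relies on Python's for/else: the else clause runs only when the loop completed without hitting break. A minimal sketch of the same matching logic (hypothetical helper name):

def manages_flavor(managed, wanted_id, wanted_flavor):
    for cfg in managed.values():
        if cfg['managed_conf_id'] == wanted_id and cfg['push_flavor'] == wanted_flavor:
            break          # found: skip the else clause
    else:
        return False       # loop exhausted: nothing matched
    return True

managed = {'s1': {'managed_conf_id': 'Config_1', 'push_flavor': 'abc'}}
print(manages_flavor(managed, 'Config_1', 'abc'))  # True
print(manages_flavor(managed, 'Config_1', 'xyz'))  # False
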
Please provide a description of the function:def create_connection(self):
    # Create the HTTP client for the connection
    try:
        self.con = HTTPClient(address=self.satellite_map['address'],
                              port=self.satellite_map['port'],
                              short_timeout=self.short_timeout,
                              long_timeout=self.long_timeout,
                              use_ssl=self.satellite_map['use_ssl'],
                              strong_ssl=self.satellite_map['hard_ssl_name_check'])
        self.uri = self.con.uri
    except HTTPClientException as exp:
        # logger.error("Error with '%s' when creating client: %s", self.name, str(exp))
        # Set the satellite as dead
        self.set_dead()
        raise LinkError("Error with '%s' when creating client: %s" % (self.name, str(exp))) | [
"Initialize HTTP connection with a satellite (con attribute) and\n set its uri attribute\n\n This is called on the satellite link initialization\n\n :return: None\n "
] |
Please provide a description of the function:def set_alive(self):
    was_alive = self.alive
    self.alive = True
    self.reachable = True
    self.attempt = 0

    # We came from dead to alive! We must propagate the good news
    if not was_alive:
        logger.info("Setting %s satellite as alive :)", self.name)
        self.broks.append(self.get_update_status_brok()) | [
"Set alive, reachable, and reset attempts.\n If we change state, raise a status brok update\n\n alive, means the daemon is prenset in the system\n reachable, means that the HTTP connection is valid\n\n With this function we confirm that the daemon is reachable and, thus, we assume it is alive!\n\n :return: None\n "
] |
Please provide a description of the function:def set_dead(self):
    was_alive = self.alive
    self.alive = False
    self.reachable = False
    self.attempt = 0
    # We will have to create a new connection...
    self.con = None

    # We are dead now! We must propagate the sad news...
    if was_alive and not self.stopping:
        logger.warning("Setting the satellite %s as dead :(", self.name)
        self.broks.append(self.get_update_status_brok()) | [
"Set the satellite into dead state:\n If we change state, raise a status brok update\n\n :return:None\n "
] |
Please provide a description of the function:def add_failed_check_attempt(self, reason=''):
    self.reachable = False
    self.attempt = self.attempt + 1

    logger.debug("Failed attempt for %s (%d/%d), reason: %s",
                 self.name, self.attempt, self.max_check_attempts, reason)
    # Don't need to warn again and again if the satellite is already dead
    # Only warn when it is alive
    if self.alive:
        if not self.stopping:
            logger.warning("Add failed attempt for %s (%d/%d) - %s",
                           self.name, self.attempt, self.max_check_attempts, reason)
        else:
            logger.info("Stopping... failed attempt for %s (%d/%d) - also probably stopping",
                        self.name, self.attempt, self.max_check_attempts)

    # If we reached the maximum attempts, set the daemon as dead
    if self.attempt >= self.max_check_attempts:
        if not self.stopping:
            logger.warning("Set %s as dead, too much failed attempts (%d), last problem is: %s",
                           self.name, self.max_check_attempts, reason)
        else:
            logger.info("Stopping... set %s as dead, too much failed attempts (%d)",
                        self.name, self.max_check_attempts)

        self.set_dead() | [
"Set the daemon as unreachable and add a failed attempt\n if we reach the maximum attempts, set the daemon as dead\n\n :param reason: the reason of adding an attempts (stack trace sometimes)\n :type reason: str\n :return: None\n "
] |
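
Taken together, set_alive(), set_dead() and add_failed_check_attempt() form a small liveness state machine. A reduced, runnable model (hypothetical class; broks and logging omitted):

class Liveness:
    def __init__(self, max_check_attempts=3):
        self.alive = True
        self.reachable = True
        self.attempt = 0
        self.max_check_attempts = max_check_attempts

    def failed_attempt(self):
        self.reachable = False
        self.attempt += 1
        if self.attempt >= self.max_check_attempts:
            self.alive = False          # the set_dead() transition

    def success(self):                  # the set_alive() transition
        self.alive = True
        self.reachable = True
        self.attempt = 0

link = Liveness()
for _ in range(3):
    link.failed_attempt()
print(link.alive)                # False - three strikes
link.success()
print(link.alive, link.attempt)  # True 0
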
Please provide a description of the function:def valid_connection(*outer_args, **outer_kwargs):
    # pylint: disable=unused-argument, no-method-argument
    def decorator(func):  # pylint: disable=missing-docstring
        def decorated(*args, **kwargs):  # pylint: disable=missing-docstring
            # outer_args and outer_kwargs are the decorator arguments
            # args and kwargs are the decorated function arguments
            link = args[0]
            if not link.con:
                raise LinkError("The connection is not created for %s" % link.name)
            if not link.running_id:
                raise LinkError("The connection is not initialized for %s" % link.name)

            return func(*args, **kwargs)
        return decorated
    return decorator | [
"Check if the daemon connection is established and valid"
] |
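
The same guard-decorator shape can be reproduced in a self-contained form; the parameterless outer layer mirrors the structure above so the decorator can later grow arguments (illustrative names, not Alignak's):

import functools

class FakeLinkError(Exception):
    pass

def require_connection(*outer_args, **outer_kwargs):
    def decorator(func):
        @functools.wraps(func)
        def decorated(link, *args, **kwargs):
            # Refuse to call the wrapped method without an established connection
            if getattr(link, 'con', None) is None:
                raise FakeLinkError("The connection is not created")
            return func(link, *args, **kwargs)
        return decorated
    return decorator

class FakeLink:
    con = None

    @require_connection()
    def ping(self):
        return "pong"

try:
    FakeLink().ping()
except FakeLinkError as exc:
    print(exc)  # The connection is not created
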
Please provide a description of the function:def communicate(*outer_args, **outer_kwargs):
    # pylint: disable=unused-argument, no-method-argument
    def decorator(func):  # pylint: disable=missing-docstring
        def decorated(*args, **kwargs):  # pylint: disable=missing-docstring
            # outer_args and outer_kwargs are the decorator arguments
            # args and kwargs are the decorated function arguments
            fn_name = func.__name__
            link = args[0]
            if not link.alive:
                logger.warning("%s is not alive for %s", link.name, fn_name)
                return None

            try:
                if not link.reachable:
                    raise LinkError("The %s %s is not reachable" % (link.type, link.name))

                logger.debug("[%s] Calling: %s, %s, %s", link.name, fn_name, args, kwargs)
                return func(*args, **kwargs)
            except HTTPClientConnectionException as exp:
                # A connection error is raised when the daemon connection cannot be established
                # No way with the configuration parameters!
                if not link.stopping:
                    logger.warning("A daemon (%s/%s) that we must be related with "
                                   "cannot be connected: %s", link.type, link.name, exp)
                else:
                    logger.info("Stopping... daemon (%s/%s) cannot be connected. "
                                "It is also probably stopping or already stopped.",
                                link.type, link.name)
                link.set_dead()
            except (LinkError, HTTPClientTimeoutException) as exp:
                link.add_failed_check_attempt("Connection timeout "
                                              "with '%s': %s" % (fn_name, str(exp)))
                return False
            except HTTPClientDataException as exp:
                # A data error is raised when the daemon HTTP response is not 200!
                # No way with the communication if some problems exist in the daemon interface!
                # Abort all
                err = "Some daemons that we must be related with " \
                      "have some interface problems. Sorry, I bail out"
                logger.error(err)
                os.sys.exit(err)
            except HTTPClientException as exp:
                link.add_failed_check_attempt("Error with '%s': %s" % (fn_name, str(exp)))
                return None

        return decorated
    return decorator | [
"Check if the daemon connection is authorized and valid"
] |
Please provide a description of the function:def get_running_id(self):
    former_running_id = self.running_id

    logger.info(" get the running identifier for %s %s.", self.type, self.name)
    # An exception is raised in this function if the daemon is not reachable
    self.running_id = self.con.get('identity')
    if isinstance(self.running_id, dict):
        self.running_id = self.running_id['running_id']

    if former_running_id == 0:
        if self.running_id:
            logger.info(" -> got: %s.", self.running_id)
            former_running_id = self.running_id

    # If the daemon has just started or has been restarted: it has a new running_id.
    if former_running_id != self.running_id:
        if former_running_id:
            logger.info(" -> The %s %s running identifier changed: %s. "
                        "The daemon was certainly restarted!",
                        self.type, self.name, self.running_id)
            # So we clear all verifications, they are obsolete now.
            logger.info("The running id of the %s %s changed (%s), "
                        "we must clear its context.",
                        self.type, self.name, self.running_id)
            (_, _, _, _) = self.get_and_clear_context()

    # Set the daemon as alive
    self.set_alive()

    return True | [
"Send a HTTP request to the satellite (GET /identity)\n Used to get the daemon running identifier that allows to know if the daemon got restarted\n\n This is called on connection initialization or re-connection\n\n If the daemon is notreachable, this function will raise an exception and the caller\n will receive a False as return\n\n :return: Boolean indicating if the running id was received\n :type: bool\n "
] |
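
The restart-detection rule reduces to a comparison of the remembered and freshly reported identifiers. A sketch with hypothetical values (no HTTP involved):

def detect_restart(former_running_id, reported_running_id):
    """Return True when the remote daemon changed identity (was restarted)."""
    if former_running_id == 0:
        return False                 # first contact, nothing to compare
    return former_running_id != reported_running_id

print(detect_restart(0, 1554000000.42))              # False - first contact
print(detect_restart(1554000000.42, 1554000000.42))  # False - same run
print(detect_restart(1554000000.42, 1554099999.99))  # True  - restarted
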
Please provide a description of the function:def stop_request(self, stop_now=False):
    logger.debug("Sending stop request to %s, stop now: %s", self.name, stop_now)

    res = self.con.get('stop_request', {'stop_now': '1' if stop_now else '0'})
    return res | [
"Send a stop request to the daemon\n\n :param stop_now: stop now or go to stop wait mode\n :type stop_now: bool\n :return: the daemon response (True)\n "
] |
Please provide a description of the function:def update_infos(self, forced=False, test=False):
    logger.debug("Update informations, forced: %s", forced)

    # First look if it's not too early to ping
    now = time.time()
    if not forced and self.last_check and self.last_check + self.polling_interval > now:
        logger.debug("Too early to ping %s, ping period is %ds!, last check: %d, now: %d",
                     self.name, self.polling_interval, self.last_check, now)
        return None

    self.get_conf(test=test)

    # Update the daemon last check timestamp
    self.last_check = time.time()

    # Update the state of this element
    self.broks.append(self.get_update_status_brok())

    return self.cfg_managed | [
"Update satellite info each self.polling_interval seconds\n so we smooth arbiter actions for just useful actions.\n\n Raise a satellite update status Brok\n\n If forced is True, then ignore the ping period. This is used when the configuration\n has not yet been dispatched to the Arbiter satellites.\n\n If test is True, do not really ping the daemon (useful for the unit tests only)\n\n :param forced: ignore the ping smoothing\n :type forced: bool\n :param test:\n :type test: bool\n :return:\n None if the last request is too recent,\n False if a timeout was raised during the request,\n else the managed configurations dictionary\n "
] |
Please provide a description of the function:def get_daemon_stats(self, details=False):
    logger.debug("Get daemon statistics for %s, %s %s", self.name, self.alive, self.reachable)
    return self.con.get('stats%s' % ('?details=1' if details else '')) | [
"Send a HTTP request to the satellite (GET /get_daemon_stats)\n\n :return: Daemon statistics\n :rtype: dict\n "
] |
Please provide a description of the function:def get_initial_broks(self, broker_name):
    logger.debug("Getting initial broks for %s, %s %s", self.name, self.alive, self.reachable)
    return self.con.get('_initial_broks', {'broker_name': broker_name}, wait=True) | [
"Send a HTTP request to the satellite (GET /_initial_broks)\n\n Used to build the initial broks for a broker connecting to a scheduler\n\n :param broker_name: the concerned broker name\n :type broker_name: str\n :return: Boolean indicating if the running id changed\n :type: bool\n "
] |
Please provide a description of the function:def wait_new_conf(self):
    logger.debug("Wait new configuration for %s, %s %s", self.name, self.alive, self.reachable)
    return self.con.get('_wait_new_conf') | [
"Send a HTTP request to the satellite (GET /wait_new_conf)\n\n :return: True if wait new conf, otherwise False\n :rtype: bool\n "
] |
Please provide a description of the function:def put_conf(self, configuration, test=False):
    logger.debug("Sending configuration to %s, %s %s", self.name, self.alive, self.reachable)
    # ----------
    if test:
        setattr(self, 'unit_test_pushed_configuration', configuration)
        # print("*** unit tests - sent configuration %s: %s" % (self.name, configuration))
        return True
    # ----------

    return self.con.post('_push_configuration', {'conf': configuration}, wait=True) | [
"Send the configuration to the satellite\n HTTP request to the satellite (POST /push_configuration)\n\n If test is True, store the configuration internally\n\n :param configuration: The conf to send (data depend on the satellite)\n :type configuration:\n :return: None\n "
] |
Please provide a description of the function:def has_a_conf(self, magic_hash=None):  # pragma: no cover
    logger.debug("Have a configuration for %s, %s %s", self.name, self.alive, self.reachable)
    self.have_conf = self.con.get('_have_conf', {'magic_hash': magic_hash})
    return self.have_conf | [
"Send a HTTP request to the satellite (GET /have_conf)\n Used to know if the satellite has a conf\n\n :param magic_hash: Config hash. Only used for HA arbiter communication\n :type magic_hash: int\n :return: Boolean indicating if the satellite has a (specific) configuration\n :type: bool\n "
] |
Please provide a description of the function:def get_conf(self, test=False):
    logger.debug("Get managed configuration for %s, %s %s",
                 self.name, self.alive, self.reachable)
    # ----------
    if test:
        self.cfg_managed = {}
        self.have_conf = True
        logger.debug("Get managed configuration test ...")
        if getattr(self, 'unit_test_pushed_configuration', None) is not None:
            # Note this is a dict not a SatelliteLink object !
            for scheduler_link in self.unit_test_pushed_configuration['schedulers'].values():
                self.cfg_managed[scheduler_link['instance_id']] = {
                    'hash': scheduler_link['hash'],
                    'push_flavor': scheduler_link['push_flavor'],
                    'managed_conf_id': scheduler_link['managed_conf_id']
                }
        # print("*** unit tests - get managed configuration %s: %s"
        #       % (self.name, self.cfg_managed))
    # ----------
    else:
        self.cfg_managed = self.con.get('managed_configurations')
        logger.debug("My (%s) fresh managed configuration: %s", self.name, self.cfg_managed)

    self.have_conf = (self.cfg_managed != {})

    return self.cfg_managed | [
"Send a HTTP request to the satellite (GET /managed_configurations)\n and update the cfg_managed attribute with the new information\n Set to {} on failure\n\n the managed configurations are a dictionary which keys are the scheduler link instance id\n and the values are the push_flavor\n\n If test is True, returns the unit test internally stored configuration\n\n Returns False if a timeout is raised\n\n :return: see @communicate, or the managed configuration\n "
] |
Please provide a description of the function:def push_broks(self, broks):
    logger.debug("[%s] Pushing %d broks", self.name, len(broks))
    return self.con.post('_push_broks', {'broks': broks}, wait=True) | [
"Send a HTTP request to the satellite (POST /push_broks)\n Send broks to the satellite\n\n :param broks: Brok list to send\n :type broks: list\n :return: True on success, False on failure\n :rtype: bool\n "
] |
Please provide a description of the function:def push_actions(self, actions, scheduler_instance_id):
    logger.debug("Pushing %d actions from %s", len(actions), scheduler_instance_id)
    return self.con.post('_push_actions', {'actions': actions,
                                           'scheduler_instance_id': scheduler_instance_id},
                         wait=True) | [
"Post the actions to execute to the satellite.\n Indeed, a scheduler post its checks to a poller and its actions to a reactionner.\n\n :param actions: Action list to send\n :type actions: list\n :param scheduler_instance_id: Scheduler instance identifier\n :type scheduler_instance_id: uuid\n :return: True on success, False on failure\n :rtype: bool\n "
] |
Please provide a description of the function:def push_results(self, results, scheduler_name):
    logger.debug("Pushing %d results", len(results))
    result = self.con.post('put_results', {'results': results, 'from': scheduler_name},
                           wait=True)
    return result | [
"Send a HTTP request to the satellite (POST /put_results)\n Send actions results to the satellite\n\n :param results: Results list to send\n :type results: list\n :param scheduler_name: Scheduler name\n :type scheduler_name: uuid\n :return: True on success, False on failure\n :rtype: bool\n "
] |
Please provide a description of the function:def push_external_commands(self, commands):
    logger.debug("Pushing %d external commands", len(commands))
    return self.con.post('_run_external_commands', {'cmds': commands}, wait=True) | [
"Send a HTTP request to the satellite (POST /r_un_external_commands)\n to send the external commands to the satellite\n\n :param results: Results list to send\n :type results: list\n :return: True on success, False on failure\n :rtype: bool\n "
] |
Please provide a description of the function:def get_external_commands(self):
    res = self.con.get('_external_commands', wait=False)
    logger.debug("Got %d external commands from %s: %s", len(res), self.name, res)
    return unserialize(res, True) | [
"Send a HTTP request to the satellite (GET /_external_commands) to\n get the external commands from the satellite.\n\n :return: External Command list on success, [] on failure\n :rtype: list\n "
] |
Please provide a description of the function:def get_broks(self, broker_name):
    res = self.con.get('_broks', {'broker_name': broker_name}, wait=False)
    logger.debug("Got broks from %s: %s", self.name, res)
    return unserialize(res, True) | [
"Send a HTTP request to the satellite (GET /_broks)\n Get broks from the satellite.\n Un-serialize data received.\n\n :param broker_name: the concerned broker link\n :type broker_name: BrokerLink\n :return: Broks list on success, [] on failure\n :rtype: list\n "
] |
Please provide a description of the function:def get_events(self):
    res = self.con.get('_events', wait=False)
    logger.debug("Got events from %s: %s", self.name, res)
    return unserialize(res, True) | [
"Send a HTTP request to the satellite (GET /_events)\n Get monitoring events from the satellite.\n\n :return: Broks list on success, [] on failure\n :rtype: list\n "
] |
Please provide a description of the function:def get_results(self, scheduler_instance_id):
    res = self.con.get('_results', {'scheduler_instance_id': scheduler_instance_id}, wait=True)
    logger.debug("Got %d results from %s: %s", len(res), self.name, res)
    return res | [
"Send a HTTP request to the satellite (GET /_results)\n Get actions results from satellite (only passive satellites expose this method.\n\n :param scheduler_instance_id: scheduler instance identifier\n :type scheduler_instance_id: str\n :return: Results list on success, [] on failure\n :rtype: list\n "
] |
Please provide a description of the function:def get_actions(self, params):
    res = self.con.get('_checks', params, wait=True)
    logger.debug("Got checks to execute from %s: %s", self.name, res)
    return unserialize(res, True) | [
"Send a HTTP request to the satellite (GET /_checks)\n Get actions from the scheduler.\n Un-serialize data received.\n\n :param params: the request parameters\n :type params: str\n :return: Actions list on success, [] on failure\n :rtype: list\n "
] |
Please provide a description of the function:def linkify(self, modules):
    logger.debug("Linkify %s with %s", self, modules)
    self.linkify_s_by_module(modules) | [
"Link modules and Satellite links\n\n :param modules: Module object list\n :type modules: alignak.objects.module.Modules\n :return: None\n "
] |
Please provide a description of the function:def get_return_from(self, notif):
    self.exit_status = notif.exit_status
    self.execution_time = notif.execution_time | [
"Setter of exit_status and execution_time attributes\n\n :param notif: notification to get data from\n :type notif: alignak.notification.Notification\n :return: None\n "
] |
Please provide a description of the function:def get_initial_status_brok(self):
    data = {'uuid': self.uuid}
    self.fill_data_brok_from(data, 'full_status')
    return Brok({'type': 'notification_raise', 'data': data}) | [
"Get a initial status brok\n\n :return: brok with wanted data\n :rtype: alignak.brok.Brok\n "
] |
Please provide a description of the function:def serialize(self):
    res = super(Notification, self).serialize()

    if res['command_call'] is not None:
        if not isinstance(res['command_call'], string_types) and \
                not isinstance(res['command_call'], dict):
            res['command_call'] = res['command_call'].serialize()

    return res | [
return res | [
"This function serialize into a simple dict object.\n It is used when transferring data to other daemons over the network (http)\n\n Here we directly return all attributes\n\n :return: json representation of a Timeperiod\n :rtype: dict\n "
] |
Please provide a description of the function:def add(self, elt):
    if isinstance(elt, Brok):
        # For a brok, we tag the brok with our instance_id
        elt.instance_id = self.instance_id
        if elt.type == 'monitoring_log':
            # The brok is a monitoring event
            with self.events_lock:
                self.events.append(elt)
            statsmgr.counter('events', 1)
        else:
            with self.broks_lock:
                self.broks.append(elt)
            statsmgr.counter('broks.added', 1)
    elif isinstance(elt, ExternalCommand):
        logger.debug("Queuing an external command '%s'", str(elt.__dict__))
        with self.external_commands_lock:
            self.external_commands.append(elt)
            statsmgr.counter('external-commands.added', 1)
    # Maybe we got a Message from the modules: it's a way to ask for something,
    # like asking full data from a scheduler for example.
    elif isinstance(elt, Message):
        # We got a message, great!
        logger.debug(str(elt.__dict__))
        if elt.get_type() == 'NeedData':
            data = elt.get_data()
            # Full instance id means: I got no data for this scheduler
            # so give me all of it!
            if 'full_instance_id' in data:
                c_id = data['full_instance_id']
                source = getattr(elt, 'source', getattr(elt, '_source', None))
                logger.info('The module %s is asking me to get all initial data '
                            'from the scheduler %d',
                            source, c_id)
                # so we just reset the connection and the running_id,
                # it will just get all new things
                try:
                    self.schedulers[c_id]['con'] = None
                    self.schedulers[c_id]['running_id'] = 0
                except KeyError:  # maybe this instance was not known, forget it
                    logger.warning("the module %s asked me a full_instance_id "
                                   "for an unknown ID (%d)!", source, c_id)
        # Maybe a module tells me that it's dead, I must log its last words...
        if elt.get_type() == 'ICrash':
            data = elt.get_data()
            logger.error('the module %s just crashed! Please look at the traceback:',
                         data['name'])
            logger.error(data['trace'])

        statsmgr.counter('message.added', 1) | [
"Generic function to add objects to the daemon internal lists.\n Manage Broks, External commands and Messages (from modules queues)\n\n :param elt: object to add\n :type elt: alignak.AlignakObject\n :return: None\n "
] |
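
The isinstance-based dispatch can be shown in miniature: one entry point routes heterogeneous objects to per-type queues. Stand-in classes below, not the real Brok/ExternalCommand/Message ones:

class Brok:
    def __init__(self, type_):
        self.type = type_

class ExternalCommand:
    pass

class Message:
    pass

broks, events, external_commands, messages = [], [], [], []

def add(elt):
    if isinstance(elt, Brok):
        # Monitoring events go to a dedicated queue, everything else is a plain brok
        (events if elt.type == 'monitoring_log' else broks).append(elt)
    elif isinstance(elt, ExternalCommand):
        external_commands.append(elt)
    elif isinstance(elt, Message):
        messages.append(elt)

for obj in (Brok('monitoring_log'), Brok('host_check_result'),
            ExternalCommand(), Message()):
    add(obj)
print(len(broks), len(events), len(external_commands), len(messages))  # 1 1 1 1
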
Please provide a description of the function:def manage_brok(self, brok):
    # Unserialize the brok before consuming it
    brok.prepare()

    for module in self.modules_manager.get_internal_instances():
        try:
            _t0 = time.time()
            module.manage_brok(brok)
            statsmgr.timer('manage-broks.internal.%s' % module.get_name(), time.time() - _t0)
        except Exception as exp:  # pylint: disable=broad-except
            logger.warning("The module %s raised an exception: %s, "
                           "I'm tagging it to restart later", module.get_name(), str(exp))
            logger.exception(exp)
            self.modules_manager.set_to_restart(module) | [
"Get a brok.\n We put brok data to the modules\n\n :param brok: object with data\n :type brok: object\n :return: None\n "
] |
Please provide a description of the function:def get_internal_broks(self):
    statsmgr.gauge('get-new-broks-count.broker', len(self.internal_broks))
    # Add the broks to our global list
    self.external_broks.extend(self.internal_broks)
    self.internal_broks = [] | [
"Get all broks from self.broks_internal_raised and append them to our broks\n to manage\n\n :return: None\n "
] |
Please provide a description of the function:def get_arbiter_broks(self):
    with self.arbiter_broks_lock:
        statsmgr.gauge('get-new-broks-count.arbiter', len(self.arbiter_broks))
        # Add the broks to our global list
        self.external_broks.extend(self.arbiter_broks)
        self.arbiter_broks = [] | [
"Get the broks from the arbiters,\n but as the arbiter_broks list can be push by arbiter without Global lock,\n we must protect this with a lock\n\n TODO: really? check this arbiter behavior!\n\n :return: None\n "
] |
Please provide a description of the function:def get_new_broks(self):
    for satellites in [self.schedulers, self.pollers, self.reactionners, self.receivers]:
        for satellite_link in list(satellites.values()):
            logger.debug("Getting broks from %s", satellite_link)
            _t0 = time.time()
            try:
                tmp_broks = satellite_link.get_broks(self.name)
            except LinkError:
                logger.warning("Daemon %s connection failed, I could not get the broks!",
                               satellite_link)
            else:
                if tmp_broks:
                    logger.debug("Got %d Broks from %s in %s",
                                 len(tmp_broks), satellite_link.name, time.time() - _t0)
                    statsmgr.gauge('get-new-broks-count.%s'
                                   % (satellite_link.name), len(tmp_broks))
                    statsmgr.timer('get-new-broks-time.%s'
                                   % (satellite_link.name), time.time() - _t0)
                    for brok in tmp_broks:
                        brok.instance_id = satellite_link.instance_id

                    # Add the broks to our global list
                    self.external_broks.extend(tmp_broks) | [
"Get new broks from our satellites\n\n :return: None\n "
] |
Please provide a description of the function:def setup_new_conf(self):
    # pylint: disable=too-many-branches, too-many-locals
    # Execute the base class treatment...
    super(Broker, self).setup_new_conf()

    # ...then our own specific treatment!
    with self.conf_lock:
        # # self_conf is our own configuration from the alignak environment
        # self_conf = self.cur_conf['self_conf']

        self.got_initial_broks = False

        # Now we create our pollers, reactionners and receivers
        for link_type in ['pollers', 'reactionners', 'receivers']:
            if link_type not in self.cur_conf['satellites']:
                logger.error("No %s in the configuration!", link_type)
                continue

            my_satellites = getattr(self, link_type, {})
            received_satellites = self.cur_conf['satellites'][link_type]
            for link_uuid in received_satellites:
                rs_conf = received_satellites[link_uuid]
                logger.debug("- received %s - %s: %s", rs_conf['instance_id'],
                             rs_conf['type'], rs_conf['name'])

                # Must look if we already had a configuration and save our broks
                already_got = rs_conf['instance_id'] in my_satellites
                broks = []
                actions = {}
                wait_homerun = {}
                external_commands = {}
                running_id = 0
                if already_got:
                    logger.warning("I already got: %s", rs_conf['instance_id'])
                    # Save some information
                    running_id = my_satellites[link_uuid].running_id
                    (broks, actions,
                     wait_homerun, external_commands) = \
                        my_satellites[link_uuid].get_and_clear_context()
                    # Delete the former link
                    del my_satellites[link_uuid]

                # My new satellite link...
                new_link = SatelliteLink.get_a_satellite_link(link_type[:-1],
                                                              rs_conf)
                my_satellites[new_link.uuid] = new_link
                logger.info("I got a new %s satellite: %s", link_type[:-1], new_link)

                new_link.running_id = running_id
                new_link.external_commands = external_commands
                new_link.broks = broks
                new_link.wait_homerun = wait_homerun
                new_link.actions = actions

                # Replace satellite address and port by those defined in satellite_map
                # todo: check if it is really necessary! Add a unit test for this
                # Not sure about this because of the daemons/satellites configuration
                # if new_link.name in self_conf.get('satellite_map', {}):
                #     new_link = dict(new_link)  # make a copy
                #     new_link.update(self_conf.get('satellite_map', {})[new_link.name])

        if not self.have_modules:
            try:
                self.modules = unserialize(self.cur_conf['modules'], no_load=True)
            except AlignakClassLookupException as exp:  # pragma: no cover, simple protection
                logger.error('Cannot un-serialize modules configuration '
                             'received from arbiter: %s', exp)
            if self.modules:
                logger.info("I received some modules configuration: %s", self.modules)
                self.have_modules = True

                # Ok now start, or restart them!
                # Set modules, init them and start external ones
                self.do_load_modules(self.modules)
                # and start external modules too
                self.modules_manager.start_external_instances()
            else:
                logger.info("I do not have modules")

        # Initialize connection with my schedulers first
        logger.info("Initializing connection with my schedulers:")
        my_satellites = self.get_links_of_type(s_type='scheduler')
        for satellite in list(my_satellites.values()):
            logger.info("- %s/%s", satellite.type, satellite.name)
            if not self.daemon_connection_init(satellite):
                logger.error("Satellite connection failed: %s", satellite)

        # Initialize connection with all our satellites
        logger.info("Initializing connection with my satellites:")
        for sat_type in ['arbiter', 'reactionner', 'poller', 'receiver']:
            my_satellites = self.get_links_of_type(s_type=sat_type)
            for satellite in list(my_satellites.values()):
                logger.info("- %s/%s", satellite.type, satellite.name)
                if not self.daemon_connection_init(satellite):
                    logger.error("Satellite connection failed: %s", satellite)

        # Now I have a configuration!
        self.have_conf = True | [
"Broker custom setup_new_conf method\n\n This function calls the base satellite treatment and manages the configuration needed\n for a broker daemon:\n - get and configure its pollers, reactionners and receivers relation\n - configure the modules\n\n :return: None\n "
] |
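The important pattern above is the context hand-over: when a satellite link is re-created on a new dispatch, its pending work is saved and restored so nothing is lost. A minimal sketch of what a get_and_clear_context implementation could look like (the attribute names follow the tuple unpacked above; this is an assumption, not the actual SatelliteLink code):

class LinkContext(object):
    """Sketch: pending work carried by a satellite link across a reconfiguration."""

    def __init__(self):
        self.broks = []
        self.actions = {}
        self.wait_homerun = {}
        self.external_commands = {}

    def get_and_clear_context(self):
        # Return the pending work and reset the internal containers
        res = (self.broks, self.actions, self.wait_homerun, self.external_commands)
        self.broks, self.actions, self.wait_homerun, self.external_commands = [], {}, {}, {}
        return res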
Please provide a description of the function:def clean_previous_run(self):
# Execute the base class treatment...
super(Broker, self).clean_previous_run()
# Clean all satellites relations
self.pollers.clear()
self.reactionners.clear()
self.receivers.clear()
# Clean our internal objects
self.external_broks = self.external_broks[:]
self.internal_broks = self.internal_broks[:]
with self.arbiter_broks_lock:
self.arbiter_broks = self.arbiter_broks[:]
self.external_commands = self.external_commands[:] | [
"Clean all (when we received new conf)\n\n :return: None\n "
] |
Please provide a description of the function:def do_loop_turn(self):
# pylint: disable=too-many-branches
if not self.got_initial_broks:
# Asking initial broks from my schedulers
my_satellites = self.get_links_of_type(s_type='scheduler')
for satellite in list(my_satellites.values()):
logger.info("Asking my initial broks from '%s'", satellite.name)
_t0 = time.time()
try:
my_initial_broks = satellite.get_initial_broks(self.name)
statsmgr.timer('broks.initial.%s.time' % satellite.name, time.time() - _t0)
if not my_initial_broks:
logger.info("No initial broks were raised, "
"my scheduler is not yet ready...")
return
self.got_initial_broks = True
logger.debug("Got %d initial broks from '%s'",
my_initial_broks, satellite.name)
statsmgr.gauge('broks.initial.%s.count' % satellite.name, my_initial_broks)
except LinkError as exp:
logger.warning("Scheduler connection failed, I could not get initial broks!")
logger.debug("Begin Loop: still some old broks to manage (%d)", len(self.external_broks))
if self.external_broks:
statsmgr.gauge('unmanaged.broks', len(self.external_broks))
# Try to see if one of my module is dead, and restart previously dead modules
self.check_and_del_zombie_modules()
# Call modules that manage a starting tick pass
_t0 = time.time()
self.hook_point('tick')
statsmgr.timer('hook.tick', time.time() - _t0)
# Maybe the last loop we did raised some broks internally
self.get_internal_broks()
# Also reap broks sent from the arbiters
self.get_arbiter_broks()
# Now get broks from our distant daemons
self.get_new_broks()
# Get the list of broks not yet sent to our external modules
_t0 = time.time()
broks_to_send = [brok for brok in self.external_broks if getattr(brok, 'to_be_sent', True)]
statsmgr.gauge('get-new-broks-count.to_send', len(broks_to_send))
# Send the broks to all external modules to_q queue so they can get the whole packet
# beware, the sub-process/queue can die or be closed, so we tag the whole module
# for a restart instead of killing ourselves :)
for module in self.modules_manager.get_external_instances():
try:
_t00 = time.time()
queue_size = module.to_q.qsize()
statsmgr.gauge('queues.external.%s.to.size' % module.get_name(), queue_size)
module.to_q.put(broks_to_send)
statsmgr.timer('queues.external.%s.to.put' % module.get_name(), time.time() - _t00)
except Exception as exp: # pylint: disable=broad-except
# first we must find the modules
logger.warning("Module %s queue exception: %s, I'm tagging it to restart later",
module.get_name(), str(exp))
logger.exception(exp)
self.modules_manager.set_to_restart(module)
# No more need to send them
for brok in broks_to_send:
brok.to_be_sent = False
logger.debug("Time to send %s broks (%d secs)", len(broks_to_send), time.time() - _t0)
# Make the internal modules manage the broks
start = time.time()
while self.external_broks:
now = time.time()
# Do not 'manage' more than 0.8s, we must get new broks almost every second
if now - start > 0.8:
logger.info("I did not yet managed all my broks, still %d broks",
len(self.external_broks))
break
# Get the first brok in the list
brok = self.external_broks.pop(0)
if self.modules_manager.get_internal_instances():
self.manage_brok(brok)
# Make a very short pause to avoid overloading
self.make_a_pause(0.01, check_time_change=False)
else:
if getattr(brok, 'to_be_sent', False):
self.external_broks.append(brok)
# Maybe our external modules raised 'objects', so get them
if self.get_objects_from_from_queues():
statsmgr.gauge('external-commands.got.count', len(self.external_commands))
statsmgr.gauge('broks.got.count', len(self.external_broks)) | [
"Loop used to:\n * get initial status broks\n * check if modules are alive, if not restart them\n * get broks from ourself, the arbiters and our satellites\n * add broks to the queue of each external module\n * manage broks with each internal module\n\n If the internal broks management is longer than 0.8 seconds, postpone to hte next\n loop turn to avoid overloading the broker daemon.\n\n :return: None\n "
] |
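The 0.8 second budget applied to the internal brok management is a classic time-boxed drain: consume a queue, but never for longer than the budget, so the outer loop keeps polling the satellites about once per second. A stand-alone sketch of the pattern:

import time

def drain_with_budget(work_items, handle, budget=0.8):
    """Process work_items until empty or until the time budget is exhausted."""
    start = time.time()
    while work_items:
        if time.time() - start > budget:
            break  # leave the remaining items for the next loop turn
        handle(work_items.pop(0))
    return len(work_items)  # number of items postponed to the next turn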
Please provide a description of the function:def get_daemon_stats(self, details=False):
# Call the base Daemon one
res = super(Broker, self).get_daemon_stats(details=details)
res.update({'name': self.name, 'type': self.type})
counters = res['counters']
counters['broks-external'] = len(self.external_broks)
counters['broks-internal'] = len(self.internal_broks)
counters['broks-arbiter'] = len(self.arbiter_broks)
counters['satellites.pollers'] = len(self.pollers)
counters['satellites.reactionners'] = len(self.reactionners)
counters['satellites.receivers'] = len(self.receivers)
return res | [
"Increase the stats provided by the Daemon base class\n\n :return: stats dictionary\n :rtype: dict\n "
] |
Please provide a description of the function:def add_group_members(self, members):
if not isinstance(members, list):
members = [members]
if not getattr(self, 'group_members', None):
self.group_members = members
else:
self.group_members.extend(members) | [
"Add a new group member to the groups list\n\n :param members: member name\n :type members: str\n :return: None\n "
] |
Please provide a description of the function:def prepare_satellites(self, satellites):
for sat_type in ["scheduler", "reactionner", "poller", "broker", "receiver"]:
# We get potential TYPE at realm level first
for sat_link_uuid in getattr(self, "%ss" % sat_type):
if sat_link_uuid not in satellites:
continue
sat_link = satellites[sat_link_uuid]
# Found our declared satellite in the provided satellites
if sat_link.active and not sat_link.spare:
# Generic increment : realm.nb_TYPE += 1
setattr(self, "nb_%ss" % sat_type, getattr(self, "nb_%ss" % sat_type) + 1)
break
else:
self.add_error("Realm %s, satellite %s declared in the realm is not found "
"in the allowed satellites!" % (self.name, sat_link.name))
logger.error("Satellite %s declared in the realm %s not found "
"in the allowed satellites!", sat_link.name, self.name)
logger.info(" Realm %s: (in/potential) (schedulers:%d/%d) (pollers:%d/%d) "
"(reactionners:%d/%d) (brokers:%d/%d) (receivers:%d/%d)", self.name,
self.nb_schedulers, len(self.potential_schedulers),
self.nb_pollers, len(self.potential_pollers),
self.nb_reactionners, len(self.potential_reactionners),
self.nb_brokers, len(self.potential_brokers),
self.nb_receivers, len(self.potential_receivers)) | [
"Update the following attributes of a realm::\n\n * nb_*satellite type*s\n * self.potential_*satellite type*s\n\n (satellite types are scheduler, reactionner, poller, broker and receiver)\n\n :param satellites: dict of SatelliteLink objects\n :type satellites: dict\n :return: None\n "
] |
Please provide a description of the function:def get_realms_by_explosion(self, realms):
# If rec_tag is already set, then we detected a loop in the realms hierarchy!
if getattr(self, 'rec_tag', False):
self.add_error("Error: there is a loop in the realm definition %s" % self.get_name())
return None
# Ok, not in a loop, we tag the realm and parse its members
self.rec_tag = True
# Order realm members list by name
self.realm_members = sorted(self.realm_members)
for member in self.realm_members:
realm = realms.find_by_name(member)
if not realm:
self.add_unknown_members(member)
continue
children = realm.get_realms_by_explosion(realms)
if children is None:
# We got a loop in our children definition
self.all_sub_members = []
self.realm_members = []
return None
# Return the list of all unique members
return self.all_sub_members | [
"Get all members of this realm including members of sub-realms on multi-levels\n\n :param realms: realms list, used to look for a specific one\n :type realms: alignak.objects.realm.Realms\n :return: list of members and add realm to realm_members attribute\n :rtype: list\n "
] |
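The rec_tag attribute implements a simple cycle detection for the realm tree: a node is tagged when the recursion enters it, and meeting an already tagged node means the hierarchy loops. A generic sketch of the same trick (the tags must be reset before each traversal, as the explode method shown later does):

def walk(node, find_by_name):
    """Sketch: return None when a cycle is detected, else the node members."""
    if getattr(node, 'rec_tag', False):
        return None  # this node is already on the current path: loop!
    node.rec_tag = True
    for name in node.members:
        child = find_by_name(name)
        if child is not None and walk(child, find_by_name) is None:
            return None  # propagate the loop detection upwards
    return node.members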
Please provide a description of the function:def set_level(self, level, realms):
self.level = level
if not self.level:
logger.info("- %s", self.get_name())
else:
logger.info(" %s %s", '+' * self.level, self.get_name())
self.all_sub_members = []
self.all_sub_members_names = []
for child in sorted(self.realm_members):
child = realms.find_by_name(child)
if not child:
continue
self.all_sub_members.append(child.uuid)
self.all_sub_members_names.append(child.get_name())
grand_children = child.set_level(self.level + 1, realms)
for grand_child in grand_children:
if grand_child in self.all_sub_members_names:
continue
grand_child = realms.find_by_name(grand_child)
if grand_child:
self.all_sub_members_names.append(grand_child.get_name())
self.all_sub_members.append(grand_child.uuid)
return self.all_sub_members_names | [
"Set the realm level in the realms hierarchy\n\n :return: None\n "
] |
Please provide a description of the function:def get_all_subs_satellites_by_type(self, sat_type, realms):
res = copy.copy(getattr(self, sat_type))
for member in self.all_sub_members:
res.extend(realms[member].get_all_subs_satellites_by_type(sat_type, realms))
return res | [
"Get all satellites of the wanted type in this realm recursively\n\n :param sat_type: satellite type wanted (scheduler, poller ..)\n :type sat_type:\n :param realms: all realms\n :type realms: list of realm object\n :return: list of satellite in this realm\n :rtype: list\n "
] |
Please provide a description of the function:def get_satellites_by_type(self, s_type):
if hasattr(self, s_type + 's'):
return getattr(self, s_type + 's')
logger.debug("[realm %s] do not have this kind of satellites: %s", self.name, s_type)
return [] | [
"Generic function to access one of the satellite attribute\n ie : self.pollers, self.reactionners ...\n\n :param s_type: satellite type wanted\n :type s_type: str\n :return: self.*type*s\n :rtype: list\n "
] |
Please provide a description of the function:def get_potential_satellites_by_type(self, satellites, s_type):
if not hasattr(self, 'potential_' + s_type + 's'):
logger.debug("[realm %s] do not have this kind of satellites: %s", self.name, s_type)
return []
matching_satellites = []
for sat_link in satellites:
if sat_link.uuid in getattr(self, s_type + 's'):
matching_satellites.append(sat_link)
if matching_satellites:
logger.debug("- found %ss: %s", s_type, matching_satellites)
return matching_satellites
for sat_link in satellites:
if sat_link.uuid in getattr(self, 'potential_' + s_type + 's'):
matching_satellites.append(sat_link)
# Do not limit to one satellite!
# break
logger.debug("- potential %ss: %s", s_type, matching_satellites)
return matching_satellites | [
"Generic function to access one of the potential satellite attribute\n ie : self.potential_pollers, self.potential_reactionners ...\n\n :param satellites: list of SatelliteLink objects\n :type satellites: SatelliteLink list\n :param s_type: satellite type wanted\n :type s_type: str\n :return: self.potential_*type*s\n :rtype: list\n "
] |
Please provide a description of the function:def get_nb_of_must_have_satellites(self, s_type):
if hasattr(self, 'nb_' + s_type + 's'):
return getattr(self, 'nb_' + s_type + 's')
logger.debug("[realm %s] do not have this kind of satellites: %s", self.name, s_type)
return 0 | [
"Generic function to access one of the number satellite attribute\n ie : self.nb_pollers, self.nb_reactionners ...\n\n :param s_type: satellite type wanted\n :type s_type: str\n :return: self.nb_*type*s\n :rtype: int\n "
] |
Please provide a description of the function:def get_links_for_a_broker(self, pollers, reactionners, receivers, realms,
manage_sub_realms=False):
# Create void satellite links
cfg = {
'pollers': {},
'reactionners': {},
'receivers': {},
}
# Our self.daemons are only identifiers... that we use to fill the satellite links
for poller_id in self.pollers:
poller = pollers[poller_id]
cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
for reactionner_id in self.reactionners:
reactionner = reactionners[reactionner_id]
cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
for receiver_id in self.receivers:
receiver = receivers[receiver_id]
cfg['receivers'][receiver.uuid] = receiver.give_satellite_cfg()
# If the broker manages sub realms, fill the satellite links...
if manage_sub_realms:
# Now pollers
for poller_id in self.get_all_subs_satellites_by_type('pollers', realms):
poller = pollers[poller_id]
cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
# Now reactionners
for reactionner_id in self.get_all_subs_satellites_by_type('reactionners', realms):
reactionner = reactionners[reactionner_id]
cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
# Now receivers
for receiver_id in self.get_all_subs_satellites_by_type('receivers', realms):
receiver = receivers[receiver_id]
cfg['receivers'][receiver.uuid] = receiver.give_satellite_cfg()
return cfg | [
"Get a configuration dictionary with pollers, reactionners and receivers links\n for a broker\n\n :param pollers: pollers\n :type pollers:\n :param reactionners: reactionners\n :type reactionners:\n :param receivers: receivers\n :type receivers:\n :param realms: realms\n :type realms:\n :param manage_sub_realms:\n :type manage_sub_realms: True if the borker manages sub realms\n\n :return: dict containing pollers, reactionners and receivers links (key is satellite id)\n :rtype: dict\n "
] |
Please provide a description of the function:def get_links_for_a_scheduler(self, pollers, reactionners, brokers):
# Create void satellite links
cfg = {
'pollers': {},
'reactionners': {},
'brokers': {},
}
# Our self.daemons are only identifiers... that we use to fill the satellite links
try:
for poller in self.pollers + self.get_potential_satellites_by_type(pollers, "poller"):
if poller in pollers:
poller = pollers[poller]
cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
for reactionner in self.reactionners + self.get_potential_satellites_by_type(
reactionners, "reactionner"):
if reactionner in reactionners:
reactionner = reactionners[reactionner]
cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
for broker in self.brokers + self.get_potential_satellites_by_type(brokers, "broker"):
if broker in brokers:
broker = brokers[broker]
cfg['brokers'][broker.uuid] = broker.give_satellite_cfg()
except Exception as exp: # pylint: disable=broad-except
logger.exception("realm.get_links_for_a_scheduler: %s", exp)
# for poller in self.get_potential_satellites_by_type(pollers, "poller"):
# logger.info("Poller: %s", poller)
# cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
#
# for reactionner in self.get_potential_satellites_by_type(reactionners, "reactionner"):
# cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
#
# for broker in self.get_potential_satellites_by_type(brokers, "broker"):
# cfg['brokers'][broker.uuid] = broker.give_satellite_cfg()
return cfg | [
"Get a configuration dictionary with pollers, reactionners and brokers links\n for a scheduler\n\n :return: dict containing pollers, reactionners and brokers links (key is satellite id)\n :rtype: dict\n "
] |
Please provide a description of the function:def linkify(self):
logger.info("Known realms:")
for realm in self:
for tmp_realm in self:
# Ignore if it is me...
if tmp_realm == realm:
continue
# Ignore if I am a sub realm of another realm
if realm.get_name() in tmp_realm.realm_members:
break
else:
# This realm is not in the children of any realm
realm.level = 0
realm.set_level(0, self) | [
"The realms linkify is done during the default realms/satellites initialization in the\n Config class.\n\n This functione only finishes the process by setting the realm level property according\n to the realm position in the hierarchy.\n\n All ` level` 0 realms are main realms that have their own hierarchy.\n\n :return: None\n "
] |
Please provide a description of the function:def explode(self):
# Manage higher realms where defined
for realm in [tmp_realm for tmp_realm in self if tmp_realm.higher_realms]:
for parent in realm.higher_realms:
higher_realm = self.find_by_name(parent)
if higher_realm:
# Add the realm to its parent realm members
higher_realm.realm_members.append(realm.get_name())
for realm in self:
# Set a recursion tag to protect against loop
for tmp_realm in self:
tmp_realm.rec_tag = False
realm.get_realms_by_explosion(self)
# Clean the recursion tag
for tmp_realm in self:
del tmp_realm.rec_tag | [
"Explode realms with each realm_members and higher_realms to get all the\n realms sub realms.\n\n :return: None\n "
] |
Please provide a description of the function:def get_default(self, check=False):
found = []
for realm in sorted(self, key=lambda r: r.level):
if getattr(realm, 'default', False):
found.append(realm)
if not found:
# Retain as default realm the first realm in name alphabetical order
found_names = sorted([r.get_name() for r in self])
if not found_names:
self.add_error("No realm is defined in this configuration! "
"This should not be possible!")
return None
default_realm_name = found_names[0]
default_realm = self.find_by_name(default_realm_name)
default_realm.default = True
found.append(default_realm)
if check:
self.add_error("No realm is defined as the default one! "
"I set %s as the default realm" % default_realm_name)
default_realm = found[0]
if len(found) > 1:
# Retain as default realm the first of the so-called default realms, in name alphabetical order
found_names = sorted([r.get_name() for r in found])
default_realm_name = found_names[0]
default_realm = self.find_by_name(default_realm_name)
# Set all found realms as non-default realms
for realm in found:
if realm.get_name() != default_realm_name:
realm.default = False
if check:
self.add_warning("More than one realm is defined as the default one: %s. "
"I set %s as the default realm."
% (','.join(found_names), default_realm_name))
self.default = default_realm
return default_realm | [
"Get the default realm\n\n :param check: check correctness if True\n :type check: bool\n :return: Default realm of Alignak configuration\n :rtype: alignak.objects.realm.Realm | None\n "
] |
Please provide a description of the function:def reload_configuration(self):
# If I'm not the master arbiter, ignore the command and raise a log
if not self.app.is_master:
message = u"I received a request to reload the monitored configuration. " \
u"I am not the Master arbiter, I ignore and continue to run."
logger.warning(message)
return {'_status': u'ERR', '_message': message}
message = "I received a request to reload the monitored configuration"
if self.app.loading_configuration:
message = message + "and I am still reloading the monitored configuration ;)"
else:
self.app.need_config_reload = True
logger.warning(message)
return {'_status': u'OK', '_message': message} | [
"Ask to the arbiter to reload the monitored configuration\n\n **Note** tha the arbiter will not reload its main configuration file (eg. alignak.ini)\n but it will reload the monitored objects from the Nagios legacy files or from the\n Alignak backend!\n\n In case of any error, this function returns an object containing some properties:\n '_status': 'ERR' because of the error\n `_message`: some more explanations about the error\n\n :return: True if configuration reload is accepted\n "
] |
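As a hedged usage example, assuming the arbiter exposes this method under a /reload_configuration endpoint on its HTTP port (7770 by default in Alignak), a client could trigger a reload with the requests library:

import requests

# Hypothetical call; host, port and endpoint name are assumptions
raw = requests.get('http://127.0.0.1:7770/reload_configuration')
print(raw.json())  # e.g. {'_status': 'OK', '_message': 'I received a request ...'}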
Please provide a description of the function:def backend_notification(self, event=None, parameters=None):
# request_parameters = cherrypy.request.json
# event = request_parameters.get('event', event)
# parameters = request_parameters.get('parameters', parameters)
if event is None:
data = cherrypy.request.json
event = data.get('event', None)
if parameters is None:
data = cherrypy.request.json
parameters = data.get('parameters', None)
logger.warning("I got a backend notification: %s / %s", event, parameters)
# For a configuration reload event...
if event in ['creation', 'deletion']:
# If I'm not the master arbiter, ignore the command and raise a log
if not self.app.is_master:
message = u"I received a request to reload the monitored configuration. " \
u"I am not the Master arbiter, I ignore and continue to run."
logger.warning(message)
return {'_status': u'ERR', '_message': message}
message = "I received a request to reload the monitored configuration."
if self.app.loading_configuration:
message += "I am still reloading the monitored configuration ;)"
logger.warning(message)
self.app.need_config_reload = True
return {'_status': u'OK', '_message': message}
return {'_status': u'OK', '_message': u"No action to do"} | [
"The Alignak backend raises an event to the Alignak arbiter\n -----\n Possible events are:\n - creation, for a realm or an host creation\n - deletion, for a realm or an host deletion\n\n Calls the reload configuration function if event is creation or deletion\n\n Else, nothing for the moment!\n\n In case of any error, this function returns an object containing some properties:\n '_status': 'ERR' because of the error\n `_message`: some more explanations about the error\n\n The `_status` field is 'OK' with an according `_message` to explain what the Arbiter\n will do depending upon the notification.\n\n :return: dict\n "
] |
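A hedged example of what such a notification could look like from the client side, assuming the endpoint is exposed as /backend_notification on the arbiter HTTP port and accepts a JSON body with event and parameters:

import requests

# Hypothetical notification; URL and payload values are illustrative
raw = requests.post('http://127.0.0.1:7770/backend_notification',
                    json={'event': 'creation', 'parameters': 'host:host_1'})
print(raw.json())  # {'_status': 'OK', '_message': '...'} when accepted by the master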
Please provide a description of the function:def command(self, command=None,
timestamp=None, element=None, host=None, service=None, user=None, parameters=None):
# pylint: disable=too-many-branches
if cherrypy.request.method in ["POST"]:
if not cherrypy.request.json:
return {'_status': u'ERR',
'_message': u'You must POST parameters on this endpoint.'}
if command is None:
try:
command = cherrypy.request.json.get('command', None)
timestamp = cherrypy.request.json.get('timestamp', None)
element = cherrypy.request.json.get('element', None)
host = cherrypy.request.json.get('host', None)
service = cherrypy.request.json.get('service', None)
user = cherrypy.request.json.get('user', None)
parameters = cherrypy.request.json.get('parameters', None)
except AttributeError:
return {'_status': u'ERR', '_message': u'Missing command parameters'}
if not command:
return {'_status': u'ERR', '_message': u'Missing command parameter'}
fields = split_semicolon(command)
command_line = command.replace(fields[0], fields[0].upper())
if timestamp:
try:
timestamp = int(timestamp)
except ValueError:
return {'_status': u'ERR', '_message': u'Timestamp must be an integer value'}
command_line = '[%d] %s' % (timestamp, command_line)
if host or service or user:
if host:
command_line = '%s;%s' % (command_line, host)
if service:
command_line = '%s;%s' % (command_line, service)
if user:
command_line = '%s;%s' % (command_line, user)
elif element:
if '/' in element:
# Replace only the first /
element = element.replace('/', ';', 1)
command_line = '%s;%s' % (command_line, element)
if parameters:
command_line = '%s;%s' % (command_line, parameters)
# Add a command to get managed
logger.warning("Got an external command: %s", command_line)
self.app.add(ExternalCommand(command_line))
return {'_status': u'OK',
'_message': u"Got command: %s" % command_line,
'command': command_line} | [
" Request to execute an external command\n\n Allowed parameters are:\n `command`: mandatory parameter containing the whole command line or only the command name\n\n `timestamp`: optional parameter containing the timestamp. If not present, the\n current timestamp is added in the command line\n\n `element`: the targeted element that will be appended after the command name (`command`).\n If element contains a '/' character it is split to make an host and service.\n\n `host`, `service` or `user`: the targeted host, service or user. Takes precedence over\n the `element` to target a specific element\n\n `parameters`: the parameter that will be appended after all the arguments\n\n When using this endpoint with the HTTP GET method, the semi colons that are commonly used\n to separate the parameters must be replace with %3B! This because the ; is an accepted\n URL query parameters separator...\n\n Indeed, the recommended way of using this endpoint is to use the HTTP POST method.\n\n In case of any error, this function returns an object containing some properties:\n '_status': 'ERR' because of the error\n `_message`: some more explanations about the error\n\n The `_status` field is 'OK' with an according `_message` to explain what the Arbiter\n will do depending upon the notification. The `command` property contains the formatted\n external command.\n\n :return: dict\n "
] |
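Following the parameters documented above, a hedged POST example (endpoint name, host and port are assumptions). Given this payload, the code builds the command line 'PROCESS_SERVICE_CHECK_RESULT;host_0;dummy_ok;0;Everything is fine':

import requests

# Hypothetical client call; URL and payload values are illustrative
raw = requests.post('http://127.0.0.1:7770/command',
                    json={'command': 'process_service_check_result',
                          'host': 'host_0',
                          'service': 'dummy_ok',
                          'parameters': '0;Everything is fine'})
# Expected: {'_status': 'OK',
#            'command': 'PROCESS_SERVICE_CHECK_RESULT;host_0;dummy_ok;0;Everything is fine', ...}
print(raw.json())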
Please provide a description of the function:def monitoring_problems(self):
res = self.identity()
res['problems'] = {}
for scheduler_link in self.app.conf.schedulers:
sched_res = scheduler_link.con.get('monitoring_problems', wait=True)
res['problems'][scheduler_link.name] = {}
if '_freshness' in sched_res:
res['problems'][scheduler_link.name].update({'_freshness': sched_res['_freshness']})
if 'problems' in sched_res:
res['problems'][scheduler_link.name].update({'problems': sched_res['problems']})
res['_freshness'] = int(time.time())
return res | [
"Get Alignak detailed monitoring status\n\n This will return an object containing the properties of the `identity`, plus a `problems`\n object which contains 2 properties for each known scheduler:\n - _freshness, which is the timestamp when the provided data were fetched\n - problems, which is an object with the scheduler known problems:\n\n {\n ...\n\n \"problems\": {\n \"scheduler-master\": {\n \"_freshness\": 1528903945,\n \"problems\": {\n \"fdfc986d-4ab4-4562-9d2f-4346832745e6\": {\n \"last_state\": \"CRITICAL\",\n \"service\": \"dummy_critical\",\n \"last_state_type\": \"SOFT\",\n \"last_state_update\": 1528902442,\n \"last_hard_state\": \"CRITICAL\",\n \"last_hard_state_change\": 1528902442,\n \"last_state_change\": 1528902381,\n \"state\": \"CRITICAL\",\n \"state_type\": \"HARD\",\n \"host\": \"host-all-8\",\n \"output\": \"Hi, checking host-all-8/dummy_critical -> exit=2\"\n },\n \"2445f2a3-2a3b-4b13-96ed-4cfb60790e7e\": {\n \"last_state\": \"WARNING\",\n \"service\": \"dummy_warning\",\n \"last_state_type\": \"SOFT\",\n \"last_state_update\": 1528902463,\n \"last_hard_state\": \"WARNING\",\n \"last_hard_state_change\": 1528902463,\n \"last_state_change\": 1528902400,\n \"state\": \"WARNING\",\n \"state_type\": \"HARD\",\n \"host\": \"host-all-6\",\n \"output\": \"Hi, checking host-all-6/dummy_warning -> exit=1\"\n },\n ...\n }\n }\n }\n }\n\n :return: schedulers live synthesis list\n :rtype: dict\n "
] |
Please provide a description of the function:def livesynthesis(self):
res = self.identity()
res.update(self.app.get_livesynthesis())
return res | [
"Get Alignak live synthesis\n\n This will return an object containing the properties of the `identity`, plus a\n `livesynthesis`\n object which contains 2 properties for each known scheduler:\n - _freshness, which is the timestamp when the provided data were fetched\n - livesynthesis, which is an object with the scheduler live synthesis.\n\n An `_overall` fake scheduler is also contained in the schedulers list to provide the\n cumulated live synthesis. Before sending the results, the arbiter sums-up all its\n schedulers live synthesis counters in the `_overall` live synthesis.\n\n {\n ...\n\n \"livesynthesis\": {\n \"_overall\": {\n \"_freshness\": 1528947526,\n \"livesynthesis\": {\n \"hosts_total\": 11,\n \"hosts_not_monitored\": 0,\n \"hosts_up_hard\": 11,\n \"hosts_up_soft\": 0,\n \"hosts_down_hard\": 0,\n \"hosts_down_soft\": 0,\n \"hosts_unreachable_hard\": 0,\n \"hosts_unreachable_soft\": 0,\n \"hosts_flapping\": 0,\n \"hosts_problems\": 0,\n \"hosts_acknowledged\": 0,\n \"hosts_in_downtime\": 0,\n \"services_total\": 100,\n \"services_not_monitored\": 0,\n \"services_ok_hard\": 70,\n \"services_ok_soft\": 0,\n \"services_warning_hard\": 4,\n \"services_warning_soft\": 6,\n \"services_critical_hard\": 6,\n \"services_critical_soft\": 4,\n \"services_unknown_hard\": 3,\n \"services_unknown_soft\": 7,\n \"services_unreachable_hard\": 0,\n \"services_unreachable_soft\": 0,\n \"services_flapping\": 0,\n \"services_problems\": 0,\n \"services_acknowledged\": 0,\n \"services_in_downtime\": 0\n }\n }\n },\n \"scheduler-master\": {\n \"_freshness\": 1528947522,\n \"livesynthesis\": {\n \"hosts_total\": 11,\n \"hosts_not_monitored\": 0,\n \"hosts_up_hard\": 11,\n \"hosts_up_soft\": 0,\n \"hosts_down_hard\": 0,\n \"hosts_down_soft\": 0,\n \"hosts_unreachable_hard\": 0,\n \"hosts_unreachable_soft\": 0,\n \"hosts_flapping\": 0,\n \"hosts_problems\": 0,\n \"hosts_acknowledged\": 0,\n \"hosts_in_downtime\": 0,\n \"services_total\": 100,\n \"services_not_monitored\": 0,\n \"services_ok_hard\": 70,\n \"services_ok_soft\": 0,\n \"services_warning_hard\": 4,\n \"services_warning_soft\": 6,\n \"services_critical_hard\": 6,\n \"services_critical_soft\": 4,\n \"services_unknown_hard\": 3,\n \"services_unknown_soft\": 7,\n \"services_unreachable_hard\": 0,\n \"services_unreachable_soft\": 0,\n \"services_flapping\": 0,\n \"services_problems\": 0,\n \"services_acknowledged\": 0,\n \"services_in_downtime\": 0\n }\n }\n }\n }\n }\n\n :return: scheduler live synthesis\n :rtype: dict\n "
] |
Please provide a description of the function:def object(self, o_type, o_name=None):
for scheduler_link in self.app.conf.schedulers:
sched_res = scheduler_link.con.get('object', {'o_type': o_type, 'o_name': o_name},
wait=True)
if isinstance(sched_res, dict) and 'content' in sched_res:
return sched_res
return {'_status': u'ERR', '_message': u'Required %s not found.' % o_type} | [
"Get a monitored object from the arbiter.\n\n Indeed, the arbiter requires the object from its schedulers. It will iterate in\n its schedulers list until a matching object is found. Else it will return a Json\n structure containing _status and _message properties.\n\n When found, the result is a serialized object which is a Json structure containing:\n - content: the serialized object content\n - __sys_python_module__: the python class of the returned object\n\n The Alignak unserialize function of the alignak.misc.serialization package allows\n to restore the initial object.\n\n .. code-block:: python\n\n from alignak.misc.serialization import unserialize\n from alignak.objects.hostgroup import Hostgroup\n raw_data = req.get(\"http://127.0.0.1:7768/object/hostgroup/allhosts\")\n print(\"Got: %s / %s\" % (raw_data.status_code, raw_data.content))\n assert raw_data.status_code == 200\n object = raw_data.json()\n group = unserialize(object, True)\n assert group.__class__ == Hostgroup\n assert group.get_name() == 'allhosts'\n\n As an example:\n {\n \"__sys_python_module__\": \"alignak.objects.hostgroup.Hostgroup\",\n \"content\": {\n \"uuid\": \"32248642-97dd-4f39-aaa2-5120112a765d\",\n \"name\": \"\",\n \"hostgroup_name\": \"allhosts\",\n \"use\": [],\n \"tags\": [],\n \"alias\": \"All Hosts\",\n \"notes\": \"\",\n \"definition_order\": 100,\n \"register\": true,\n \"unknown_members\": [],\n \"notes_url\": \"\",\n \"action_url\": \"\",\n\n \"imported_from\": \"unknown\",\n \"conf_is_correct\": true,\n \"configuration_errors\": [],\n \"configuration_warnings\": [],\n \"realm\": \"\",\n \"downtimes\": {},\n \"hostgroup_members\": [],\n \"members\": [\n \"553d47bc-27aa-426c-a664-49c4c0c4a249\",\n \"f88093ca-e61b-43ff-a41e-613f7ad2cea2\",\n \"df1e2e13-552d-43de-ad2a-fe80ad4ba979\",\n \"d3d667dd-f583-4668-9f44-22ef3dcb53ad\"\n ]\n }\n }\n\n :param o_type: searched object type\n :type o_type: str\n :param o_name: searched object name (or uuid)\n :type o_name: str\n :return: serialized object information\n :rtype: str\n "
] |
Please provide a description of the function:def dump(self, o_name=None, details=False, raw=False):
if details is not False:
details = bool(details)
if raw is not False:
raw = bool(raw)
res = {}
for scheduler_link in self.app.conf.schedulers:
sched_res = scheduler_link.con.get('dump', {'o_name': o_name,
'details': '1' if details else '',
'raw': '1' if raw else ''},
wait=True)
if isinstance(sched_res, dict) and \
'_status' in sched_res and sched_res['_status'] == 'ERR':
continue
res[scheduler_link.name] = sched_res
return res | [
"Dump an host (all hosts) from the arbiter.\n\n The arbiter will get the host (all hosts) information from all its schedulers.\n\n This gets the main host information from the scheduler. If details is set, then some\n more information are provided. This will not get all the host known attributes but only\n a reduced set that will inform about the host and its services status\n\n If raw is set the information are provided in two string lists formated as CSV strings.\n The first list element contains the hosts information and the second one contains the\n services information.\n\n If an host name is provided, this function will get only this host information, else\n all the scheduler hosts are returned.\n\n As an example (in raw format):\n {\n scheduler-master-3: [\n [\n \"type;host;name;last_check;state_id;state;state_type;is_problem;\n is_impact;output\",\n \"localhost;host;localhost;1532451740;0;UP;HARD;False;False;\n Host assumed to be UP\",\n \"host_2;host;host_2;1532451988;1;DOWN;HARD;True;False;I am always Down\"\n ],\n [\n \"type;host;name\",\n \"host_2;service;dummy_no_output;1532451981;0;OK;HARD;False;True;\n Service internal check result: 0\",\n \"host_2;service;dummy_warning;1532451960;4;UNREACHABLE;HARD;False;True;\n host_2-dummy_warning-1\",\n \"host_2;service;dummy_unreachable;1532451987;4;UNREACHABLE;HARD;False;True;\n host_2-dummy_unreachable-4\",\n \"host_2;service;dummy_random;1532451949;4;UNREACHABLE;HARD;False;True;\n Service internal check result: 2\",\n \"host_2;service;dummy_ok;1532452002;0;OK;HARD;False;True;host_2\",\n \"host_2;service;dummy_critical;1532451953;4;UNREACHABLE;HARD;False;True;\n host_2-dummy_critical-2\",\n \"host_2;service;dummy_unknown;1532451945;4;UNREACHABLE;HARD;False;True;\n host_2-dummy_unknown-3\",\n \"host_2;service;dummy_echo;1532451973;4;UNREACHABLE;HARD;False;True;\"\n ]\n ],\n scheduler-master-2: [\n [\n \"type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output\",\n \"host_0;host;host_0;1532451993;0;UP;HARD;False;False;I am always Up\",\n \"BR_host;host;BR_host;1532451991;0;UP;HARD;False;False;Host assumed to be UP\"\n ],\n [\n \"type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output\",\n \"host_0;service;dummy_no_output;1532451970;0;OK;HARD;False;False;\n Service internal check result: 0\",\n \"host_0;service;dummy_unknown;1532451964;3;UNKNOWN;HARD;True;False;\n host_0-dummy_unknown-3\",\n \"host_0;service;dummy_random;1532451991;1;WARNING;HARD;True;False;\n Service internal check result: 1\",\n \"host_0;service;dummy_warning;1532451945;1;WARNING;HARD;True;False;\n host_0-dummy_warning-1\",\n \"host_0;service;dummy_unreachable;1532451986;4;UNREACHABLE;HARD;True;False;\n host_0-dummy_unreachable-4\",\n \"host_0;service;dummy_ok;1532452012;0;OK;HARD;False;False;host_0\",\n \"host_0;service;dummy_critical;1532451987;2;CRITICAL;HARD;True;False;\n host_0-dummy_critical-2\",\n \"host_0;service;dummy_echo;1532451963;0;OK;HARD;False;False;\",\n \"BR_host;service;dummy_critical;1532451970;2;CRITICAL;HARD;True;False;\n BR_host-dummy_critical-2\",\n \"BR_host;service;BR_Simple_And;1532451895;1;WARNING;HARD;True;True;\",\n \"BR_host;service;dummy_unreachable;1532451981;4;UNREACHABLE;HARD;True;False;\n BR_host-dummy_unreachable-4\",\n \"BR_host;service;dummy_no_output;1532451975;0;OK;HARD;False;False;\n Service internal check result: 0\",\n \"BR_host;service;dummy_unknown;1532451955;3;UNKNOWN;HARD;True;False;\n BR_host-dummy_unknown-3\",\n 
\"BR_host;service;dummy_echo;1532451981;0;OK;HARD;False;False;\",\n \"BR_host;service;dummy_warning;1532451972;1;WARNING;HARD;True;False;\n BR_host-dummy_warning-1\",\n \"BR_host;service;dummy_random;1532451976;4;UNREACHABLE;HARD;True;False;\n Service internal check result: 4\",\n \"BR_host;service;dummy_ok;1532451972;0;OK;HARD;False;False;BR_host\"\n ]\n ],\n ...\n\n More information are available in the scheduler correponding API endpoint.\n\n :param o_type: searched object type\n :type o_type: str\n :param o_name: searched object name (or uuid)\n :type o_name: str\n :return: serialized object information\n :rtype: str\n "
] |
Please provide a description of the function:def status(self, details=False):
if details is not False:
details = bool(details)
return self.app.get_alignak_status(details=details) | [
"Get the overall alignak status\n\n Returns a list of the satellites as in:\n {\n services: [\n {\n livestate: {\n perf_data: \"\",\n timestamp: 1532106561,\n state: \"ok\",\n long_output: \"\",\n output: \"all daemons are up and running.\"\n },\n name: \"arbiter-master\"\n },\n {\n livestate: {\n name: \"poller_poller-master\",\n timestamp: 1532106561,\n long_output: \"Realm: (True). Listening on: http://127.0.0.1:7771/\",\n state: \"ok\",\n output: \"daemon is alive and reachable.\",\n perf_data: \"last_check=1532106560.17\"\n },\n name: \"poller-master\"\n },\n ...\n ...\n ],\n variables: { },\n livestate: {\n timestamp: 1532106561,\n long_output: \"broker-master - daemon is alive and reachable.\n poller-master - daemon is alive and reachable.\n reactionner-master - daemon is alive and reachable.\n receiver-master - daemon is alive and reachable.\n receiver-nsca - daemon is alive and reachable.\n scheduler-master - daemon is alive and reachable.\n scheduler-master-2 - daemon is alive and reachable.\n scheduler-master-3 - daemon is alive and reachable.\",\n state: \"up\",\n output: \"All my daemons are up and running.\",\n perf_data: \"\n 'servicesextinfo'=0 'businessimpactmodulations'=0 'hostgroups'=2\n 'resultmodulations'=0 'escalations'=0 'schedulers'=3 'hostsextinfo'=0\n 'contacts'=2 'servicedependencies'=0 'servicegroups'=1 'pollers'=1\n 'arbiters'=1 'receivers'=2 'macromodulations'=0 'reactionners'=1\n 'contactgroups'=2 'brokers'=1 'realms'=3 'services'=32 'commands'=11\n 'notificationways'=2 'timeperiods'=4 'modules'=0 'checkmodulations'=0\n 'hosts'=6 'hostdependencies'=0\"\n },\n name: \"My Alignak\",\n template: {\n notes: \"\",\n alias: \"My Alignak\",\n _templates: [\n \"alignak\",\n \"important\"\n ],\n active_checks_enabled: false,\n passive_checks_enabled: true\n }\n }\n\n :param details: Details are required (different from 0)\n :type details bool\n\n :return: dict with key *daemon_type* and value list of daemon name\n :rtype: dict\n "
] |
Please provide a description of the function:def events_log(self, details=False, count=0, timestamp=0):
if not count:
count = 1 + int(os.environ.get('ALIGNAK_EVENTS_LOG_COUNT',
self.app.conf.events_log_count))
count = int(count)
timestamp = float(timestamp)
logger.debug('Get max %d events, newer than %s out of %d',
count, timestamp, len(self.app.recent_events))
res = []
for log in reversed(self.app.recent_events):
if timestamp and timestamp > log['timestamp']:
break
if not count:
break
# Decrement the remaining events counter so that count is honoured
count = count - 1
if details:
# Exposes the full object
res.append(log)
else:
res.append("%s - %s - %s"
% (log['date'], log['level'][0].upper(), log['message']))
logger.debug('Got %d events', len(res))
return res | [
"Get the most recent Alignak events\n\n If count is specifies it is the maximum number of events to return.\n\n If timestamp is specified, events older than this timestamp will not be returned\n\n The arbiter maintains a list of the most recent Alignak events. This endpoint\n provides this list.\n\n The default format is:\n [\n \"2018-07-23 15:14:43 - E - SERVICE NOTIFICATION: guest;host_0;dummy_random;CRITICAL;1;\n notify-service-by-log;Service internal check result: 2\",\n \"2018-07-23 15:14:43 - E - SERVICE NOTIFICATION: admin;host_0;dummy_random;CRITICAL;1;\n notify-service-by-log;Service internal check result: 2\",\n \"2018-07-23 15:14:42 - E - SERVICE ALERT: host_0;dummy_critical;CRITICAL;SOFT;1;\n host_0-dummy_critical-2\",\n \"2018-07-23 15:14:42 - E - SERVICE ALERT: host_0;dummy_random;CRITICAL;HARD;2;\n Service internal check result: 2\",\n \"2018-07-23 15:14:42 - I - SERVICE ALERT: host_0;dummy_unknown;UNKNOWN;HARD;2;\n host_0-dummy_unknown-3\"\n ]\n\n If you request on this endpoint with the *details* parameter (whatever its value...),\n you will get a detailed JSON output:\n [\n {\n timestamp: 1535517701.1817362,\n date: \"2018-07-23 15:16:35\",\n message: \"SERVICE ALERT: host_11;dummy_echo;UNREACHABLE;HARD;2;\",\n level: \"info\"\n },\n {\n timestamp: 1535517701.1817362,\n date: \"2018-07-23 15:16:32\",\n message: \"SERVICE NOTIFICATION: guest;host_0;dummy_random;OK;0;\n notify-service-by-log;Service internal check result: 0\",\n level: \"info\"\n },\n {\n timestamp: 1535517701.1817362,\n date: \"2018-07-23 15:16:32\",\n message: \"SERVICE NOTIFICATION: admin;host_0;dummy_random;OK;0;\n notify-service-by-log;Service internal check result: 0\",\n level: \"info\"\n },\n {\n timestamp: 1535517701.1817362,\n date: \"2018-07-23 15:16:32\",\n message: \"SERVICE ALERT: host_0;dummy_random;OK;HARD;2;\n Service internal check result: 0\",\n level: \"info\"\n },\n {\n timestamp: 1535517701.1817362,\n date: \"2018-07-23 15:16:19\",\n message: \"SERVICE ALERT: host_11;dummy_random;OK;HARD;2;\n Service internal check result: 0\",\n level: \"info\"\n }\n ]\n\n In this example, only the 5 most recent events are provided whereas the default value is\n to provide the 100 last events. This default counter may be changed thanks to the\n ``events_log_count`` configuration variable or\n ``ALIGNAK_EVENTS_LOG_COUNT`` environment variable.\n\n The date format may also be changed thanks to the ``events_date_format`` configuration\n variable.\n\n :return: list of the most recent events\n :rtype: list\n "
] |
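A hedged client-side example, assuming the endpoint is exposed as /events_log on the arbiter HTTP port: fetch at most 5 detailed events newer than a given timestamp.

import requests

# Hypothetical call; host, port and parameter values are illustrative
raw = requests.get('http://127.0.0.1:7770/events_log',
                   params={'details': '1', 'count': 5, 'timestamp': 1535517000})
for event in raw.json():
    # Each detailed event is a dict with timestamp, date, level and message
    print(event['date'], event['level'], event['message'])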
Please provide a description of the function:def satellites_list(self, daemon_type=''):
with self.app.conf_lock:
res = {}
for s_type in ['arbiter', 'scheduler', 'poller', 'reactionner', 'receiver', 'broker']:
if daemon_type and daemon_type != s_type:
continue
satellite_list = []
res[s_type] = satellite_list
for daemon_link in getattr(self.app.conf, s_type + 's', []):
satellite_list.append(daemon_link.name)
return res | [
"Get the arbiter satellite names sorted by type\n\n Returns a list of the satellites as in:\n {\n reactionner: [\n \"reactionner-master\"\n ],\n broker: [\n \"broker-master\"\n ],\n arbiter: [\n \"arbiter-master\"\n ],\n scheduler: [\n \"scheduler-master-3\",\n \"scheduler-master\",\n \"scheduler-master-2\"\n ],\n receiver: [\n \"receiver-nsca\",\n \"receiver-master\"\n ],\n poller: [\n \"poller-master\"\n ]\n }\n\n If a specific daemon type is requested, the list is reduced to this unique daemon type:\n {\n scheduler: [\n \"scheduler-master-3\",\n \"scheduler-master\",\n \"scheduler-master-2\"\n ]\n }\n\n :param daemon_type: daemon type to filter\n :type daemon_type: str\n :return: dict with key *daemon_type* and value list of daemon name\n :rtype: dict\n "
] |
Please provide a description of the function:def realms(self, details=False):
def get_realm_info(realm, realms, satellites, details=False):
res = {
"name": realm.get_name(),
"level": realm.level,
"hosts": realm.members,
"hostgroups": realm.group_members,
"children": {},
"satellites": {
}
}
for child in realm.realm_members:
child = realms.find_by_name(child)
if not child:
continue
realm_infos = get_realm_info(child, realms, satellites, details=details)
res['children'][child.get_name()] = realm_infos
for sat_type in ['scheduler', 'reactionner', 'broker', 'receiver', 'poller']:
res["satellites"][sat_type + 's'] = []
sats = realm.get_potential_satellites_by_type(satellites, sat_type)
for sat in sats:
if details:
res["satellites"][sat_type + 's'][sat.name] = sat.give_satellite_json()
else:
res["satellites"][sat_type + 's'].append(sat.name)
return res
if details is not False:
details = bool(details)
# Report our daemons states, but only if a dispatcher and the configuration is loaded
if not getattr(self.app, 'dispatcher', None) or not getattr(self.app, 'conf', None):
return {'_status': u'ERR', '_message': "Not yet available. Please come back later."}
res = {}
higher_realms = [realm for realm in self.app.conf.realms if realm.level == 0]
for realm in higher_realms:
res[realm.get_name()] = get_realm_info(realm, self.app.conf.realms,
self.app.dispatcher.all_daemons_links,
details=details)
return res | [
"Return the realms / satellites configuration\n\n Returns an object containing the hierarchical realms configuration with the main\n information about each realm:\n {\n All: {\n satellites: {\n pollers: [\n \"poller-master\"\n ],\n reactionners: [\n \"reactionner-master\"\n ],\n schedulers: [\n \"scheduler-master\", \"scheduler-master-3\", \"scheduler-master-2\"\n ],\n brokers: [\n \"broker-master\"\n ],\n receivers: [\n \"receiver-master\", \"receiver-nsca\"\n ]\n },\n children: { },\n name: \"All\",\n members: [\n \"host_1\", \"host_0\", \"host_3\", \"host_2\", \"host_11\", \"localhost\"\n ],\n level: 0\n },\n North: {\n ...\n }\n }\n\n Sub realms defined inside a realm are provided in the `children` property of their\n parent realm and they contain the same information as their parent..\n The `members` realm contain the list of the hosts members of the realm.\n\n If ``details`` is required, each realm will contain more information about each satellite\n involved in the realm management:\n {\n All: {\n satellites: {\n pollers: [\n {\n passive: false,\n name: \"poller-master\",\n livestate_output: \"poller/poller-master is up and running.\",\n reachable: true,\n uri: \"http://127.0.0.1:7771/\",\n alive: true,\n realm_name: \"All\",\n manage_sub_realms: true,\n spare: false,\n polling_interval: 5,\n configuration_sent: true,\n active: true,\n livestate: 0,\n max_check_attempts: 3,\n last_check: 1532242300.593074,\n type: \"poller\"\n }\n ],\n reactionners: [\n {\n passive: false,\n name: \"reactionner-master\",\n livestate_output: \"reactionner/reactionner-master is up and running.\",\n reachable: true,\n uri: \"http://127.0.0.1:7769/\",\n alive: true,\n realm_name: \"All\",\n manage_sub_realms: true,\n spare: false,\n polling_interval: 5,\n configuration_sent: true,\n active: true,\n livestate: 0,\n max_check_attempts: 3,\n last_check: 1532242300.587762,\n type: \"reactionner\"\n }\n ]\n\n :return: dict containing realms / satellites\n :rtype: dict\n ",
"Get the realm and its children information\n\n :return: None\n "
] |
Please provide a description of the function:def satellites_configuration(self):
res = {}
for s_type in ['arbiter', 'scheduler', 'poller', 'reactionner', 'receiver',
'broker']:
lst = []
res[s_type] = lst
for daemon in getattr(self.app.conf, s_type + 's'):
cls = daemon.__class__
env = {}
all_props = [cls.properties, cls.running_properties]
for props in all_props:
for prop in props:
if not hasattr(daemon, prop):
continue
if prop in ["realms", "conf", "con", "tags", "modules", "cfg",
"broks", "cfg_to_manage"]:
continue
val = getattr(daemon, prop)
# probe the value: keep it only if it is JSON-serializable
try:
json.dumps(val)
env[prop] = val
except TypeError as exp:
logger.warning('satellites_configuration, %s: %s', prop, str(exp))
lst.append(env)
return res | [
"Return all the configuration data of satellites\n\n :return: dict containing satellites data\n Output looks like this ::\n\n {'arbiter' : [{'property1':'value1' ..}, {'property2', 'value11' ..}, ..],\n 'scheduler': [..],\n 'poller': [..],\n 'reactionner': [..],\n 'receiver': [..],\n 'broker: [..]'\n }\n\n :rtype: dict\n "
] |
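The attribute filtering above relies on a probe with json.dumps to keep only JSON-serializable values. The same trick, extracted as a stand-alone sketch:

import json

def jsonable_attributes(obj, names):
    """Sketch: return a dict of the attributes of obj that serialize to JSON."""
    env = {}
    for name in names:
        value = getattr(obj, name, None)
        try:
            json.dumps(value)
        except TypeError:
            continue  # skip values that are not JSON serializable
        env[name] = value
    return env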
Please provide a description of the function:def external_commands(self):
res = []
with self.app.external_commands_lock:
for cmd in self.app.get_external_commands():
res.append(cmd.serialize())
return res | [
"Get the external commands from the daemon\n\n Use a lock for this function to protect\n\n :return: serialized external command list\n :rtype: str\n "
] |
Please provide a description of the function:def search(self): # pylint: disable=no-self-use
logger.debug("Grafana search... %s", cherrypy.request.method)
if cherrypy.request.method == 'OPTIONS':
cherrypy.response.headers['Access-Control-Allow-Methods'] = 'GET,POST,PATCH,PUT,DELETE'
cherrypy.response.headers['Access-Control-Allow-Headers'] = 'Content-Type,Authorization'
cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
cherrypy.request.handler = None
return {}
if getattr(cherrypy.request, 'json', None):
logger.debug("Posted data: %s", cherrypy.request.json)
logger.debug("Grafana search returns: %s", GRAFANA_TARGETS)
return GRAFANA_TARGETS | [
"\n Request available queries\n\n Posted data: {u'target': u''}\n\n Return the list of available target queries\n\n :return: See upper comment\n :rtype: list\n "
] |
Please provide a description of the function:def query(self):
logger.debug("Grafana query... %s", cherrypy.request.method)
if cherrypy.request.method == 'OPTIONS':
cherrypy.response.headers['Access-Control-Allow-Methods'] = 'GET,POST,PATCH,PUT,DELETE'
cherrypy.response.headers['Access-Control-Allow-Headers'] = 'Content-Type,Authorization'
cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
cherrypy.request.handler = None
return {}
if getattr(cherrypy.request, 'json', None):
posted_data = cherrypy.request.json
logger.debug("Posted data: %s", cherrypy.request.json)
targets = None
target = None
try:
targets = posted_data.get("targets")
assert targets
assert len(targets) == 1
target = targets[0].get("target")
except Exception as exp: # pylint: disable=broad-except
cherrypy.response.status = 409
return {'_status': u'ERR', '_message': u'Request error: %s.' % exp}
resp = []
if target in ['events_log']:
resp = [{
"type": "table",
"columns": [
{
"text": "Time",
"type": "time",
"sort": True,
"desc": True
},
{
"text": "Severity",
"type": "integer"
},
{
"text": "Message",
"type": "string"
}
],
"rows": []
}]
severity = {
"info": 0,
'warning': 1,
'error': 2,
'critical': 3
}
for log in reversed(self.app.recent_events):
# 0 for the first required target
# timestamp must be precise on ms for Grafana
resp[0]['rows'].append([log['timestamp'] * 1000,
severity.get(log['level'].lower(), 3), log['message']])
if target in ['problems_log']:
resp = [{
"type": "table",
"columns": [
{
"text": "Raised",
"type": "time",
"sort": True,
"desc": True
},
{
"text": "Severity",
"type": "integer"
},
{
"text": "Host",
"type": "string"
},
{
"text": "Service",
"type": "string"
},
{
"text": "State",
"type": "integer"
},
{
"text": "Output",
"type": "string"
}
],
"rows": []
}]
severity = {
"up": 0,
'down': 2,
'ok': 0,
'warning': 1,
'critical': 2
}
problems = {}
for scheduler_link in self.app.conf.schedulers:
sched_res = scheduler_link.con.get('monitoring_problems', wait=True)
if 'problems' in sched_res:
problems.update(sched_res['problems'])
# todo: add a sorting
for problem_uuid in problems:
log = problems[problem_uuid]
# 0 for the first required target
resp[0]['rows'].append([log['last_hard_state_change'] * 1000,
severity.get(log['state'].lower(), 3),
log['host'], log['service'], log['state'], log['output']])
return resp | [
"\n Request object passed to datasource.query function:\n\n {\n 'timezone': 'browser',\n 'panelId': 38,\n 'range': {\n 'from': '2018-08-29T02:38:09.633Z',\n 'to': '2018-08-29T03:38:09.633Z',\n 'raw': {'from': 'now-1h', 'to': 'now'}\n },\n 'rangeRaw': {'from': 'now-1h', 'to': 'now'},\n 'interval': '10s',\n 'intervalMs': 10000,\n 'targets': [\n {\n 'target': 'problems', 'refId': 'A', 'type': 'table'}\n ],\n 'format': 'json',\n 'maxDataPoints': 314,\n 'scopedVars': {\n '__interval': {'text': '10s', 'value': '10s'},\n '__interval_ms': {'text': 10000, 'value': 10000}\n }\n }\n\n Only the first target is considered. If several targets are required, an error is raised.\n\n The target is a string that is searched in the target_queries dictionary. If found\n the corresponding query is executed and the result is returned.\n\n Table response from datasource.query. An array of:\n\n [\n {\n \"type\": \"table\",\n \"columns\": [\n {\n \"text\": \"Time\",\n \"type\": \"time\",\n \"sort\": true,\n \"desc\": true,\n },\n {\n \"text\": \"mean\",\n },\n {\n \"text\": \"sum\",\n }\n ],\n \"rows\": [\n [\n 1457425380000,\n null,\n null\n ],\n [\n 1457425370000,\n 1002.76215352,\n 1002.76215352\n ],\n ]\n }\n ]\n :return: See upper comment\n :rtype: list\n "
] |
Please provide a description of the function:def _build_host_livestate(self, host_name, livestate):
# pylint: disable=no-self-use, too-many-locals
state = livestate.get('state', 'UP').upper()
output = livestate.get('output', '')
long_output = livestate.get('long_output', '')
perf_data = livestate.get('perf_data', '')
try:
# Note: the 'ABC' default deliberately raises a ValueError when no timestamp is provided
timestamp = int(livestate.get('timestamp', 'ABC'))
except ValueError:
timestamp = None
host_state_to_id = {
"UP": 0,
"DOWN": 1,
"UNREACHABLE": 2
}
parameters = '%s;%s' % (host_state_to_id.get(state, 3), output)
if long_output and perf_data:
parameters = '%s|%s\n%s' % (parameters, perf_data, long_output)
elif long_output:
parameters = '%s\n%s' % (parameters, long_output)
elif perf_data:
parameters = '%s|%s' % (parameters, perf_data)
command_line = 'PROCESS_HOST_CHECK_RESULT;%s;%s' % (host_name, parameters)
if timestamp is not None:
command_line = '[%d] %s' % (timestamp, command_line)
else:
command_line = '[%d] %s' % (int(time.time()), command_line)
return command_line | [
"Build and notify the external command for an host livestate\n\n PROCESS_HOST_CHECK_RESULT;<host_name>;<status_code>;<plugin_output>\n\n :param host_name: the concerned host name\n :param livestate: livestate dictionary\n :return: external command line\n "
] |
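A short usage sketch of the builder above, with invented sample values; `interface` stands for a hypothetical instance of the class owning this method:

livestate = {
    'state': 'down',
    'output': 'CRITICAL - Host unreachable',
    'perf_data': 'rta=0.000ms;;;;',
    'timestamp': 1535500000
}
print(interface._build_host_livestate('srv001', livestate))
# [1535500000] PROCESS_HOST_CHECK_RESULT;srv001;1;CRITICAL - Host unreachable|rta=0.000ms;;;;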
Please provide a description of the function:def _build_service_livestate(self, host_name, service_name, livestate):
        # pylint: disable=no-self-use, too-many-locals
        state = livestate.get('state', 'OK').upper()
        output = livestate.get('output', '')
        long_output = livestate.get('long_output', '')
        perf_data = livestate.get('perf_data', '')
        try:
            # 'ABC' is a deliberately invalid default: a missing or non-integer
            # timestamp raises a ValueError and the current time is used instead
            timestamp = int(livestate.get('timestamp', 'ABC'))
        except ValueError:
            timestamp = None
        service_state_to_id = {
            "OK": 0,
            "WARNING": 1,
            "CRITICAL": 2,
            "UNKNOWN": 3,
            "UNREACHABLE": 4
        }
        parameters = '%s;%s' % (service_state_to_id.get(state, 3), output)
        if long_output and perf_data:
            parameters = '%s|%s\n%s' % (parameters, perf_data, long_output)
        elif long_output:
            parameters = '%s\n%s' % (parameters, long_output)
        elif perf_data:
            parameters = '%s|%s' % (parameters, perf_data)
        command_line = 'PROCESS_SERVICE_CHECK_RESULT;%s;%s;%s' % \
            (host_name, service_name, parameters)
        if timestamp is not None:
            command_line = '[%d] %s' % (timestamp, command_line)
        else:
            command_line = '[%d] %s' % (int(time.time()), command_line)
        return command_line | [
"Build and notify the external command for a service livestate\n\n PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<return_code>;<plugin_output>\n\n Create and post a logcheckresult to the backend for the livestate\n\n :param host_name: the concerned host name\n :param service_name: the concerned service name\n :param livestate: livestate dictionary\n :return: external command line\n "
] |
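The same pattern for a service check result; when no timestamp is provided, the int('ABC') fallback makes the builder stamp the command with the current time (invented values, hypothetical `interface` instance):

livestate = {'state': 'warning', 'output': 'HTTP slow', 'long_output': 'Response time: 4.2s'}
print(interface._build_service_livestate('srv001', 'http', livestate))
# [<current time>] PROCESS_SERVICE_CHECK_RESULT;srv001;http;1;HTTP slow
# Response time: 4.2s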
Please provide a description of the function:def host(self):
        # pylint: disable=too-many-branches
        logger.debug("Host status...")
        if cherrypy.request.method not in ["PATCH", "POST"]:
            cherrypy.response.status = 405
            return {'_status': 'ERR',
                    '_error': 'You must only PATCH or POST on this endpoint.'}
        # Update an host
        # ---
        if not cherrypy.request.json:
            return {'_status': 'ERR',
                    '_error': 'You must send parameters on this endpoint.'}
        host_name = None
        if cherrypy.request.json.get('name', None) is not None:
            host_name = cherrypy.request.json.get('name', None)
        if not host_name:
            return {'_status': 'ERR',
                    '_error': 'Missing targeted host name.'}
        # Get provided data
        # ---
        logger.debug("Posted data: %s", cherrypy.request.json)
        # Check if the host exist in Alignak
        # ---
        # todo: Not mandatory but it would be clean...
        # Prepare response
        # ---
        ws_result = {'_status': 'OK',
                     '_result': ['%s is alive :)' % host_name],
                     '_issues': []}
        # Manage the host livestate
        # ---
        # Alert on unordered livestate if several information exist
        now = int(time.time())
        livestate = cherrypy.request.json.get('livestate', None)
        if not livestate:
            # Create an host live state command
            livestate = {'state': "UP"}
        if not isinstance(livestate, list):
            livestate = [livestate]
        last_ts = 0
        for ls in livestate:
            if ls.get('state', None) is None:
                ws_result['_issues'].append("Missing state for the host '%s' livestate, "
                                            "assuming host is UP!" % host_name)
                ls['state'] = 'UP'
            # Tag our own timestamp
            ls['_ws_timestamp'] = now
            try:
                timestamp = int(ls.get('timestamp', 'ABC'))
                if timestamp < last_ts:
                    logger.info("Got unordered timestamp for the host '%s'. "
                                "The Alignak scheduler may not handle the check result!",
                                host_name)
                last_ts = timestamp
            except ValueError:
                pass
        for ls in livestate:
            state = ls.get('state').upper()
            if state not in ['UP', 'DOWN', 'UNREACHABLE']:
                ws_result['_issues'].append("Host state should be UP, DOWN or UNREACHABLE"
                                            ", and not '%s'." % (state))
            else:
                # Create an host live state command
                command = self._build_host_livestate(host_name, ls)
                ws_result['_result'].append("Raised: %s" % command)
                # Notify the external command to our Arbiter daemon
                self.app.add(ExternalCommand(command))
        services = cherrypy.request.json.get('services', None)
        if not services:
            return ws_result
        for service in services:
            service_name = service.get('name', None)
            if service_name is None:
                ws_result['_issues'].append("A service does not have a 'name' property")
                continue
            livestate = service.get('livestate', None)
            if not livestate:
                # Create a service live state command
                livestate = {'state': "OK"}
            if not isinstance(livestate, list):
                livestate = [livestate]
            last_ts = 0
            for ls in livestate:
                if ls.get('state', None) is None:
                    ws_result['_issues'].append("Missing state for the service %s/%s livestate, "
                                                "assuming service is OK!"
                                                % (host_name, service_name))
                    ls['state'] = 'OK'
                # Tag our own timestamp
                ls['_ws_timestamp'] = now
                try:
                    timestamp = int(ls.get('timestamp', 'ABC'))
                    if timestamp < last_ts:
                        logger.info("Got unordered timestamp for the service: %s/%s. "
                                    "The Alignak scheduler may not handle the check result!",
                                    host_name, service_name)
                    last_ts = timestamp
                except ValueError:
                    pass
            for ls in livestate:
                state = ls.get('state').upper()
                if state not in ['OK', 'WARNING', 'CRITICAL', 'UNKNOWN', 'UNREACHABLE']:
                    ws_result['_issues'].append("Service %s/%s state must be OK, WARNING, "
                                                "CRITICAL, UNKNOWN or UNREACHABLE, and not %s."
                                                % (host_name, service_name, state))
                else:
                    # Create a service live state command
                    command = self._build_service_livestate(host_name, service_name, ls)
                    ws_result['_result'].append("Raised: %s" % command)
                    # Notify the external command to our Arbiter daemon
                    self.app.add(ExternalCommand(command))
        return ws_result | [
"Get a passive checks for an host and its services\n\n This function builds the external commands corresponding to the host and services\n provided information\n\n :param host_name: host name\n :param data: dictionary of the host properties to be modified\n :return: command line\n "
] |
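A hedged sketch of a payload this endpoint accepts; the listening address is an assumption. As the code above shows, a livestate may be a single dictionary or a timestamp-ordered list of dictionaries:

import requests

payload = {
    "name": "srv001",
    "livestate": {"state": "UP", "output": "Host is alive"},
    "services": [
        {"name": "http", "livestate": {"state": "OK", "output": "HTTP OK: 200"}},
        {"name": "disk", "livestate": [
            {"state": "WARNING", "output": "80% used", "timestamp": 1535500000},
            {"state": "CRITICAL", "output": "91% used", "timestamp": 1535500060}
        ]}
    ]
}
result = requests.post("http://127.0.0.1:8888/host", json=payload).json()
print(result['_status'], result['_result'], result['_issues'])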
Please provide a description of the function:def _wait_new_conf(self):
        with self.app.conf_lock:
            logger.warning("My master Arbiter wants me to wait for a new configuration.")
            self.app.cur_conf = {} | [
"Ask the daemon to drop its configuration and wait for a new one\n\n This overrides the default method from GenericInterface\n\n :return: None\n "
] |
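For illustration, how a master arbiter could trigger this on a spare; the address and the assumption that the method is exposed under its own name on the daemon HTTP interface are both hypothetical:

import requests

requests.get("http://spare-arbiter:7770/_wait_new_conf")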
Please provide a description of the function:def _push_configuration(self, pushed_configuration=None):
        # The method parameter is ignored: the configuration is read
        # from the posted request JSON body
        pushed_configuration = cherrypy.request.json
        self.app.must_run = False
        return super(ArbiterInterface, self)._push_configuration(
            pushed_configuration=pushed_configuration['conf']) | [
"Send a new configuration to the daemon\n\n This overrides the default method from GenericInterface\n\n Used by the master arbiter to send its configuration to a spare arbiter\n\n This function is not intended for external use. It is quite complex to\n build a configuration for a daemon and it is the arbter dispatcher job ;)\n\n :param pushed_configuration: new conf to send\n :return: None\n "
] |
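And the matching push from the master, a sketch under the same name-based exposure assumption; the {'conf': ...} envelope is the one unpacked by the code above, while the address and serialized_configuration are placeholders:

import requests

requests.post("http://spare-arbiter:7770/_push_configuration",
              json={'conf': serialized_configuration})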