Code | Summary
---|---
Please provide a description of the function:def _do_not_run(self):
# If I'm the master, ignore the command and raise a log
if self.app.is_master:
message = "Received message to not run. " \
"I am the Master arbiter, ignore and continue to run."
logger.warning(message)
return {'_status': u'ERR', '_message': message}
# Else, I'm just a spare, so I listen to my master
logger.debug("Received message to not run. I am the spare, stopping.")
self.app.last_master_speak = time.time()
self.app.must_run = False
return {'_status': u'OK', '_message': message} | [
"The master arbiter tells to its spare arbiters to not run.\n\n A master arbiter will ignore this request and it will return an object\n containing some properties:\n '_status': 'ERR' because of the error\n `_message`: some more explanations about the error\n\n :return: None\n "
] |
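For clarity, these are the two response shapes built by the code above (dict contents copied from it):

```python
# Response shapes built by _do_not_run (message texts copied from the code above)
master_response = {
    '_status': 'ERR',
    '_message': "Received message to not run. "
                "I am the Master arbiter, ignore and continue to run.",
}
spare_response = {
    '_status': 'OK',
    '_message': "Received message to not run. I am the spare, stopping.",
}
```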
Please provide a description of the function:def linkify_one_command_with_commands(self, commands, prop):
for i in self:
command = getattr(i, prop, '').strip()
if command:
setattr(i, prop, self.create_commandcall(i, commands, command))
else:
# No defined command
setattr(i, prop, None) | [
"\n Link a command to a property (check_command for example)\n\n :param commands: commands object\n :type commands: alignak.objects.command.Commands\n :param prop: property name\n :type prop: str\n :param default: default command to use if the property is not defined\n :type default: str\n :return: None\n "
] |
Please provide a description of the function:def linkify_command_list_with_commands(self, commands, prop):
for i in self:
if not hasattr(i, prop):
continue
commands_list = strip_and_uniq(getattr(i, prop, ''))
cmds_list = []
for command in commands_list:
if not command:
continue
cmds_list.append(self.create_commandcall(i, commands, command))
setattr(i, prop, cmds_list) | [
"\n Link a command list (commands with , between) in real CommandCalls\n\n :param commands: commands object\n :type commands: alignak.objects.command.Commands\n :param prop: property name\n :type prop: str\n :return: None\n "
] |
Please provide a description of the function:def create_commandcall(prop, commands, command):
cc = {
'commands': commands,
'call': command
}
if hasattr(prop, 'enable_environment_macros'):
cc['enable_environment_macros'] = prop.enable_environment_macros
if hasattr(prop, 'poller_tag'):
cc['poller_tag'] = prop.poller_tag
elif hasattr(prop, 'reactionner_tag'):
cc['reactionner_tag'] = prop.reactionner_tag
return CommandCall(cc) | [
"\n Create CommandCall object with command\n\n :param prop: property\n :type prop: str\n :param commands: all commands\n :type commands: alignak.objects.command.Commands\n :param command: a command object\n :type command: str\n :return: a commandCall object\n :rtype: alignak.objects.commandcallitem.CommandCall\n "
] |
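For illustration only (the command string and the poller_tag value are made up; `commands` stands for the already loaded Commands collection), the data built by this function for an item that defines a poller_tag looks like this:

```python
# Illustrative example of the dict handed to CommandCall by create_commandcall()
# for an item defining a poller_tag; values here are hypothetical.
from alignak.objects.commandcallitem import CommandCall  # path taken from the docstring above

cc = {
    'commands': commands,            # the loaded Commands collection (assumed available)
    'call': 'check_ping!100.0,20%!500.0,60%',
    'poller_tag': 'DMZ',
}
command_call = CommandCall(cc)
```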
Please provide a description of the function:def _push_broks(self):
data = cherrypy.request.json
with self.app.arbiter_broks_lock:
logger.debug("Pushing %d broks", len(data['broks']))
self.app.arbiter_broks.extend([unserialize(elem, True) for elem in data['broks']]) | [
"Push the provided broks objects to the broker daemon\n\n Only used on a Broker daemon by the Arbiter\n\n :param: broks\n :type: list\n :return: None\n "
] |
Please provide a description of the function:def clean_params(self, params):
clean_p = {}
for elt in params:
elts = elt.split('=', 1)
if len(elts) == 1: # error, there is no = !
self.add_error("the parameter %s is malformed! (no = sign)" % elts[0])
else:
if elts[1] == '':
self.add_warning("the parameter %s is ambiguous! "
"No value after =, assuming an empty string" % elts[0])
clean_p[elts[0]] = elts[1]
return clean_p | [
"Convert a list of parameters (key=value) into a dict\n\n This function is used to transform Nagios (or ini) like formated parameters (key=value)\n to a dictionary.\n\n :param params: parameters list\n :type params: list\n :return: dict with key and value. Log error if malformed\n :rtype: dict\n "
] |
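As a standalone illustration of the behaviour described above (the real method reports through self.add_error / self.add_warning instead of returning the messages; the parameter names below are just examples):

```python
# Standalone sketch of the key=value parsing behaviour of clean_params().
def clean_params(params):
    clean_p, errors, warnings = {}, [], []
    for elt in params:
        elts = elt.split('=', 1)
        if len(elts) == 1:  # error, there is no = !
            errors.append("the parameter %s is malformed! (no = sign)" % elts[0])
        else:
            if elts[1] == '':
                warnings.append("the parameter %s is ambiguous! "
                                "No value after =, assuming an empty string" % elts[0])
            clean_p[elts[0]] = elts[1]
    return clean_p, errors, warnings

print(clean_params(['retention_update_interval=60', 'log_level=', 'broken_entry']))
# -> {'retention_update_interval': '60', 'log_level': ''}, one error, one warning
```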
Please provide a description of the function:def load_params(self, params):
logger.debug("Alignak parameters:")
for key, value in sorted(self.clean_params(params).items()):
update_attribute = None
# Maybe it's a variable as $USER$ or $ANOTHERVARIABLE$
# so look at the first character. If it's a $, it is a macro variable
# if it ends with $ too
if key[0] == '$' and key[-1] == '$':
key = key[1:-1]
# Update the macros list
if key not in self.__class__.macros:
logger.debug("New macro %s: %s - %s", self, key, value)
self.__class__.macros[key] = '$%s$' % key
key = '$%s$' % key
logger.debug("- macro %s", key)
update_attribute = value
# Create a new property to store the macro value
if isinstance(value, list):
self.__class__.properties[key] = ListProp(default=value)
else:
self.__class__.properties[key] = StringProp(default=value)
elif key in self.properties:
update_attribute = self.properties[key].pythonize(value)
elif key in self.running_properties:
logger.warning("using a the running property %s in a config file", key)
update_attribute = self.running_properties[key].pythonize(value)
elif key.startswith('$') or key in ['cfg_file', 'cfg_dir']:
# it's a macro or a useless now param, we don't touch this
update_attribute = value
else:
logger.debug("Guessing the property '%s' type because it "
"is not in %s object properties", key, self.__class__.__name__)
update_attribute = ToGuessProp().pythonize(value)
if update_attribute is not None:
setattr(self, key, update_attribute)
logger.debug("- update %s = %s", key, update_attribute)
# Change Nagios2 names to Nagios3 ones (before using them)
self.old_properties_names_to_new()
# Fill default for myself - new properties entry becomes a self attribute
self.fill_default() | [
"Load parameters from main configuration file\n\n :param params: parameters list (converted right at the beginning)\n :type params:\n :return: None\n "
] |
Please provide a description of the function:def _cut_line(line):
# punct = '"#$%&\'()*+/<=>?@[\\]^`{|}~'
if re.search("([\t\n\r]+|[\x0b\x0c ]{3,})+", line):
tmp = re.split("([\t\n\r]+|[\x0b\x0c ]{3,})+", line, 1)
else:
tmp = re.split("[" + string.whitespace + "]+", line, 1)
res = [elt.strip() for elt in tmp if elt.strip() != '']
return res | [
"Split the line on whitespaces and remove empty chunks\n\n :param line: the line to split\n :type line: str\n :return: list of strings\n :rtype: list\n "
] |
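A small standalone reproduction of _cut_line showing the intent of the "3 or more spaces" rule: a property value containing ordinary spaces stays in a single chunk.

```python
import re
import string

def cut_line(line):
    # Split on tabs/newlines or on runs of 3+ spaces first, else on any whitespace,
    # keeping everything after the first separator as a single chunk.
    if re.search("([\t\n\r]+|[\x0b\x0c ]{3,})+", line):
        tmp = re.split("([\t\n\r]+|[\x0b\x0c ]{3,})+", line, maxsplit=1)
    else:
        tmp = re.split("[" + string.whitespace + "]+", line, maxsplit=1)
    return [elt.strip() for elt in tmp if elt.strip() != '']

print(cut_line("alias   My web server"))   # ['alias', 'My web server']
print(cut_line("host_name\tsrv-01"))       # ['host_name', 'srv-01']
```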
Please provide a description of the function:def read_legacy_cfg_files(self, cfg_files, alignak_env_files=None):
# pylint: disable=too-many-nested-blocks,too-many-statements
# pylint: disable=too-many-branches, too-many-locals
cfg_buffer = ''
if not cfg_files:
return cfg_buffer
# Update configuration with the first legacy configuration file name and path
# This will update macro properties
self.alignak_env = 'n/a'
if alignak_env_files is not None:
self.alignak_env = alignak_env_files
if not isinstance(alignak_env_files, list):
self.alignak_env = [os.path.abspath(alignak_env_files)]
else:
self.alignak_env = [os.path.abspath(f) for f in alignak_env_files]
self.main_config_file = os.path.abspath(cfg_files[0])
self.config_base_dir = os.path.dirname(self.main_config_file)
# Universal newline mode (all new lines are managed internally)
res = StringIO(u"# Configuration cfg_files buffer", newline=None)
if not self.read_config_silent and cfg_files:
logger.info("Reading the configuration cfg_files...")
# A first pass to get all the configuration cfg_files in a buffer
for cfg_file in cfg_files:
# Make sure the configuration cfg_files are not repeated...
if os.path.abspath(cfg_file) in self.my_cfg_files:
logger.warning("- ignoring repeated file: %s", os.path.abspath(cfg_file))
continue
self.my_cfg_files.append(os.path.abspath(cfg_file))
# File header
res.write(u"\n")
res.write(u"# imported_from=%s" % cfg_file)
res.write(u"\n")
if not self.read_config_silent:
logger.info("- opening '%s' configuration file", cfg_file)
try:
# Open in Universal way for Windows, Mac, Linux-based systems
file_d = open(cfg_file, 'r')
buf = file_d.readlines()
file_d.close()
except IOError as exp:
self.add_error("cannot open main file '%s' for reading: %s" % (cfg_file, exp))
continue
for line in buf:
try:
line = line.decode('utf8', 'replace')
except AttributeError:
# Python 3 will raise an exception because the line is still unicode
pass
line = line.strip()
res.write(line)
res.write(u"\n")
if (re.search("^cfg_file", line) or re.search("^resource_file", line)) \
and '=' in line:
elts = line.split('=', 1)
if os.path.isabs(elts[1]):
cfg_file_name = elts[1]
else:
cfg_file_name = os.path.join(self.config_base_dir, elts[1])
cfg_file_name = cfg_file_name.strip()
cfg_file_name = os.path.abspath(cfg_file_name)
# Make sure the configuration cfg_files are not repeated...
if cfg_file_name in self.my_cfg_files:
logger.warning("- ignoring repeated file: %s", cfg_file_name)
else:
self.my_cfg_files.append(cfg_file_name)
if not self.read_config_silent:
logger.info(" reading: %s", cfg_file_name)
try:
# Read the file content to the buffer
file_d = open(cfg_file_name, 'r')
# File header
res.write(u"\n")
res.write(u"# imported_from=%s" % cfg_file_name)
res.write(u"\n")
content = file_d.read()
try:
content = content.decode('utf8', 'replace')
except AttributeError:
# Python 3 will raise an exception
pass
res.write(content)
res.write(u"\n")
file_d.close()
except IOError as exp:
self.add_error(u"cannot open file '%s' for reading: %s"
% (cfg_file_name, exp))
elif re.search("^cfg_dir", line) and '=' in line:
elts = line.split('=', 1)
if os.path.isabs(elts[1]):
cfg_dir_name = elts[1]
else:
cfg_dir_name = os.path.join(self.config_base_dir, elts[1])
# Ok, look if it's really a directory
if not os.path.isdir(cfg_dir_name):
self.add_error(u"cannot open directory '%s' for reading" % cfg_dir_name)
continue
# Now walk for it.
for root, _, walk_files in os.walk(cfg_dir_name, followlinks=True):
for found_file in walk_files:
if not re.search(r"\.cfg$", found_file):
continue
cfg_file_name = os.path.join(root, found_file)
# Make sure the configuration cfg_files are not repeated...
if os.path.abspath(cfg_file_name) in self.my_cfg_files:
logger.warning("- ignoring repeated file: %s", cfg_file_name)
else:
self.my_cfg_files.append(cfg_file_name)
if not self.read_config_silent:
logger.info(" reading: %s", cfg_file_name)
try:
# Read the file content to the buffer
file_d = open(cfg_file_name, 'r')
# File header
res.write(u"\n")
res.write(u"# imported_from=%s" % cfg_file_name)
res.write(u"\n")
content = file_d.read()
try:
content = content.decode('utf8', 'replace')
except AttributeError:
# Python 3 will raise an exception
pass
res.write(content)
res.write(u"\n")
file_d.close()
except IOError as exp:
self.add_error(u"cannot open file '%s' for reading: %s"
% (cfg_file_name, exp))
cfg_buffer = res.getvalue()
res.close()
return cfg_buffer | [
"Read and parse the Nagios legacy configuration files\n and store their content into a StringIO object which content\n will be returned as the function result\n\n :param cfg_files: list of file to read\n :type cfg_files: list\n :param alignak_env_files: name of the alignak environment file\n :type alignak_env_files: list\n :return: a buffer containing all files\n :rtype: str\n "
] |
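A hedged usage sketch (the file path and the Config import path are assumptions): the buffer returned by this function is typically fed to read_config_buf(), described below.

```python
# Hedged usage sketch: read the legacy entry file into a single buffer, then parse
# that buffer into raw object dictionaries (file path and import path are assumptions).
from alignak.objects.config import Config

config = Config()
cfg_buffer = config.read_legacy_cfg_files(['/usr/local/etc/alignak/nagios.cfg'])
raw_objects = config.read_config_buf(cfg_buffer)
print(sorted(raw_objects.keys()))
```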
Please provide a description of the function:def read_config_buf(self, cfg_buffer):
# pylint: disable=too-many-locals, too-many-branches
objects = {}
if not self.read_config_silent:
if cfg_buffer:
logger.info("Parsing the legacy configuration files...")
else:
logger.info("No legacy configuration files.")
return objects
params = []
objectscfg = {}
for o_type in self.__class__.configuration_types:
objectscfg[o_type] = []
tmp = []
tmp_type = 'void'
in_define = False
almost_in_define = False
continuation_line = False
tmp_line = ''
lines = cfg_buffer.split('\n')
line_nb = 0 # Keep the line number for the file path
filefrom = ''
for line in lines:
if line.startswith("# imported_from="):
filefrom = line.split('=')[1]
line_nb = 0 # reset the line number too
if not self.read_config_silent:
logger.debug("#####\n# file: %s", filefrom)
continue
if not self.read_config_silent:
logger.debug("- %d: %s", line_nb, line)
line_nb += 1
# Remove comments
line = split_semicolon(line)[0].strip()
# A backslash means, there is more to come
if re.search(r"\\\s*$", line) is not None:
continuation_line = True
line = re.sub(r"\\\s*$", "", line)
line = re.sub(r"^\s+", " ", line)
tmp_line += line
continue
elif continuation_line:
# Now the continuation line is complete
line = re.sub(r"^\s+", "", line)
line = tmp_line + line
tmp_line = ''
continuation_line = False
# } alone in a line means stop the object reading
if re.search(r"^\s*}\s*$", line) is not None:
in_define = False
# { alone in a line can mean start object reading
if re.search(r"^\s*\{\s*$", line) is not None and almost_in_define:
almost_in_define = False
in_define = True
continue
if re.search(r"^\s*#|^\s*$|^\s*}", line) is not None:
pass
# A define must be caught and the type saved
# The old entry must be saved before
elif re.search("^define", line) is not None:
if re.search(r".*\{.*$", line) is not None: # pylint: disable=R0102
in_define = True
else:
almost_in_define = True
if tmp_type not in objectscfg:
objectscfg[tmp_type] = []
objectscfg[tmp_type].append(tmp)
tmp = []
tmp.append("imported_from %s:%s" % (filefrom, line_nb))
# Get new type
elts = re.split(r'\s', line)
# Maybe there was space before and after the type
# so we must get all and strip it
tmp_type = ' '.join(elts[1:]).strip()
tmp_type = tmp_type.split('{')[0].strip()
else:
if in_define:
tmp.append(line)
else:
params.append(line)
# Maybe the type of the last element is unknown, declare it
if tmp_type not in objectscfg:
objectscfg[tmp_type] = []
objectscfg[tmp_type].append(tmp)
# Check and load the parameters
self.load_params(params)
for o_type in objectscfg:
objects[o_type] = []
for items in objectscfg[o_type]:
tmp_obj = {}
for line in items:
elts = self._cut_line(line)
if elts == []:
continue
prop = elts[0]
if prop not in tmp_obj:
tmp_obj[prop] = []
value = ' '.join(elts[1:])
tmp_obj[prop].append(value)
if tmp_obj != {}:
# Create a new object
objects[o_type].append(tmp_obj)
return objects | [
"The legacy configuration buffer (previously returned by Config.read_config())\n\n If the buffer is empty, it will return an empty dictionary else it will return a\n dictionary containing dictionary items tha tmay be used to create Alignak\n objects\n\n :param cfg_buffer: buffer containing all data from config files\n :type cfg_buffer: str\n :return: dict of alignak objects with the following structure ::\n { type1 : [{key: value, ..}, {..}],\n type2 : [ ... ]\n }\n\n Example ::\n\n { 'host' : [{'host_name': 'myhostname', ..}, {..}],\n 'service' : [ ... ]\n }\n\n Values are all str for now. It is pythonized at object creation\n\n :rtype: dict\n "
] |
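The structure described in the docstring can be illustrated with a deliberately simplified, standalone parser sketch (this is not the Alignak implementation: continuation lines, the imported_from bookkeeping and global parameters are left out; property values are kept as lists, as in read_config_buf above):

```python
import re

def parse_defines(cfg_buffer):
    """Very small parser sketch for legacy 'define' blocks."""
    objects = {}
    current_type, current = None, None
    for raw in cfg_buffer.split('\n'):
        line = raw.split(';')[0].strip()                 # drop inline comments
        if not line or line.startswith('#'):
            continue
        match = re.match(r'^define\s+(\w+)\s*\{?$', line)
        if match:                                        # start of an object definition
            current_type, current = match.group(1), {}
            continue
        if line.startswith('}'):                         # end of the object definition
            if current is not None:
                objects.setdefault(current_type, []).append(current)
            current_type, current = None, None
            continue
        if current is not None:
            parts = line.split(None, 1)                  # property name, then value
            current.setdefault(parts[0], []).append(parts[1].strip() if len(parts) > 1 else '')
    return objects

buf = """
define host {
    host_name  myhostname
    address    127.0.0.1
}
"""
print(parse_defines(buf))
# {'host': [{'host_name': ['myhostname'], 'address': ['127.0.0.1']}]}
```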
Please provide a description of the function:def add_self_defined_objects(raw_objects):
logger.info("- creating internally defined commands...")
if 'command' not in raw_objects:
raw_objects['command'] = []
# Business rule
raw_objects['command'].append({
'command_name': 'bp_rule',
'command_line': 'bp_rule',
'imported_from': 'alignak-self'
})
# Internal host checks
raw_objects['command'].append({
'command_name': '_internal_host_up',
'command_line': '_internal_host_up',
'imported_from': 'alignak-self'
})
raw_objects['command'].append({
'command_name': '_internal_host_check',
# Command line must contain: state_id;output
'command_line': '_internal_host_check;$ARG1$;$ARG2$',
'imported_from': 'alignak-self'
})
# Internal service check
raw_objects['command'].append({
'command_name': '_echo',
'command_line': '_echo',
'imported_from': 'alignak-self'
})
raw_objects['command'].append({
'command_name': '_internal_service_check',
# Command line must contain: state_id;output
'command_line': '_internal_service_check;$ARG1$;$ARG2$',
'imported_from': 'alignak-self'
}) | [
"Add self defined command objects for internal processing ;\n bp_rule, _internal_host_up, _echo, _internal_host_check, _interna_service_check\n\n :param raw_objects: Raw config objects dict\n :type raw_objects: dict\n :return: raw_objects with some more commands\n :rtype: dict\n "
] |
Please provide a description of the function:def early_create_objects(self, raw_objects):
types_creations = self.__class__.types_creations
early_created_types = self.__class__.early_created_types
logger.info("Creating objects...")
for o_type in sorted(types_creations):
if o_type in early_created_types:
self.create_objects_for_type(raw_objects, o_type)
logger.info("Done") | [
"Create the objects needed for the post configuration file initialization\n\n :param raw_objects: dict with all object with str values\n :type raw_objects: dict\n :return: None\n "
] |
Please provide a description of the function:def create_objects(self, raw_objects):
types_creations = self.__class__.types_creations
early_created_types = self.__class__.early_created_types
logger.info("Creating objects...")
# Before really creating the objects, we add some ghost
# ones like the bp_rule for correlation
self.add_self_defined_objects(raw_objects)
for o_type in sorted(types_creations):
if o_type not in early_created_types:
self.create_objects_for_type(raw_objects, o_type)
logger.info("Done") | [
"Create all the objects got after the post configuration file initialization\n\n :param raw_objects: dict with all object with str values\n :type raw_objects: dict\n :return: None\n "
] |
Please provide a description of the function:def create_objects_for_type(self, raw_objects, o_type):
# Ex: the above code do for timeperiods:
# timeperiods = []
# for timeperiodcfg in objects['timeperiod']:
# t = Timeperiod(timeperiodcfg)
# timeperiods.append(t)
# self.timeperiods = Timeperiods(timeperiods)
types_creations = self.__class__.types_creations
(cls, clss, prop, initial_index, _) = types_creations[o_type]
# List to store the created objects
lst = []
try:
logger.info("- creating '%s' objects", o_type)
for obj_cfg in raw_objects[o_type]:
# We create the object
my_object = cls(obj_cfg)
# and append it to the list
lst.append(my_object)
if not lst:
logger.info(" none.")
except KeyError:
logger.info(" no %s objects in the configuration", o_type)
# Create the objects list and set it in our properties
setattr(self, prop, clss(lst, initial_index)) | [
"Generic function to create objects regarding the o_type\n\n This function create real Alignak objects from the raw data got from the configuration.\n\n :param raw_objects: Raw objects\n :type raw_objects: dict\n :param o_type: the object type we want to create\n :type o_type: object\n :return: None\n "
] |
Please provide a description of the function:def early_arbiter_linking(self, arbiter_name, params):
if not self.arbiters:
params.update({
'name': arbiter_name, 'arbiter_name': arbiter_name,
'host_name': socket.gethostname(),
'address': '127.0.0.1', 'port': 7770,
'spare': '0'
})
logger.warning("There is no arbiter, I add myself (%s) reachable on %s:%d",
arbiter_name, params['address'], params['port'])
arb = ArbiterLink(params, parsing=True)
self.arbiters = ArbiterLinks([arb])
# First fill default
self.arbiters.fill_default()
self.modules.fill_default()
self.arbiters.linkify(modules=self.modules)
self.modules.linkify() | [
" Prepare the arbiter for early operations\n\n :param arbiter_name: default arbiter name if no arbiter exist in the configuration\n :type arbiter_name: str\n :return: None\n "
] |
Please provide a description of the function:def linkify_one_command_with_commands(self, commands, prop):
if not hasattr(self, prop):
return
command = getattr(self, prop).strip()
if not command:
setattr(self, prop, None)
return
data = {"commands": commands, "call": command}
if hasattr(self, 'poller_tag'):
data.update({"poller_tag": self.poller_tag})
if hasattr(self, 'reactionner_tag'):
data.update({"reactionner_tag": self.reactionner_tag})
setattr(self, prop, CommandCall(data)) | [
"\n Link a command\n\n :param commands: object commands\n :type commands: object\n :param prop: property name\n :type prop: str\n :return: None\n "
] |
Please provide a description of the function:def linkify(self):
self.services.optimize_service_search(self.hosts)
# First linkify myself like for some global commands
self.linkify_one_command_with_commands(self.commands, 'host_perfdata_command')
self.linkify_one_command_with_commands(self.commands, 'service_perfdata_command')
self.linkify_one_command_with_commands(self.commands, 'global_host_event_handler')
self.linkify_one_command_with_commands(self.commands, 'global_service_event_handler')
# link hosts with timeperiods and commands
self.hosts.linkify(self.timeperiods, self.commands,
self.contacts, self.realms,
self.resultmodulations, self.businessimpactmodulations,
self.escalations, self.hostgroups,
self.checkmodulations, self.macromodulations)
self.hostsextinfo.merge(self.hosts)
# Do the simplify AFTER explode groups
# link hostgroups with hosts
self.hostgroups.linkify(self.hosts, self.realms, self.forced_realms_hostgroups)
# link services with other objects
self.services.linkify(self.hosts, self.commands,
self.timeperiods, self.contacts,
self.resultmodulations, self.businessimpactmodulations,
self.escalations, self.servicegroups,
self.checkmodulations, self.macromodulations)
self.servicesextinfo.merge(self.services)
# link servicegroups members with services
self.servicegroups.linkify(self.hosts, self.services)
# link notificationways with timeperiods and commands
self.notificationways.linkify(self.timeperiods, self.commands)
# link checkmodulations with timeperiods and commands
self.checkmodulations.linkify(self.timeperiods, self.commands)
# Link with timeperiods
self.macromodulations.linkify(self.timeperiods)
# link contactgroups with contacts
self.contactgroups.linkify(self.contacts)
# link contacts with timeperiods and commands
self.contacts.linkify(self.commands, self.notificationways)
# link timeperiods with timeperiods (exclude part)
self.timeperiods.linkify()
self.servicedependencies.linkify(self.hosts, self.services,
self.timeperiods)
self.hostdependencies.linkify(self.hosts, self.timeperiods)
self.resultmodulations.linkify(self.timeperiods)
self.businessimpactmodulations.linkify(self.timeperiods)
self.escalations.linkify(self.timeperiods, self.contacts,
self.services, self.hosts)
# Link all satellite links with modules
self.schedulers.linkify(self.modules)
self.brokers.linkify(self.modules)
self.receivers.linkify(self.modules)
self.reactionners.linkify(self.modules)
self.pollers.linkify(self.modules)
# Ok, now update all realms with back links of satellites
satellites = {}
for sat in self.schedulers:
satellites[sat.uuid] = sat
for sat in self.pollers:
satellites[sat.uuid] = sat
for sat in self.reactionners:
satellites[sat.uuid] = sat
for sat in self.receivers:
satellites[sat.uuid] = sat
for sat in self.brokers:
satellites[sat.uuid] = sat
self.realms.prepare_satellites(satellites) | [
" Make 'links' between elements, like a host got a services list\n with all its services in it\n\n :return: None\n "
] |
Please provide a description of the function:def clean(self):
logger.debug("Cleaning configuration objects before configuration sending:")
types_creations = self.__class__.types_creations
for o_type in types_creations:
(_, _, inner_property, _, _) = types_creations[o_type]
logger.debug(" . for %s", inner_property, )
inner_object = getattr(self, inner_property)
inner_object.clean() | [
"Wrapper for calling the clean method of services attribute\n\n :return: None\n "
] |
Please provide a description of the function:def warn_about_unmanaged_parameters(self):
properties = self.__class__.properties
unmanaged = []
for prop, entry in list(properties.items()):
if not entry.managed and hasattr(self, prop):
if entry.help:
line = "%s: %s" % (prop, entry.help)
else:
line = prop
unmanaged.append(line)
if unmanaged:
logger.warning("The following Nagios legacy parameter(s) are not currently "
"managed by Alignak:")
for line in unmanaged:
logger.warning('- %s', line)
logger.warning("Those are unmanaged configuration statements, do you really need it? "
"Create an issue on the Alignak repository or submit a pull "
"request: http://www.github.com/Alignak-monitoring/alignak") | [
"used to raise warning if the user got parameter\n that we do not manage from now\n\n :return: None\n "
] |
Please provide a description of the function:def explode(self):
# first elements, after groups
self.contacts.explode(self.contactgroups, self.notificationways)
self.contactgroups.explode()
self.hosts.explode(self.hostgroups, self.contactgroups)
self.hostgroups.explode()
self.services.explode(self.hosts, self.hostgroups, self.contactgroups,
self.servicegroups, self.servicedependencies)
self.servicegroups.explode()
self.timeperiods.explode()
self.hostdependencies.explode(self.hostgroups)
self.servicedependencies.explode(self.hostgroups)
# Serviceescalations hostescalations will create new escalations
self.serviceescalations.explode(self.escalations)
self.hostescalations.explode(self.escalations)
self.escalations.explode(self.hosts, self.hostgroups, self.contactgroups)
# Now the architecture part
self.realms.explode() | [
"Use to fill groups values on hosts and create new services\n (for host group ones)\n\n :return: None\n "
] |
Please provide a description of the function:def apply_dependencies(self):
self.hosts.apply_dependencies()
self.services.apply_dependencies(self.hosts) | [
"Creates dependencies links between elements.\n\n :return: None\n "
] |
Please provide a description of the function:def apply_inheritance(self):
# inheritance properties by template
self.hosts.apply_inheritance()
self.contacts.apply_inheritance()
self.services.apply_inheritance()
self.servicedependencies.apply_inheritance()
self.hostdependencies.apply_inheritance()
# Also timeperiods
self.timeperiods.apply_inheritance()
# Also "Hostextinfo"
self.hostsextinfo.apply_inheritance()
# Also "Serviceextinfo"
self.servicesextinfo.apply_inheritance()
# Now escalations too
self.serviceescalations.apply_inheritance()
self.hostescalations.apply_inheritance()
self.escalations.apply_inheritance() | [
"Apply inheritance over templates\n Template can be used in the following objects::\n\n * hosts\n * contacts\n * services\n * servicedependencies\n * hostdependencies\n * timeperiods\n * hostsextinfo\n * servicesextinfo\n * serviceescalations\n * hostescalations\n * escalations\n\n :return: None\n "
] |
Please provide a description of the function:def fill_default_configuration(self):
logger.debug("Filling the unset properties with their default value:")
types_creations = self.__class__.types_creations
for o_type in types_creations:
(_, _, inner_property, _, _) = types_creations[o_type]
# Not yet for the realms and daemons links
if inner_property in ['realms', 'arbiters', 'schedulers', 'reactionners',
'pollers', 'brokers', 'receivers']:
continue
logger.debug(" . for %s", inner_property,)
inner_object = getattr(self, inner_property, None)
if inner_object is None:
logger.debug("No %s to fill with default values", inner_property)
continue
inner_object.fill_default()
# We have all monitored elements, we can create a default realm if none is defined
if getattr(self, 'realms', None) is not None:
self.fill_default_realm()
self.realms.fill_default()
# Then we create missing satellites, so no other satellites will be created after
self.fill_default_satellites(self.launch_missing_daemons)
types_creations = self.__class__.types_creations
for o_type in types_creations:
(_, _, inner_property, _, _) = types_creations[o_type]
if getattr(self, inner_property, None) is None:
logger.debug("No %s to fill with default values", inner_property)
continue
# Only for the daemons links
if inner_property in ['schedulers', 'reactionners', 'pollers', 'brokers', 'receivers']:
logger.debug(" . for %s", inner_property,)
inner_object = getattr(self, inner_property)
inner_object.fill_default()
# Now fill some fields we can predict (like address for hosts)
self.hosts.fill_predictive_missing_parameters()
self.services.fill_predictive_missing_parameters() | [
"Fill objects properties with default value if necessary\n\n :return: None\n "
] |
Please provide a description of the function:def fill_default_realm(self):
if not getattr(self, 'realms', None):
# Create a default realm so all hosts without realm will be linked with it
default = Realm({
'realm_name': u'All', 'alias': u'Self created default realm', 'default': '1'
})
self.realms = Realms([default])
logger.warning("No realms defined, I am adding one as %s", default.get_name())
# Check that a default realm (and only one) is defined and get this default realm
self.realms.fill_default() | [
"Check if a realm is defined, if not\n Create a new one (default) and tag everyone that do not have\n a realm prop to be put in this realm\n\n :return: None\n "
] |
Please provide a description of the function:def log_daemons_list(self):
daemons = [self.arbiters, self.schedulers, self.pollers,
self.brokers, self.reactionners, self.receivers]
for daemons_list in daemons:
if not daemons_list:
logger.debug("- %ss: None", daemons_list.inner_class.my_type)
else:
logger.debug("- %ss: %s", daemons_list.inner_class.my_type,
','.join([daemon.get_name() for daemon in daemons_list])) | [
"Log Alignak daemons list\n\n :return:\n "
] |
Please provide a description of the function:def fill_default_satellites(self, alignak_launched=False):
# pylint: disable=too-many-branches, too-many-locals, too-many-statements
# Log all satellites list
logger.debug("Alignak configured daemons list:")
self.log_daemons_list()
# We must create relations betweens the realms first. This is necessary to have
# an accurate map of the situation!
self.realms.linkify()
self.realms.get_default(check=True)
# Get list of known realms
# realms_names = [realm.get_name() for realm in self.realms]
# Create one instance of each satellite type if it does not exist...
if not self.schedulers:
logger.warning("No scheduler defined, I am adding one on 127.0.0.1:%d",
self.daemons_initial_port)
satellite = SchedulerLink({'type': 'scheduler', 'name': 'Default-Scheduler',
'realm': self.realms.default.get_name(),
'alignak_launched': alignak_launched,
'missing_daemon': True,
'spare': '0', 'manage_sub_realms': '0',
'address': '127.0.0.1', 'port': self.daemons_initial_port})
self.daemons_initial_port = self.daemons_initial_port + 1
self.schedulers = SchedulerLinks([satellite])
self.missing_daemons.append(satellite)
if not self.reactionners:
logger.warning("No reactionner defined, I am adding one on 127.0.0.1:%d",
self.daemons_initial_port)
satellite = ReactionnerLink({'type': 'reactionner', 'name': 'Default-Reactionner',
'realm': self.realms.default.get_name(),
'alignak_launched': alignak_launched,
'missing_daemon': True,
'spare': '0', 'manage_sub_realms': '0',
'address': '127.0.0.1', 'port': self.daemons_initial_port})
self.daemons_initial_port = self.daemons_initial_port + 1
self.reactionners = ReactionnerLinks([satellite])
self.missing_daemons.append(satellite)
if not self.pollers:
logger.warning("No poller defined, I am adding one on 127.0.0.1:%d",
self.daemons_initial_port)
satellite = PollerLink({'type': 'poller', 'name': 'Default-Poller',
'realm': self.realms.default.get_name(),
'alignak_launched': alignak_launched,
'missing_daemon': True,
'spare': '0', 'manage_sub_realms': '0',
'address': '127.0.0.1', 'port': self.daemons_initial_port})
self.daemons_initial_port = self.daemons_initial_port + 1
self.pollers = PollerLinks([satellite])
self.missing_daemons.append(satellite)
if not self.brokers:
logger.warning("No broker defined, I am adding one on 127.0.0.1:%d",
self.daemons_initial_port)
satellite = BrokerLink({'type': 'broker', 'name': 'Default-Broker',
'realm': self.realms.default.get_name(),
'alignak_launched': alignak_launched,
'missing_daemon': True,
'spare': '0', 'manage_sub_realms': '0',
'address': '127.0.0.1', 'port': self.daemons_initial_port})
self.daemons_initial_port = self.daemons_initial_port + 1
self.brokers = BrokerLinks([satellite])
self.missing_daemons.append(satellite)
if not self.receivers:
logger.warning("No receiver defined, I am adding one on 127.0.0.1:%d",
self.daemons_initial_port)
satellite = ReceiverLink({'type': 'receiver', 'name': 'Default-Receiver',
'alignak_launched': alignak_launched,
'missing_daemon': True,
'spare': '0', 'manage_sub_realms': '0',
'address': '127.0.0.1', 'port': self.daemons_initial_port})
self.daemons_initial_port = self.daemons_initial_port + 1
self.receivers = ReceiverLinks([satellite])
self.missing_daemons.append(satellite)
# Assign default realm to the satellites that do not have a defined realm
for satellites_list in [self.pollers, self.brokers, self.reactionners,
self.receivers, self.schedulers]:
for satellite in satellites_list:
# Here the 'realm' property is not yet a real realm object uuid ...
# but still a realm name! Make it a realm uuid
if not getattr(satellite, 'realm', None):
satellite.realm = self.realms.default.get_name()
sat_realm = self.realms.find_by_name(satellite.realm)
if not sat_realm:
self.add_error("The %s '%s' is affected to an unknown realm: '%s'"
% (satellite.type, satellite.name, satellite.realm))
continue
# satellite.realm_name = sat_realm.get_name()
logger.info("Tagging satellite '%s' with realm %s", satellite.name, satellite.realm)
satellite.realm = sat_realm.uuid
satellite.realm_name = sat_realm.get_name()
# Alert for spare daemons
if getattr(satellite, 'spare', False):
self.add_warning("The %s '%s' is declared as a spare daemon. "
"Spare mode is not yet implemented and it will be ignored."
% (satellite.type, satellite.name))
continue
# Alert for non active daemons
if not getattr(satellite, 'active', False):
self.add_warning("The %s '%s' is declared as a non active daemon. "
"It will be ignored."
% (satellite.type, satellite.name))
continue
# And tell the realm that it knows the satellite
realm_satellites = getattr(sat_realm, '%ss' % satellite.type)
if satellite.uuid not in realm_satellites:
realm_satellites.append(satellite.uuid)
# If the satellite manages sub realms...
# We update the "potential_" satellites that may be used for this realm
if satellite.manage_sub_realms:
for realm_uuid in sat_realm.all_sub_members:
logger.debug("Linkify %s '%s' with realm %s",
satellite.type, satellite.name,
self.realms[realm_uuid].get_name())
realm_satellites = getattr(self.realms[realm_uuid],
'potential_%ss' % satellite.type)
if satellite.uuid not in realm_satellites:
realm_satellites.append(satellite.uuid)
# Parse hosts for realms and set host in the default realm if no realm is set
hosts_realms_names = set()
logger.debug("Hosts realm configuration:")
for host in self.hosts:
if not getattr(host, 'realm', None):
# todo: perhaps check the hostgroups realm (if any) to set a hostgroup realm
# rather than the default realm
logger.debug("Host: %s, realm: %s, hostgroups: %s",
host.get_name(), host.realm, host.hostgroups)
host.realm = self.realms.default.get_name()
host.got_default_realm = True
host_realm = self.realms.find_by_name(host.realm)
if not host_realm:
self.add_error("The host '%s' is affected to an unknown realm: '%s'"
% (host.get_name(), host.realm))
continue
host.realm_name = host_realm.get_name()
host_realm.add_members(host.get_name())
logger.debug("- tagging host '%s' with realm %s", host.get_name(), host.realm_name)
hosts_realms_names.add(host.realm_name)
logger.debug(" - %s: realm %s, active %s, passive %s",
host.get_name(), host_realm.get_name(),
host.active_checks_enabled, host.passive_checks_enabled)
host_realm.passively_checked_hosts = \
host_realm.passively_checked_hosts or host.passive_checks_enabled
host_realm.actively_checked_hosts = \
host_realm.actively_checked_hosts or host.active_checks_enabled
hosts_realms_names.add(host.realm)
# Parse hostgroups for realms and set hostgroup in the default realm if no realm is set
hostgroups_realms_names = set()
logger.debug("Hostgroups realm configuration:")
for hostgroup in self.hostgroups:
if not getattr(hostgroup, 'realm', None):
hostgroup.realm = self.realms.default.get_name()
hostgroup.got_default_realm = True
hostgroup_realm = self.realms.find_by_name(hostgroup.realm)
if not hostgroup_realm:
self.add_error("The hostgroup '%s' is affected to an unknown realm: '%s'"
% (hostgroup.get_name(), hostgroup.realm))
continue
hostgroup.realm_name = hostgroup_realm.get_name()
hostgroup_realm.add_group_members(hostgroup.get_name())
logger.debug("- tagging hostgroup '%s' with realm %s",
hostgroup.get_name(), hostgroup.realm_name)
hostgroups_realms_names.add(hostgroup.realm_name)
# Check that all daemons and realms are coherent
for satellites_list in [self.pollers, self.brokers, self.reactionners,
self.receivers, self.schedulers]:
sat_class = satellites_list.inner_class
# Collect the names of all the realms that are managed by all the satellites
sat_realms_names = set()
for satellite in satellites_list:
for realm in self.realms:
realm_satellites = getattr(realm, '%ss' % satellite.type)
realm_potential_satellites = getattr(realm, 'potential_%ss' % satellite.type)
if satellite.uuid in realm_satellites or \
satellite.uuid in realm_potential_satellites:
sat_realms_names.add(realm.get_name())
if not hosts_realms_names.issubset(sat_realms_names):
# Check if a daemon is able to manage the concerned hosts...
for realm_name in hosts_realms_names.difference(sat_realms_names):
realm = self.realms.find_by_name(realm_name)
self.add_warning("Some hosts exist in the realm '%s' but no %s is "
"defined for this realm." % (realm_name, sat_class.my_type))
if not alignak_launched:
continue
# Add a self-generated daemon
logger.warning("Adding a %s for the realm: %s", satellite.type, realm_name)
new_daemon = sat_class({
'type': satellite.type, 'name': '%s-%s' % (satellite.type, realm_name),
'alignak_launched': True, 'missing_daemon': True,
'realm': realm.uuid, 'manage_sub_realms': '0', 'spare': '0',
'address': '127.0.0.1', 'port': self.daemons_initial_port
})
satellites_list.add_item(new_daemon)
# And tell the realm that it knows the satellite
realm_satellites = getattr(realm, '%ss' % satellite.type)
if new_daemon.uuid not in realm_satellites:
realm_satellites.append(new_daemon.uuid)
self.add_warning("Added a %s (%s, %s) for the realm '%s'"
% (satellite.type, '%s-%s' % (satellite.type, realm_name),
satellite.uri, realm_name))
self.daemons_initial_port = self.daemons_initial_port + 1
self.missing_daemons.append(new_daemon)
logger.debug("Realms hosts configuration:")
for realm in self.realms:
logger.debug("Realm: %s, actively checked hosts %s, passively checked hosts %s",
realm.get_name(), realm.actively_checked_hosts,
realm.passively_checked_hosts)
logger.info("Realm: %s, hosts: %s, groups: %s",
realm.get_name(), realm.members, realm.group_members)
# Log all satellites list
logger.debug("Alignak definitive daemons list:")
self.log_daemons_list() | [
"If a required satellite is missing in the configuration, we create a new satellite\n on localhost with some default values\n\n :param alignak_launched: created daemons are to be launched or not\n :type alignak_launched: bool\n :return: None\n "
] |
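The five nearly identical "no X defined, add a default one" blocks above could be factored into a small helper. The following is a hedged sketch of that factorization, not the Alignak implementation; it assumes the *Link/*Links classes and the attributes shown above (daemons_initial_port, missing_daemons, realms.default), and glosses over the fact that the original receiver block does not set a 'realm' key.

```python
# Hedged factorization sketch of the repeated "create a default satellite" blocks.
def _add_default_satellite(self, link_cls, links_cls, daemon_type, alignak_launched):
    logger.warning("No %s defined, I am adding one on 127.0.0.1:%d",
                   daemon_type, self.daemons_initial_port)
    satellite = link_cls({
        'type': daemon_type, 'name': 'Default-%s' % daemon_type.capitalize(),
        'realm': self.realms.default.get_name(),
        'alignak_launched': alignak_launched, 'missing_daemon': True,
        'spare': '0', 'manage_sub_realms': '0',
        'address': '127.0.0.1', 'port': self.daemons_initial_port})
    self.daemons_initial_port += 1
    self.missing_daemons.append(satellite)
    return links_cls([satellite])

# Possible usage replacing the first block above:
#     if not self.schedulers:
#         self.schedulers = self._add_default_satellite(
#             SchedulerLink, SchedulerLinks, 'scheduler', alignak_launched)
```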
Please provide a description of the function:def got_broker_module_type_defined(self, module_type):
for broker_link in self.brokers:
for module in broker_link.modules:
if module.is_a_module(module_type):
return True
return False | [
"Check if a module type is defined in one of the brokers\n\n :param module_type: module type to search for\n :type module_type: str\n :return: True if mod_type is found else False\n :rtype: bool\n "
] |
Please provide a description of the function:def got_scheduler_module_type_defined(self, module_type):
for scheduler_link in self.schedulers:
for module in scheduler_link.modules:
if module.is_a_module(module_type):
return True
return False | [
"Check if a module type is defined in one of the schedulers\n\n :param module_type: module type to search for\n :type module_type: str\n :return: True if mod_type is found else False\n :rtype: bool\n TODO: Factorize it with got_broker_module_type_defined\n "
] |
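The TODO notes suggest factorizing the broker and scheduler variants; a minimal sketch of such a shared helper could look like this (it relies only on the modules attribute and the is_a_module() method used above):

```python
# Hedged sketch of the factorization suggested by the TODO notes above.
def got_module_type_defined(links, module_type):
    """Return True if any link in `links` declares a module of the given type."""
    return any(module.is_a_module(module_type)
               for link in links
               for module in link.modules)

# e.g. got_module_type_defined(self.brokers, module_type)
#      got_module_type_defined(self.schedulers, module_type)
```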
Please provide a description of the function:def got_arbiter_module_type_defined(self, module_type):
for arbiter in self.arbiters:
# Do like the linkify will do after....
for module in getattr(arbiter, 'modules', []):
# So look at what the arbiter try to call as module
module_name = module.get_name()
# Ok, now look in modules...
for mod in self.modules:
# try to see if this module is the good type
if getattr(mod, 'python_name', '').strip() == module_type.strip():
# if so, the good name?
if getattr(mod, 'name', '').strip() == module_name:
return True
return False | [
"Check if a module type is defined in one of the arbiters\n Also check the module name\n\n :param module_type: module type to search for\n :type module_type: str\n :return: True if mod_type is found else False\n :rtype: bool\n TODO: Factorize it with got_broker_module_type_defined:\n "
] |
Please provide a description of the function:def create_business_rules(self):
self.hosts.create_business_rules(self.hosts, self.services,
self.hostgroups, self.servicegroups,
self.macromodulations, self.timeperiods)
self.services.create_business_rules(self.hosts, self.services,
self.hostgroups, self.servicegroups,
self.macromodulations, self.timeperiods) | [
"Create business rules for hosts and services\n\n :return: None\n "
] |
Please provide a description of the function:def create_business_rules_dependencies(self):
for item in itertools.chain(self.hosts, self.services):
if not item.got_business_rule:
continue
bp_items = item.business_rule.list_all_elements()
for bp_item_uuid in bp_items:
if bp_item_uuid in self.hosts:
bp_item = self.hosts[bp_item_uuid]
notif_options = item.business_rule_host_notification_options
else: # We have a service
bp_item = self.services[bp_item_uuid]
notif_options = item.business_rule_service_notification_options
if notif_options:
bp_item.notification_options = notif_options
bp_item.act_depend_of_me.append((item.uuid, ['d', 'u', 's', 'f', 'c', 'w', 'x'],
'', True))
# TODO: Is it necessary? We already have this info in act_depend_* attributes
item.parent_dependencies.add(bp_item.uuid)
bp_item.child_dependencies.add(item.uuid) | [
"Create business rules dependencies for hosts and services\n\n :return: None\n "
] |
Please provide a description of the function:def hack_old_nagios_parameters(self):
# pylint: disable=too-many-branches
modules = []
# For status_dat
if getattr(self, 'status_file', None) and getattr(self, 'object_cache_file', None):
msg = "The configuration parameters '%s = %s' and '%s = %s' are deprecated " \
"and will be ignored. Please configure your external 'retention' module " \
"as expected." % \
('status_file', self.status_file,
'object_cache_file', self.object_cache_file)
logger.warning(msg)
self.add_warning(msg)
# Now the log_file
if getattr(self, 'log_file', None):
msg = "The configuration parameter '%s = %s' is deprecated " \
"and will be ignored. Please configure your external 'logs' module " \
"as expected." % \
('log_file', self.log_file)
logger.warning(msg)
self.add_warning(msg)
# Now the syslog facility
if getattr(self, 'use_syslog', None):
msg = "The configuration parameter '%s = %s' is deprecated " \
"and will be ignored. Please configure your external 'logs' module " \
"as expected." % \
('use_syslog', self.use_syslog)
logger.warning(msg)
self.add_warning(msg)
# Now the host_perfdata or service_perfdata module
if getattr(self, 'service_perfdata_file', None) or \
getattr(self, 'host_perfdata_file', None):
msg = "The configuration parameters '%s = %s' and '%s = %s' are Nagios legacy " \
"parameters. Alignak will use its inner 'metrics' module " \
"to match the expected behavior." \
% ('host_perfdata_file', self.host_perfdata_file,
'service_perfdata_file', self.service_perfdata_file)
logger.warning(msg)
self.add_warning(msg)
mod_configuration = {
'name': 'inner-metrics',
'type': 'metrics',
'python_name': 'alignak.modules.inner_metrics',
'imported_from': 'inner',
'enabled': True
}
if getattr(self, 'host_perfdata_file', None):
mod_configuration['host_perfdata_file'] = \
getattr(self, 'host_perfdata_file')
if getattr(self, 'service_perfdata_file', None):
mod_configuration['service_perfdata_file'] = \
getattr(self, 'service_perfdata_file')
logger.debug("inner metrics module, configuration: %s", mod_configuration)
modules.append((
'broker', mod_configuration
))
# Now the Nagios legacy retention file module
if hasattr(self, 'retain_state_information') and self.retain_state_information:
# Do not raise a warning log for this, only an information
msg = "The configuration parameter '%s = %s' is a Nagios legacy " \
"parameter. Alignak will use its inner 'retention' module " \
"to match the expected behavior." \
% ('retain_state_information', self.retain_state_information)
logger.info(msg)
# self.add_warning(msg)
mod_configuration = {
'name': 'inner-retention',
'type': 'retention',
'python_name': 'alignak.modules.inner_retention',
'imported_from': 'inner',
'enabled': True
}
if getattr(self, 'state_retention_file', None) is not None:
mod_configuration['retention_file'] = getattr(self, 'state_retention_file')
if getattr(self, 'state_retention_dir', None) is not None:
mod_configuration['retention_dir'] = getattr(self, 'state_retention_dir')
if getattr(self, 'retention_update_interval', None):
self.tick_update_retention = int(self.retention_update_interval) * 60
mod_configuration['retention_period'] = int(self.retention_update_interval) * 60
logger.debug("inner retention module, configuration: %s", mod_configuration)
modules.append((
'scheduler', mod_configuration
))
# Now the command_file
if hasattr(self, 'command_file') and getattr(self, 'command_file'):
msg = "The configuration parameter '%s = %s' is deprecated " \
"and will be ignored. Please configure an external commands capable " \
"module as expected (eg external-commands, NSCA, or WS module may suit." \
% ('command_file', self.command_file)
logger.warning(msg)
self.add_warning(msg)
return modules | [
" Check if modules exist for some of the Nagios legacy parameters.\n\n If no module of the required type is present, it alerts the user that the parameters will\n be ignored and the functions will be disabled, else it encourages the user to set the\n correct parameters in the installed modules.\n\n Note that some errors are raised if some parameters are used and no module is found\n to manage the corresponding feature.\n\n TODO: clean this part of the configuration checking! Nagios ascending compatibility!\n\n :return: modules list\n :rtype: list\n "
] |
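For reference, the value returned by this function is a list of (daemon_type, module_configuration) tuples; the shapes below are copied from the code above (the perfdata/retention file entries are added only when the corresponding legacy parameters are set):

```python
# Shape of the list returned by hack_old_nagios_parameters(), copied from the code above.
modules = [
    ('broker', {'name': 'inner-metrics', 'type': 'metrics',
                'python_name': 'alignak.modules.inner_metrics',
                'imported_from': 'inner', 'enabled': True}),
    ('scheduler', {'name': 'inner-retention', 'type': 'retention',
                   'python_name': 'alignak.modules.inner_retention',
                   'imported_from': 'inner', 'enabled': True}),
]
```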
Please provide a description of the function:def propagate_timezone_option(self):
if self.use_timezone:
# first apply myself
os.environ['TZ'] = self.use_timezone
time.tzset()
tab = [self.schedulers, self.pollers, self.brokers, self.receivers, self.reactionners]
for sat_list in tab:
for sat in sat_list:
if sat.use_timezone == 'NOTSET':
setattr(sat, 'use_timezone', self.use_timezone) | [
"Set our timezone value and give it too to unset satellites\n\n :return: None\n "
] |
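For reference, the TZ / tzset() mechanism used above works as follows (POSIX only; time.tzset() is not available on Windows):

```python
import os
import time

# Setting the TZ environment variable and calling tzset() changes how local
# times are computed for the current process (POSIX systems only).
os.environ['TZ'] = 'Europe/Paris'
time.tzset()
print(time.strftime('%Z %z'))  # e.g. 'CET +0100' or 'CEST +0200' depending on the date
```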
Please provide a description of the function:def linkify_templates(self):
self.hosts.linkify_templates()
self.contacts.linkify_templates()
self.services.linkify_templates()
self.servicedependencies.linkify_templates()
self.hostdependencies.linkify_templates()
self.timeperiods.linkify_templates()
self.hostsextinfo.linkify_templates()
self.servicesextinfo.linkify_templates()
self.escalations.linkify_templates()
# But also old srv and host escalations
self.serviceescalations.linkify_templates()
self.hostescalations.linkify_templates() | [
" Like for normal object, we link templates with each others\n\n :return: None\n "
] |
Please provide a description of the function:def check_error_on_hard_unmanaged_parameters(self):
valid = True
if self.use_regexp_matching:
msg = "use_regexp_matching parameter is not managed."
logger.warning(msg)
self.add_warning(msg)
valid &= False
if getattr(self, 'failure_prediction_enabled', None):
msg = "failure_prediction_enabled parameter is not managed."
logger.warning(msg)
self.add_warning(msg)
valid &= False
if getattr(self, 'obsess_over_hosts', None):
msg = "obsess_over_hosts parameter is not managed."
logger.warning(msg)
self.add_warning(msg)
valid &= False
if getattr(self, 'ochp_command', None):
msg = "ochp_command parameter is not managed."
logger.warning(msg)
self.add_warning(msg)
valid &= False
if getattr(self, 'ochp_timeout', None):
msg = "ochp_timeout parameter is not managed."
logger.warning(msg)
self.add_warning(msg)
valid &= False
if getattr(self, 'obsess_over_services', None):
msg = "obsess_over_services parameter is not managed."
logger.warning(msg)
self.add_warning(msg)
valid &= False
if getattr(self, 'ocsp_command', None):
msg = "ocsp_command parameter is not managed."
logger.warning(msg)
self.add_warning(msg)
valid &= False
if getattr(self, 'ocsp_timeout', None):
msg = "ocsp_timeout parameter is not managed."
logger.warning(msg)
self.add_warning(msg)
valid &= False
return valid | [
"Some parameters are just not managed like O*HP commands and regexp capabilities\n\n :return: True if we encounter an error, otherwise False\n :rtype: bool\n "
] |
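The repeated blocks above could be written table-driven; here is a hedged rewrite sketch (not the Alignak implementation) keeping the same messages and return value:

```python
# Hedged rewrite sketch: iterate over the unmanaged parameter names instead of
# duplicating the warning block for each one.
UNMANAGED_PARAMETERS = (
    'use_regexp_matching', 'failure_prediction_enabled',
    'obsess_over_hosts', 'ochp_command', 'ochp_timeout',
    'obsess_over_services', 'ocsp_command', 'ocsp_timeout',
)

def check_error_on_hard_unmanaged_parameters(self):
    valid = True
    for parameter in UNMANAGED_PARAMETERS:
        if getattr(self, parameter, None):
            msg = "%s parameter is not managed." % parameter
            logger.warning(msg)
            self.add_warning(msg)
            valid = False
    return valid
```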
Please provide a description of the function:def is_correct(self): # pylint: disable=too-many-branches, too-many-statements, too-many-locals
logger.info('Running pre-flight check on configuration data, initial state: %s',
self.conf_is_correct)
valid = self.conf_is_correct
# Check if alignak_name is defined
if not self.alignak_name:
logger.info('Alignak name is not defined, using the main arbiter name...')
for arbiter in self.arbiters:
if not arbiter.spare:
self.alignak_name = arbiter.name
break
logger.info('Alignak name is: %s', self.alignak_name)
# Globally unmanaged parameters
if not self.read_config_silent:
logger.info('Checking global parameters...')
# Old Nagios legacy unmanaged parameters
self.check_error_on_hard_unmanaged_parameters()
# If we got global event handlers, they should be valid
if self.global_host_event_handler and not self.global_host_event_handler.is_valid():
msg = "[%s::%s] global host event_handler '%s' is invalid" \
% (self.my_type, self.get_name(), self.global_host_event_handler.command)
self.add_error(msg)
valid = False
if self.global_service_event_handler and not self.global_service_event_handler.is_valid():
msg = "[%s::%s] global service event_handler '%s' is invalid" \
% (self.my_type, self.get_name(), self.global_service_event_handler.command)
self.add_error(msg)
valid = False
if not self.read_config_silent:
logger.info('Checked')
if not self.read_config_silent:
logger.info('Checking monitoring configuration...')
classes = [strclss for _, _, strclss, _, _ in list(self.types_creations.values())]
for strclss in sorted(classes):
if strclss in ['hostescalations', 'serviceescalations']:
logger.debug("Ignoring correctness check for '%s'...", strclss)
continue
if not self.read_config_silent:
logger.info('- checking %s...', strclss)
try:
checked_list = getattr(self, strclss)
except AttributeError: # pragma: no cover, simple protection
logger.info("\t%s are not present in the configuration", strclss)
continue
if not checked_list.is_correct():
if not self.read_config_silent:
logger.info('Checked %s, configuration is incorrect!', strclss)
valid = False
self.configuration_errors += checked_list.configuration_errors
self.add_error("%s configuration is incorrect!" % strclss)
logger.error("%s configuration is incorrect!", strclss)
if checked_list.configuration_warnings:
self.configuration_warnings += checked_list.configuration_warnings
logger.info(" %d warning(s), total: %d",
len(checked_list.configuration_warnings),
len(self.configuration_warnings))
if not self.read_config_silent:
try:
dump_list = sorted(checked_list, key=lambda k: k.get_name())
except AttributeError: # pragma: no cover, simple protection
dump_list = checked_list
# Dump at DEBUG level because some tests break with INFO level, and it is not
# really necessary to have information about each object ;
for cur_obj in dump_list:
if strclss == 'services':
logger.debug(' %s', cur_obj.get_full_name())
else:
logger.debug(' %s', cur_obj.get_name())
if checked_list:
logger.info(' checked %d', len(checked_list))
else:
logger.info(' none')
if not self.read_config_silent:
logger.info('Checked')
# Parse hosts and services for tags and realms
hosts_tag = set()
services_tag = set()
for host in self.hosts:
hosts_tag.add(host.poller_tag)
for service in self.services:
services_tag.add(service.poller_tag)
# Check that for each poller_tag of a host, a poller exists with this tag
pollers_tag = set()
for poller in self.pollers:
for tag in poller.poller_tags:
pollers_tag.add(tag)
if not hosts_tag.issubset(pollers_tag):
for tag in hosts_tag.difference(pollers_tag):
self.add_error("Error: some hosts have the poller_tag %s but no poller "
"has this tag" % tag)
valid = False
if not services_tag.issubset(pollers_tag):
for tag in services_tag.difference(pollers_tag):
self.add_error("some services have the poller_tag %s but no poller "
"has this tag" % tag)
valid = False
# Check that all hosts involved in business_rules are from the same realm
for item in self.hosts:
if not item.got_business_rule:
continue
realm = self.realms[item.realm]
if not realm:
# Something was wrong in the conf, will be raised elsewhere
continue
for elt_uuid in item.business_rule.list_all_elements():
if elt_uuid not in self.hosts:
# An error or a service element
continue
host = self.hosts[elt_uuid]
if host.realm not in self.realms:
# Something was wrong in the conf, will be raised elsewhere
continue
host_realm = self.realms[host.realm]
if host_realm.get_name() != realm.get_name():
logger.error("Business_rule '%s' got some hosts from another realm: %s",
item.get_full_name(), host_realm.get_name())
self.add_error("Error: Business_rule '%s' got hosts from another "
"realm: %s" % (item.get_full_name(), host_realm.get_name()))
valid = False
# for lst in [self.services, self.hosts]:
# for item in lst:
# if item.got_business_rule:
# e_ro = self.realms[item.realm]
# # Something was wrong in the conf, will be raised elsewhere
# if not e_ro:
# continue
# e_r = e_ro.realm_name
# for elt_uuid in item.business_rule.list_all_elements():
# if elt_uuid in self.hosts:
# elt = self.hosts[elt_uuid]
# else:
# elt = self.services[elt_uuid]
# r_o = self.realms[elt.realm]
# # Something was wrong in the conf, will be raised elsewhere
# if not r_o:
# continue
# elt_r = r_o.realm_name
# if elt_r != e_r:
# logger.error("Business_rule '%s' got hosts from another realm: %s",
# item.get_full_name(), elt_r)
# self.add_error("Error: Business_rule '%s' got hosts from another "
# "realm: %s" % (item.get_full_name(), elt_r))
# valid = False
if self.configuration_errors:
valid = False
logger.error("Configuration errors:")
for msg in self.configuration_errors:
logger.error(msg)
# If configuration error messages exist, then the configuration is not valid
self.conf_is_correct = valid | [
"Check if all elements got a good configuration\n\n :return: True if the configuration is correct else False\n :rtype: bool\n "
] |
Please provide a description of the function:def explode_global_conf(self):
for cls, _, strclss, _, _ in list(self.types_creations.values()):
logger.debug("Applying global conf for the class '%s'...", strclss)
cls.load_global_conf(self) | [
"Explode parameters like cached_service_check_horizon in the\n Service class in a cached_check_horizon manner, o*hp commands etc\n\n :return: None\n "
] |
Please provide a description of the function:def remove_templates(self):
self.hosts.remove_templates()
self.contacts.remove_templates()
self.services.remove_templates()
self.servicedependencies.remove_templates()
self.hostdependencies.remove_templates()
self.timeperiods.remove_templates() | [
"Clean useless elements like templates because they are not needed anymore\n\n :return: None\n "
] |
Please provide a description of the function:def show_errors(self):
if self.configuration_warnings:
logger.warning("Configuration warnings:")
for msg in self.configuration_warnings:
logger.warning(msg)
if self.configuration_errors:
logger.warning("Configuration errors:")
for msg in self.configuration_errors:
logger.warning(msg) | [
"\n Loop over configuration warnings and log them as INFO log\n Loop over configuration errors and log them as INFO log\n\n Note that the warnings and errors are logged on the fly during the configuration parsing.\n It is not necessary to log as WARNING and ERROR in this function which is used as a sum-up\n on the end of configuration parsing when an error has been detected.\n\n :return: None\n "
] |
Please provide a description of the function:def create_packs(self):
# pylint: disable=too-many-statements,too-many-locals,too-many-branches, unused-argument
logger.info("- creating hosts packs for the realms:")
# We create a graph with host in nodes
graph = Graph()
graph.add_nodes(list(self.hosts.items.keys()))
# links will be used for relations between hosts
links = set()
# Now the relations
for host in self.hosts:
# Add parent relations
for parent in getattr(host, 'parents', []):
if parent:
links.add((parent, host.uuid))
# Add the others dependencies
for (dep, _, _, _) in host.act_depend_of:
links.add((dep, host.uuid))
for (dep, _, _, _, _) in host.chk_depend_of:
links.add((dep, host.uuid))
# For services: they are linked with their own host but we need
# to have the hosts of the service dependency in the same pack too
for service in self.services:
for (dep_id, _, _, _) in service.act_depend_of:
if dep_id in self.services:
dep = self.services[dep_id]
else:
dep = self.hosts[dep_id]
# I don't care about dep host: they are just the host
# of the service...
if hasattr(dep, 'host'):
links.add((dep.host, service.host))
# The other type of dep
for (dep_id, _, _, _, _) in service.chk_depend_of:
if dep_id in self.services:
dep = self.services[dep_id]
else:
dep = self.hosts[dep_id]
links.add((dep.host, service.host))
# For host/service that are business based, we need to link them too
for service in [srv for srv in self.services if srv.got_business_rule]:
for elem_uuid in service.business_rule.list_all_elements():
if elem_uuid in self.services:
elem = self.services[elem_uuid]
if elem.host != service.host: # do not link a host with itself
links.add((elem.host, service.host))
else: # it's already a host but only if it is in the known hosts list!
if elem_uuid in self.hosts and elem_uuid != service.host:
links.add((elem_uuid, service.host))
# Same for hosts of course
for host in [hst for hst in self.hosts if hst.got_business_rule]:
for elem_uuid in host.business_rule.list_all_elements():
if elem_uuid in self.services: # if it's a service
elem = self.services[elem_uuid]
if elem.host != host.uuid:
links.add((elem.host, host.uuid))
else: # elem_uuid is a host
if elem_uuid != host.uuid:
links.add((elem_uuid, host.uuid))
# Now we create links in the graph. With links (set)
# We are sure to call the less add_edge
for (dep, host) in links:
graph.add_edge(dep, host)
graph.add_edge(host, dep)
# Now We find the default realm
default_realm = self.realms.get_default()
# The accessibility list of a node is all the nodes connected
# with it: it's a list of our mini packs
# Now we look if all elements of all packs have the
# same realm. If not, not good!
for hosts_pack in graph.get_accessibility_packs():
passively_checked_hosts = False
actively_checked_hosts = False
tmp_realms = set()
logger.debug(" - host pack hosts:")
for host_id in hosts_pack:
host = self.hosts[host_id]
logger.debug(" - %s", host.get_name())
passively_checked_hosts = passively_checked_hosts or host.passive_checks_enabled
actively_checked_hosts = actively_checked_hosts or host.active_checks_enabled
if host.realm:
tmp_realms.add(host.realm)
if len(tmp_realms) > 1:
self.add_error("Error: the realm configuration of your hosts is not correct "
"because there is more than one realm in one pack (host relations):")
for host_id in hosts_pack:
host = self.hosts[host_id]
if not host.realm:
self.add_error(' -> the host %s does not have a realm' % host.get_name())
else:
# Do not use get_name for the realm because it is not an object but a
# string containing the not found realm name if the realm is not existing!
# As of it, it may raise an exception
if host.realm not in self.realms:
self.add_error(' -> the host %s is in the realm %s' %
(host.get_name(), host.realm))
else:
host_realm = self.realms[host.realm]
self.add_error(' -> the host %s is in the realm %s' %
(host.get_name(), host_realm.get_name()))
if len(tmp_realms) == 1: # Ok, good
tmp_realm = tmp_realms.pop()
if tmp_realm in self.realms:
realm = self.realms[tmp_realm]
else:
realm = self.realms.find_by_name(tmp_realm)
if not realm:
self.add_error(' -> some hosts are in an unknown realm %s!' % tmp_realm)
else:
# Set the current hosts pack to its realm
logger.debug(" - append pack %s to realm %s", hosts_pack, realm.get_name())
realm.packs.append(hosts_pack)
# Set if the realm only has passively or actively checked hosts...
realm.passively_checked_hosts = passively_checked_hosts
realm.actively_checked_hosts = actively_checked_hosts
elif not tmp_realms: # Hum... no realm value? So default Realm
if default_realm is not None:
# Set the current hosts pack to the default realm
default_realm.packs.append(hosts_pack)
else:
self.add_error("Error: some hosts do not have a realm and you did not "
"defined a default realm!")
for host_id in hosts_pack:
host = self.hosts[host_id]
self.add_error(' Impacted host: %s ' % host.get_name())
# The load balancing is for a loop, so all
# hosts of a realm (in a pack) will be dispatched
# to the schedulers of this realm
# REF: doc/pack-aggregation.png
# Count the numbers of elements in all the realms,
# to compare with the total number of hosts
nb_elements_all_realms = 0
for realm in self.realms:
packs = {}
# create round-robin iterator for id of cfg
# So dispatching is load balanced in a realm
# but add an entry in the round-robin rotation for
# every weight point schedulers (so Weight round robin)
weight_list = []
no_spare_schedulers = realm.schedulers
if not no_spare_schedulers:
if realm.potential_schedulers:
no_spare_schedulers = [realm.potential_schedulers[0]]
nb_schedulers = len(no_spare_schedulers)
if nb_schedulers:
logger.info(" %d scheduler(s) for the realm %s", nb_schedulers, realm.get_name())
else:
logger.warning(" no scheduler for the realm %s", realm.get_name())
# Maybe there is no scheduler in the realm, it can be a
# big problem if there are elements in packs
nb_elements = 0
for hosts_pack in realm.packs:
nb_elements += len(hosts_pack)
nb_elements_all_realms += len(hosts_pack)
realm.hosts_count = nb_elements
if nb_elements:
if not nb_schedulers:
self.add_error("The realm %s has %d hosts but no scheduler!"
% (realm.get_name(), nb_elements))
realm.packs = [] # Dumb pack
continue
logger.info(" %d hosts in the realm %s, distributed in %d linked packs",
nb_elements, realm.get_name(), len(realm.packs))
else:
logger.info(" no hosts in the realm %s", realm.get_name())
# Create a relation between a pack and each scheduler in the realm
packindex = 0
packindices = {}
for s_id in no_spare_schedulers:
scheduler = self.schedulers[s_id]
logger.debug(" scheduler: %s", scheduler.instance_id)
packindices[s_id] = packindex
packindex += 1
for i in range(0, scheduler.weight):
weight_list.append(s_id)
logger.debug(" pack indices: %s", packindices)
# packindices is indexed with the scheduler id and contains
# the configuration part number to get used: sched1:0, sched2: 1, ...
round_robin = itertools.cycle(weight_list)
# We must initialize nb_schedulers packs
for i in range(0, nb_schedulers):
packs[i] = []
# Try to load the history association dict so we will try to
# send the hosts in the same "pack"
assoc = {}
# Now we explode the numerous packs into reals packs:
# we 'load balance' them in a round-robin way, but counting the number of hosts in
# case some packs have too many hosts and others have few
realm.packs.sort(reverse=True)
pack_higher_hosts = 0
for hosts_pack in realm.packs:
valid_value = False
old_pack = -1
for host_id in hosts_pack:
host = self.hosts[host_id]
old_i = assoc.get(host.get_name(), -1)
# Maybe it's a new, if so, don't count it
if old_i == -1:
continue
# Maybe it is the first one we look at, if so, take its value
if old_pack == -1 and old_i != -1:
old_pack = old_i
valid_value = True
continue
if old_i == old_pack:
valid_value = True
if old_i != old_pack:
valid_value = False
# If it's a valid sub pack and the pack id really exist, use it!
if valid_value and old_pack in packindices:
i = old_pack
else:
if isinstance(i, int):
i = next(round_robin)
elif (len(packs[packindices[i]]) + len(hosts_pack)) >= pack_higher_hosts:
pack_higher_hosts = (len(packs[packindices[i]]) + len(hosts_pack))
i = next(round_robin)
for host_id in hosts_pack:
host = self.hosts[host_id]
packs[packindices[i]].append(host_id)
assoc[host.get_name()] = i
# Now packs is a dictionary indexed with the configuration part
# number and containing the list of hosts
realm.packs = packs
logger.info(" total number of hosts in all realms: %d", nb_elements_all_realms)
if len(self.hosts) != nb_elements_all_realms:
logger.warning("There are %d hosts defined, and %d hosts dispatched in the realms. "
"Some hosts have been ignored", len(self.hosts), nb_elements_all_realms)
self.add_error("There are %d hosts defined, and %d hosts dispatched in the realms. "
"Some hosts have been "
"ignored" % (len(self.hosts), nb_elements_all_realms)) | [
"Create packs of hosts and services (all dependencies are resolved)\n It create a graph. All hosts are connected to their\n parents, and hosts without parent are connected to host 'root'.\n services are linked to their host. Dependencies between hosts/services are managed.\n REF: doc/pack-creation.png\n\n :return: None\n "
] |
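Note: the pack creation above boils down to finding connected components in the host relation graph. A minimal standalone sketch of that idea (it does not use Alignak's Graph class; names and data are illustrative):

    from collections import defaultdict, deque

    def accessibility_packs(nodes, links):
        # Group nodes into connected components; links is an iterable of (a, b) pairs
        neighbours = defaultdict(set)
        for node_a, node_b in links:
            neighbours[node_a].add(node_b)
            neighbours[node_b].add(node_a)
        seen, packs = set(), []
        for node in nodes:
            if node in seen:
                continue
            # A breadth-first walk from this node collects one pack
            pack, queue = set(), deque([node])
            while queue:
                current = queue.popleft()
                if current in pack:
                    continue
                pack.add(current)
                queue.extend(neighbours[current] - pack)
            seen |= pack
            packs.append(pack)
        return packs

    # Hosts linked by parent/dependency relations end up in the same pack
    print(accessibility_packs(['h1', 'h2', 'h3', 'h4'], [('h1', 'h2'), ('h3', 'h2')]))
    # e.g. [{'h1', 'h2', 'h3'}, {'h4'}]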
Please provide a description of the function:def cut_into_parts(self):
# pylint: disable=too-many-branches, too-many-locals, too-many-statements
# User must have set a spare if he needed one
logger.info("Splitting the configuration into parts:")
nb_parts = 0
for realm in self.realms:
no_spare_schedulers = realm.schedulers
if not no_spare_schedulers:
if realm.potential_schedulers:
no_spare_schedulers = [realm.potential_schedulers[0]]
nb_schedulers = len(no_spare_schedulers)
nb_parts += nb_schedulers
if nb_schedulers:
logger.info(" %d scheduler(s) for the realm %s", nb_schedulers, realm.get_name())
else:
logger.warning(" no scheduler for the realm %s", realm.get_name())
if nb_parts == 0:
nb_parts = 1
# We create dummy configurations for schedulers:
# they are clones of the master configuration but without hosts and
# services (because they are split between these configurations)
logger.info("Splitting the configuration into %d parts...", nb_parts)
self.parts = {}
for part_index in range(0, nb_parts):
self.parts[part_index] = Config()
# Now we copy all properties of conf into the new ones
for prop, entry in sorted(list(Config.properties.items())):
# Do not copy the configuration instance id nor name!
if prop in ['instance_id', 'config_name']:
continue
# Only the one that are managed and used
if entry.managed and not isinstance(entry, UnusedProp):
val = getattr(self, prop, None)
setattr(self.parts[part_index], prop, val)
# Set the cloned configuration name
self.parts[part_index].config_name = "%s (%d)" % (self.config_name, part_index)
logger.debug("- cloning configuration: %s -> %s",
self.parts[part_index].config_name, self.parts[part_index])
# Copy the configuration objects lists. We need a deepcopy because each configuration
# will have some new groups... but we create a new uuid
self.parts[part_index].uuid = get_a_new_object_id()
types_creations = self.__class__.types_creations
for o_type in types_creations:
(_, clss, inner_property, _, clonable) = types_creations[o_type]
if not clonable:
logger.debug(" . do not clone: %s", inner_property)
continue
# todo: Indeed contactgroups should be managed like hostgroups...
if inner_property in ['hostgroups', 'servicegroups']:
new_groups = []
for group in getattr(self, inner_property):
new_groups.append(group.copy_shell())
setattr(self.parts[part_index], inner_property, clss(new_groups))
elif inner_property in ['hosts', 'services']:
setattr(self.parts[part_index], inner_property, clss([]))
else:
setattr(self.parts[part_index], inner_property, getattr(self, inner_property))
logger.debug(" . cloned %s: %s -> %s", inner_property,
getattr(self, inner_property),
getattr(self.parts[part_index], inner_property))
# The elements of the other configurations will be tagged here
self.parts[part_index].other_elements = {}
# No scheduler has yet accepted the configuration
self.parts[part_index].is_assigned = False
self.parts[part_index].scheduler_link = None
self.parts[part_index].push_flavor = ''
# Once parts got created, the current configuration has some 'parts'
# self.parts is the configuration split into parts for the schedulers
# Just create packs. There can be numerous ones
# In pack we've got hosts and service and packs are in the realms
logger.debug("Creating packs for realms...")
self.create_packs()
# Once packs got created, all the realms have some 'packs'
logger.info("Realms:")
for realm in self.realms:
logger.info(" - realm: %s", realm)
for idx in realm.packs:
logger.info(" - pack: %s / %d hosts (%s)",
idx, len(realm.packs[idx]), ','.join([self.hosts[host_id].get_name()
for host_id in realm.packs[idx]]))
# We have packs for realms and elements into configurations, let's merge this...
logger.info("Realms:")
offset = 0
for realm in self.realms:
logger.info(" Realm: %s", realm)
for idx in realm.packs:
logger.info(" - pack: %s / %d hosts", idx, len(realm.packs[idx]))
if not realm.packs[idx]:
logger.info(" - no hosts are declared in this realm pack.")
# continue
try:
instance_id = self.parts[idx + offset].instance_id
for host_id in realm.packs[idx]:
host = self.hosts[host_id]
self.parts[idx + offset].hosts.add_item(host)
for service_id in host.services:
service = self.services[service_id]
self.parts[idx + offset].services.add_item(service)
# Now the conf can be linked with the realm
realm.parts.update({instance_id: self.parts[idx + offset]})
# offset += 1
except KeyError:
logger.info(" - no configuration part is affected "
"because of mismatching hosts packs / schedulers count. "
"Probably too much schedulers for the hosts count!")
offset += len(realm.packs)
del realm.packs
# We nearly have hosts and services. Now we want real hosts (Class)
# And we want groups too
for part_index in self.parts:
cfg = self.parts[part_index]
# Fill host groups
for ori_hg in self.hostgroups:
hostgroup = cfg.hostgroups.find_by_name(ori_hg.get_name())
mbrs_id = []
for host in ori_hg.members:
if host != '':
mbrs_id.append(host)
for host in cfg.hosts:
if host.uuid in mbrs_id:
hostgroup.members.append(host.uuid)
# And also relink the hosts with the valid hostgroups
for host in cfg.hosts:
orig_hgs = host.hostgroups
nhgs = []
for ohg_id in orig_hgs:
ohg = self.hostgroups[ohg_id]
nhg = cfg.hostgroups.find_by_name(ohg.get_name())
nhgs.append(nhg.uuid)
host.hostgroups = nhgs
# Fill servicegroup
for ori_sg in self.servicegroups:
servicegroup = cfg.servicegroups.find_by_name(ori_sg.get_name())
mbrs = ori_sg.members
mbrs_id = []
for service in mbrs:
if service != '':
mbrs_id.append(service)
for service in cfg.services:
if service.uuid in mbrs_id:
servicegroup.members.append(service.uuid)
# And also relink the services with the valid servicegroups
for host in cfg.services:
orig_hgs = host.servicegroups
nhgs = []
for ohg_id in orig_hgs:
ohg = self.servicegroups[ohg_id]
nhg = cfg.servicegroups.find_by_name(ohg.get_name())
nhgs.append(nhg.uuid)
host.servicegroups = nhgs
# Now we fill other_elements by host (services are with their host
# so they are not tagged)
logger.info("Configuration parts:")
for part_index in self.parts:
for host in self.parts[part_index].hosts:
for j in [j for j in self.parts if j != part_index]: # So other than i
self.parts[part_index].other_elements[host.get_name()] = part_index
logger.info("- part: %d - %s, %d hosts", part_index, self.parts[part_index],
len(self.parts[part_index].hosts)) | [
"Cut conf into part for scheduler dispatch.\n\n Basically it provides a set of host/services for each scheduler that\n have no dependencies between them\n\n :return: None\n "
] |
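Note: the dispatching of packs to scheduler parts is essentially a weighted round-robin, as in this reduced sketch (scheduler names, weights and packs are hypothetical):

    import itertools

    schedulers = {'scheduler-1': 2, 'scheduler-2': 1}   # a weight of 2 means twice as many turns
    packs = [['srv1', 'srv2'], ['db1'], ['web1', 'web2', 'web3'], ['gw1']]

    # Each scheduler appears in the rotation as many times as its weight
    weight_list = [name for name, weight in schedulers.items() for _ in range(weight)]
    round_robin = itertools.cycle(weight_list)

    assignments = {name: [] for name in schedulers}
    for pack in sorted(packs, key=len, reverse=True):   # biggest packs first
        assignments[next(round_robin)].extend(pack)

    print(assignments)
    # e.g. {'scheduler-1': ['web1', 'web2', 'web3', 'srv1', 'srv2', 'gw1'], 'scheduler-2': ['db1']}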
Please provide a description of the function:def prepare_for_sending(self):
if [arbiter_link for arbiter_link in self.arbiters if arbiter_link.spare]:
logger.info('Serializing the configuration for my spare arbiter...')
# Now serialize the whole configuration, for sending to spare arbiters
self.spare_arbiter_conf = serialize(self) | [
"The configuration needs to be serialized before being sent to a spare arbiter\n\n :return: None\n "
] |
Please provide a description of the function:def dump(self, dump_file_name=None):
config_dump = {}
for _, _, category, _, _ in list(self.types_creations.values()):
try:
objs = [jsonify_r(i) for i in getattr(self, category)]
except (TypeError, AttributeError): # pragma: no cover, simple protection
logger.warning("Dumping configuration, '%s' not present in the configuration",
category)
continue
container = getattr(self, category)
if category == "services":
objs = sorted(objs,
key=lambda o: "%s/%s" % (o["host_name"], o["service_description"]))
elif hasattr(container, "name_property"):
name_prop = container.name_property
# objs holds dicts built by jsonify_r, so sort on the dict key rather than an attribute
objs = sorted(objs, key=lambda o, prop=name_prop: o.get(prop, ''))
config_dump[category] = objs
if not dump_file_name:
dump_file_name = os.path.join(tempfile.gettempdir(),
'alignak-%s-cfg-dump-%d.json'
% (self.name, int(time.time())))
try:
logger.info('Dumping configuration to: %s', dump_file_name)
fd = open(dump_file_name, "w")
fd.write(json.dumps(config_dump, indent=4, separators=(',', ': '), sort_keys=True))
fd.close()
logger.info('Dumped')
except (OSError, IndexError) as exp: # pragma: no cover, should never happen...
logger.critical("Error when dumping configuration to %s: %s", dump_file_name, str(exp)) | [
"Dump configuration to a file in a JSON format\n\n :param dump_file_name: the file to dump configuration to\n :type dump_file_name: str\n :return: None\n "
] |
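For instance, dumping the loaded configuration to an explicit file and reloading it for inspection could look like this (the `config` instance and the path are only illustrative):

    import json

    # assuming `config` is a loaded Config instance; the target path is just an example
    config.dump('/tmp/alignak-config-dump.json')

    with open('/tmp/alignak-config-dump.json') as dump_file:
        dumped = json.load(dump_file)
    print(sorted(dumped.keys()))   # one key per object category: 'hosts', 'services', ...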
Please provide a description of the function:def add(self, elt):
if isinstance(elt, Brok):
# For brok, we tag the brok with our instance_id
elt.instance_id = self.instance_id
if elt.type == 'monitoring_log':
# The brok is a monitoring event
with self.events_lock:
self.events.append(elt)
statsmgr.counter('events', 1)
else:
with self.broks_lock:
self.broks.append(elt)
statsmgr.counter('broks.added', 1)
elif isinstance(elt, ExternalCommand):
logger.debug("Queuing an external command '%s'", str(elt.__dict__))
with self.external_commands_lock:
self.external_commands.append(elt)
statsmgr.counter('external-commands.added', 1)
else: # pragma: no cover, simple dev alerting
logger.error('Do not manage object type %s (%s)', type(elt), elt) | [
"Generic function to add objects to the daemon internal lists.\n Manage Broks, External commands\n\n :param elt: objects to add\n :type elt: alignak.AlignakObject\n :return: None\n "
] |
Please provide a description of the function:def push_broks_to_broker(self): # pragma: no cover - not used!
someone_is_concerned = False
sent = False
for broker_link in self.conf.brokers:
# Send only if the broker is concerned...
if not broker_link.manage_arbiters:
continue
someone_is_concerned = True
if broker_link.reachable:
logger.debug("Sending %d broks to the broker %s", len(self.broks), broker_link.name)
if broker_link.push_broks(self.broks):
statsmgr.counter('broks.pushed.count', len(self.broks))
sent = True
if not someone_is_concerned or sent:
# No one is anymore interested with...
del self.broks[:] | [
"Send all broks from arbiter internal list to broker\n\n The arbiter get some broks and then pushes them to all the brokers.\n\n :return: None\n "
] |
Please provide a description of the function:def push_external_commands_to_schedulers(self): # pragma: no cover - not used!
# Now get all external commands and push them to the schedulers
for external_command in self.external_commands:
self.external_commands_manager.resolve_command(external_command)
# Now for all reachable schedulers, send the commands
sent = False
for scheduler_link in self.conf.schedulers:
ext_cmds = scheduler_link.external_commands
if ext_cmds and scheduler_link.reachable:
logger.debug("Sending %d commands to the scheduler %s",
len(ext_cmds), scheduler_link.name)
if scheduler_link.push_external_commands(ext_cmds):
statsmgr.counter('external-commands.pushed.count', len(ext_cmds))
sent = True
if sent:
# Clean the pushed commands
scheduler_link.external_commands.clear() | [
"Send external commands to schedulers\n\n :return: None\n "
] |
Please provide a description of the function:def get_broks_from_satellites(self): # pragma: no cover - not used!
for satellites in [self.conf.brokers, self.conf.schedulers,
self.conf.pollers, self.conf.reactionners, self.conf.receivers]:
for satellite in satellites:
# Get only if reachable...
if not satellite.reachable:
continue
logger.debug("Getting broks from: %s", satellite.name)
new_broks = satellite.get_and_clear_broks()
if new_broks:
logger.debug("Got %d broks from: %s", len(new_broks), satellite.name)
for brok in new_broks:
self.add(brok) | [
"Get broks from my all internal satellite links\n\n The arbiter get the broks from ALL the known satellites\n\n :return: None\n "
] |
Please provide a description of the function:def get_initial_broks_from_satellites(self):
for satellites in [self.conf.brokers, self.conf.schedulers,
self.conf.pollers, self.conf.reactionners, self.conf.receivers]:
for satellite in satellites:
# Get only if reachable...
if not satellite.reachable:
continue
logger.debug("Getting initial brok from: %s", satellite.name)
brok = satellite.get_initial_status_brok()
logger.debug("Satellite '%s' initial brok: %s", satellite.name, brok)
self.add(brok) | [
"Get initial broks from my internal satellite links\n\n :return: None\n "
] |
Please provide a description of the function:def load_monitoring_config_file(self, clean=True):
# pylint: disable=too-many-branches,too-many-statements, too-many-locals
self.loading_configuration = True
_t_configuration = time.time()
if self.verify_only:
# Force adding a console handler to the Alignak logger
set_log_console(logging.INFO if not self.debug else logging.DEBUG)
# Force the global logger at INFO level
set_log_level(logging.INFO if not self.debug else logging.DEBUG)
logger.info("-----")
logger.info("Arbiter is in configuration check mode")
logger.info("Arbiter log level got increased to a minimum of INFO")
logger.info("-----")
# Maybe we do not have environment file
# if not self.alignak_env:
# self.exit_on_error("*** No Alignak environment file. Exiting...", exit_code=2)
# else:
# logger.info("Environment file: %s", self.env_filename)
if self.legacy_cfg_files:
logger.info("Loading monitored system configuration from legacy files: %s",
self.legacy_cfg_files)
else:
logger.info("No legacy file(s) configured for monitored system configuration")
# Alignak global environment file
# -------------------------------
# Here we did not yet read the Alignak configuration file (except for the Arbiter daemon
# configuration.
# We must get the Alignak macros and global configuration parameters
# ---------------------
# Manage Alignak macros; this before loading the legacy configuration files
# with their own potential macros
# ---------------------
macros = []
# Get the macros / variables declared in the Alignak environment (alignak.ini) file!
if self.alignak_env:
# The properties defined in the alignak.cfg file are not yet set! So we set the one
# got from the environment
logger.info("Getting Alignak macros...")
alignak_macros = self.alignak_env.get_alignak_macros()
if alignak_macros:
# Remove the leading and trailing underscores
for key in sorted(alignak_macros.keys()):
value = alignak_macros[key]
if key[0] == '_' or key[0] == '$':
key = key[1:]
if key[-1] == '_' or key[-1] == '$':
key = key[:-1]
# Create an old legacy macro format
macros.append('$%s$=%s' % (key.upper(), value))
logger.debug("- Alignak macro '$%s$' = %s", key.upper(), value)
# and then the global configuration.
# The properties defined in the alignak.cfg file are not yet set! So we set the one
# got from the appropriate section of the Alignak environment file
logger.info("Getting Alignak configuration...")
alignak_configuration = self.alignak_env.get_alignak_configuration()
if alignak_configuration:
for key in sorted(alignak_configuration.keys()):
value = alignak_configuration[key]
if key.startswith('_'):
# Ignore configuration variables prefixed with _
continue
if key in self.conf.properties:
entry = self.conf.properties[key]
setattr(self.conf, key, entry.pythonize(value))
else:
setattr(self.conf, key, value)
logger.debug("- setting '%s' as %s", key, getattr(self.conf, key))
logger.info("Got Alignak global configuration")
self.alignak_name = getattr(self.conf, "alignak_name", self.name)
logger.info("Configuration for Alignak: %s", self.alignak_name)
if macros:
self.conf.load_params(macros)
# Here we got the macros and alignak configuration variables from the
# alignak.ini configuration!
# The self Config object is now initialized with the global Alignak variables.
# We can now read and parse the legacy configuration files (if any...)
raw_objects = self.conf.read_config_buf(
self.conf.read_legacy_cfg_files(self.legacy_cfg_files,
self.alignak_env.cfg_files if self.alignak_env
else None)
)
if self.alignak_name != getattr(self.conf, "alignak_name", self.name):
self.alignak_name = getattr(self.conf, "alignak_name", self.name)
logger.warning("Alignak name changed from the legacy Cfg files: %s", self.alignak_name)
# Maybe conf is already invalid
if not self.conf.conf_is_correct:
self.conf.show_errors()
self.request_stop("*** One or more problems were encountered while "
"processing the configuration (first check)...", exit_code=1)
if self.legacy_cfg_files:
logger.info("I correctly loaded the legacy configuration files")
# Hacking some global parameters inherited from Nagios to create
# on the fly some Broker modules like for status.dat parameters
# or nagios.log one if there are none already available
if 'module' not in raw_objects:
raw_objects['module'] = []
extra_modules = self.conf.hack_old_nagios_parameters()
if extra_modules:
logger.info("Some inner modules were configured for Nagios legacy parameters")
for _, module in extra_modules:
raw_objects['module'].append(module)
logger.debug("Extra modules: %s", extra_modules)
# Alignak global environment file
# -------------------------------
# Here we got the monitored system configuration from the legacy configuration files
# We must overload this configuration for the daemons and modules with the configuration
# declared in the Alignak environment (alignak.ini) file!
if self.alignak_env:
# Update the daemons legacy configuration if not complete
for daemon_type in ['arbiter', 'scheduler', 'broker',
'poller', 'reactionner', 'receiver']:
if daemon_type not in raw_objects:
raw_objects[daemon_type] = []
# Get all the Alignak daemons from the configuration
logger.info("Getting daemons configuration...")
some_daemons = False
for daemon_name, daemon_cfg in list(self.alignak_env.get_daemons().items()):
logger.info("Got a daemon configuration for %s", daemon_name)
if 'type' not in daemon_cfg:
self.conf.add_error("Ignoring daemon with an unknown type: %s" % daemon_name)
continue
some_daemons = True
daemon_type = daemon_cfg['type']
daemon_name = daemon_cfg['name']
logger.info("- got a %s named %s, spare: %s",
daemon_type, daemon_name, daemon_cfg.get('spare', False))
# If this daemon is found in the legacy configuration, replace this
new_cfg_daemons = []
for cfg_daemon in raw_objects[daemon_type]:
if cfg_daemon.get('name', 'unset') == daemon_name \
or cfg_daemon.get("%s_name" % daemon_type,
'unset') == [daemon_name]:
logger.info(" updating daemon Cfg file configuration")
else:
new_cfg_daemons.append(cfg_daemon)
new_cfg_daemons.append(daemon_cfg)
raw_objects[daemon_type] = new_cfg_daemons
logger.debug("Checking daemons configuration:")
some_legacy_daemons = False
for daemon_type in ['arbiter', 'scheduler', 'broker',
'poller', 'reactionner', 'receiver']:
for cfg_daemon in raw_objects[daemon_type]:
some_legacy_daemons = True
if 'name' not in cfg_daemon:
cfg_daemon['name'] = cfg_daemon['%s_name' % daemon_type]
cfg_daemon['modules'] = \
self.alignak_env.get_modules(daemon_name=cfg_daemon['name'])
for module_daemon_type, module in extra_modules:
if module_daemon_type == daemon_type:
cfg_daemon['modules'].append(module['name'])
logger.info("- added an Alignak inner module '%s' to the %s: %s",
module['name'], daemon_type, cfg_daemon['name'])
logger.debug("- %s / %s: ", daemon_type, cfg_daemon['name'])
logger.debug(" %s", cfg_daemon)
if not some_legacy_daemons:
logger.debug("- No legacy configured daemons.")
else:
logger.info("- some dameons are configured in legacy Cfg files. "
"You should update the configuration with the new Alignak "
"configuration file.")
if not some_daemons and not some_legacy_daemons:
logger.info("- No configured daemons.")
# and then get all modules from the configuration
logger.info("Getting modules configuration...")
if 'module' in raw_objects and raw_objects['module']:
# Manage the former parameters module_alias and module_types
# - replace with name and type
for module_cfg in raw_objects['module']:
if 'module_alias' not in module_cfg and 'name' not in module_cfg:
self.conf.add_error("Module declared without any 'name' or 'module_alias'")
continue
else:
if 'name' not in module_cfg:
module_cfg['name'] = module_cfg['module_alias']
module_cfg.pop('module_alias')
if 'module_types' in module_cfg and 'type' not in module_cfg:
module_cfg['type'] = module_cfg['module_types']
module_cfg.pop('module_types')
logger.debug("Module cfg %s params: %s", module_cfg['name'], module_cfg)
for _, module_cfg in list(self.alignak_env.get_modules().items()):
logger.info("- got a module %s, type: %s",
module_cfg.get('name', 'unset'), module_cfg.get('type', 'untyped'))
# If this module is found in the former Cfg files, replace the former configuration
for cfg_module in raw_objects['module']:
if cfg_module.get('name', 'unset') == [module_cfg['name']]:
logger.info(" updating module Cfg file configuration")
# merge the environment parameters into the existing module definition
cfg_module.update(module_cfg)
logger.info("Module %s updated parameters: %s",
module_cfg['name'], module_cfg)
break
else:
raw_objects['module'].append(module_cfg)
logger.debug("Module env %s params: %s", module_cfg['name'], module_cfg)
if 'module' in raw_objects and not raw_objects['module']:
logger.info("- No configured modules.")
# Create objects for our arbiters and modules
self.conf.early_create_objects(raw_objects)
# Check that an arbiter link exists and create the appropriate relations
# If no arbiter exists, create one with the provided data
params = {}
if self.alignak_env:
params = self.alignak_env.get_alignak_configuration()
self.conf.early_arbiter_linking(self.name, params)
# Search which arbiter I am in the arbiter links list
for lnk_arbiter in self.conf.arbiters:
logger.debug("I have an arbiter in my configuration: %s", lnk_arbiter.name)
if lnk_arbiter.name != self.name:
# Arbiter is not me!
logger.info("I found another arbiter (%s) in my (%s) configuration",
lnk_arbiter.name, self.name)
# And this arbiter needs to receive a configuration
lnk_arbiter.need_conf = True
continue
logger.info("I found myself in the configuration: %s", lnk_arbiter.name)
if self.link_to_myself is None:
# I update only if it does not yet exist (first configuration load)!
# I will not change myself because I am simply reloading a configuration ;)
self.link_to_myself = lnk_arbiter
self.link_to_myself.instance_id = self.name
self.link_to_myself.push_flavor = ''.encode('utf-8')
# self.link_to_myself.hash = self.conf.hash
# Set myself as alive ;)
self.link_to_myself.set_alive()
# We consider that this arbiter is a master one...
self.is_master = not self.link_to_myself.spare
if self.is_master:
logger.info("I am the master Arbiter.")
else:
logger.info("I am a spare Arbiter.")
# ... and that this arbiter do not need to receive a configuration
lnk_arbiter.need_conf = False
if not self.link_to_myself:
self.conf.show_errors()
self.request_stop("Error: I cannot find my own configuration (%s), I bail out. "
"To solve this, please change the arbiter name parameter in "
"the Alignak configuration file (certainly alignak.ini) "
"with the value '%s'."
" Thanks." % (self.name, socket.gethostname()), exit_code=1)
# Even if I am a spare arbiter, I will parse the whole configuration. This may be useful
# if the master fails before sending its configuration to me!
# An Arbiter which is not a master one will not go further...
# todo: is it a good choice?:
# 1/ why reading all the configuration files stuff?
# 2/ why not loading configuration data from the modules?
# -> Indeed, here, only the main configuration has been fetched by the arbiter.
# Perhaps loading only the alignak.ini would be enough for a spare arbiter.
# And it will make it simpler to configure...
if not self.is_master:
logger.info("I am not the master arbiter, I stop parsing the configuration")
self.loading_configuration = False
return
# We load our own modules
self.do_load_modules(self.link_to_myself.modules)
# Call modules that manage this read configuration pass
_ts = time.time()
self.hook_point('read_configuration')
statsmgr.timer('hook.read_configuration', time.time() - _ts)
# Call modules get_alignak_configuration() to load Alignak configuration parameters
# todo: re-enable this feature if it is really needed. It is a bit tricky to manage
# configuration from our own configuration file and from an external source :(
# (example modules: alignak_backend)
# _t0 = time.time()
# self.load_modules_alignak_configuration()
# statsmgr.timer('core.hook.get_alignak_configuration', time.time() - _t0)
# Call modules get_objects() to load new objects our own modules
# (example modules: alignak_backend)
self.load_modules_configuration_objects(raw_objects)
# Create objects for all the configuration
self.conf.create_objects(raw_objects)
# Maybe configuration is already invalid
if not self.conf.conf_is_correct:
self.conf.show_errors()
self.request_stop("*** One or more problems were encountered while processing "
"the configuration (second check)...", exit_code=1)
# Manage all post-conf modules
self.hook_point('early_configuration')
# Here we got all our Alignak configuration and the monitored system configuration
# from the legacy configuration files and extra modules.
logger.info("Preparing configuration...")
# Create Template links
self.conf.linkify_templates()
# All inheritances
self.conf.apply_inheritance()
# Explode between types
self.conf.explode()
# Implicit inheritance for services
self.conf.apply_implicit_inheritance()
# Fill default values for all the configuration objects
self.conf.fill_default_configuration()
# Remove templates from config
self.conf.remove_templates()
# Overrides specific service instances properties
self.conf.override_properties()
# Linkify objects to each other
self.conf.linkify()
# applying dependencies
self.conf.apply_dependencies()
# Raise warning about currently unmanaged parameters
if self.verify_only:
self.conf.warn_about_unmanaged_parameters()
# Explode global configuration parameters into Classes
self.conf.explode_global_conf()
# set our own timezone and propagate it to other satellites
self.conf.propagate_timezone_option()
# Look for business rules, and create the dep tree
self.conf.create_business_rules()
# And link them
self.conf.create_business_rules_dependencies()
# Set my own parameters from the loaded configuration
# Last monitoring events
self.recent_events = deque(maxlen=int(os.environ.get('ALIGNAK_EVENTS_LOG_COUNT',
self.conf.events_log_count)))
# Manage all post-conf modules
self.hook_point('late_configuration')
# Configuration is correct?
logger.info("Checking configuration...")
self.conf.is_correct()
# Clean objects of temporary/unnecessary attributes for live work:
if clean:
logger.info("Cleaning configuration objects...")
self.conf.clean()
# Dump Alignak macros
logger.debug("Alignak global macros:")
macro_resolver = MacroResolver()
macro_resolver.init(self.conf)
for macro_name in sorted(self.conf.macros):
macro_value = macro_resolver.resolve_simple_macros_in_string("$%s$" % macro_name, [],
None, None)
logger.debug("- $%s$ = %s", macro_name, macro_value)
statsmgr.timer('configuration.loading', time.time() - _t_configuration)
# REF: doc/alignak-conf-dispatching.png (2)
logger.info("Splitting configuration...")
self.conf.cut_into_parts()
# Here, the self.conf.parts exist
# And the realms have some 'packs'
# Check if all the configuration daemons will be available
if not self.daemons_start(run_daemons=False):
self.conf.show_errors()
self.request_stop("*** Alignak will not be able to manage the configured daemons. "
"Check and update your configuration!", exit_code=1)
# Some properties need to be prepared (somehow "flatten"...) before being sent,
# This to prepare the configuration that will be sent to our spare arbiter (if any)
self.conf.prepare_for_sending()
statsmgr.timer('configuration.spliting', time.time() - _t_configuration)
# Here, the self.conf.spare_arbiter_conf exist
# Still a last configuration check because some things may have changed when
# we cut the configuration into parts (eg. hosts and realms consistency) and
# when we prepared the configuration for sending
if not self.conf.conf_is_correct: # pragma: no cover, not with unit tests.
self.conf.show_errors()
self.request_stop("Configuration is incorrect, sorry, I bail out", exit_code=1)
logger.info("Things look okay - "
"No serious problems were detected during the pre-flight check")
# Exit if we are just here for config checking
if self.verify_only:
logger.info("Arbiter %s checked the configuration", self.name)
if self.conf.missing_daemons:
logger.warning("Some missing daemons were detected in the parsed configuration. "
"Nothing to worry about, but you should define them, "
"else Alignak will use its default configuration.")
# Display found warnings and errors
self.conf.show_errors()
self.request_stop()
del raw_objects
# Display found warnings and errors
self.conf.show_errors()
# Now I have a configuration!
self.have_conf = True
self.loading_configuration = False
statsmgr.timer('configuration.available', time.time() - _t_configuration) | [
"Load main configuration file (alignak.cfg)::\n\n * Read all files given in the -c parameters\n * Read all .cfg files in cfg_dir\n * Read all files in cfg_file\n * Create objects (Arbiter, Module)\n * Set HTTP links info (ssl etc)\n * Load its own modules\n * Execute read_configuration hook (for arbiter modules)\n * Create all objects (Service, Host, Realms ...)\n * \"Compile\" configuration (Linkify, explode, apply inheritance, fill default values ...)\n * Cut conf into parts and prepare it for sending\n\n The clean parameter is useful to load a configuration without removing the properties\n only used to parse the configuration and create the objects. Some utilities (like\n alignak-backend-import script) may need to avoid the cleaning ;)\n\n :param clean: set True to clean the created items\n :type clean: bool\n\n :return: None\n "
] |
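The Alignak macro normalisation performed above (strip a leading/trailing underscore or dollar sign, upper-case the name and rebuild a legacy '$NAME$=value' string) can be illustrated on its own; the variable name below is only an example:

    def to_legacy_macro(key, value):
        # Rebuild a legacy '$NAME$=value' macro string from an alignak.ini variable name
        if key[0] in ('_', '$'):
            key = key[1:]
        if key[-1] in ('_', '$'):
            key = key[:-1]
        return '$%s$=%s' % (key.upper(), value)

    print(to_legacy_macro('_dist_etc_', '/usr/local/etc/alignak'))
    # -> $DIST_ETC$=/usr/local/etc/alignak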
Please provide a description of the function:def load_modules_configuration_objects(self, raw_objects): # pragma: no cover,
# not yet with unit tests.
# Now we ask for configuration modules if they
# got items for us
for instance in self.modules_manager.instances:
logger.debug("Getting objects from the module: %s", instance.name)
if not hasattr(instance, 'get_objects'):
logger.debug("The module '%s' do not provide any objects.", instance.name)
return
try:
logger.info("Getting Alignak monitored configuration objects from module '%s'",
instance.name)
got_objects = instance.get_objects()
except Exception as exp: # pylint: disable=broad-except
logger.exception("Module %s get_objects raised an exception %s. "
"Log and continue to run.", instance.name, exp)
continue
if not got_objects:
logger.warning("The module '%s' did not provided any objects.", instance.name)
return
types_creations = self.conf.types_creations
for o_type in types_creations:
(_, _, prop, _, _) = types_creations[o_type]
if prop in ['arbiters', 'brokers', 'schedulers',
'pollers', 'reactionners', 'receivers', 'modules']:
continue
if prop not in got_objects:
logger.warning("Did not get any '%s' objects from %s", prop, instance.name)
continue
for obj in got_objects[prop]:
# test if raw_objects[o_type] is already set - if not, add an empty list
if o_type not in raw_objects:
raw_objects[o_type] = []
# Update the imported_from property if the module did not set it
if 'imported_from' not in obj:
obj['imported_from'] = 'module:%s' % instance.name
# Append to the raw objects
raw_objects[o_type].append(obj)
logger.debug("Added %i %s objects from %s",
len(got_objects[prop]), o_type, instance.name) | [
"Load configuration objects from arbiter modules\n If module implements get_objects arbiter will call it and add create\n objects\n\n :param raw_objects: raw objects we got from reading config files\n :type raw_objects: dict\n :return: None\n "
] |
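A module compatible with this mechanism only needs to expose a get_objects() method returning raw definitions keyed by object type. A minimal sketch, assuming an arbiter module interface (the class and the returned host are purely illustrative, not a real Alignak module):

    class ExampleConfigurationModule(object):
        # Illustrative arbiter module: provides host definitions from an external source
        name = 'example-configuration-module'

        def get_objects(self):
            # Keys must match the plural configuration types the arbiter knows about
            # ('hosts', 'services', 'commands', ...)
            return {
                'hosts': [
                    {'host_name': 'imported-host', 'address': '192.168.0.10',
                     'use': 'generic-host'}
                ]
            }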
Please provide a description of the function:def load_modules_alignak_configuration(self): # pragma: no cover, not yet with unit tests.
alignak_cfg = {}
# Ask configured modules if they got configuration for us
for instance in self.modules_manager.instances:
if not hasattr(instance, 'get_alignak_configuration'):
continue
try:
logger.info("Getting Alignak global configuration from module '%s'", instance.name)
cfg = instance.get_alignak_configuration()
alignak_cfg.update(cfg)
except Exception as exp: # pylint: disable=broad-except
logger.error("Module %s get_alignak_configuration raised an exception %s. "
"Log and continue to run", instance.name, str(exp))
output = io.StringIO()
traceback.print_exc(file=output)
logger.error("Back trace of this remove: %s", output.getvalue())
output.close()
continue
params = []
if alignak_cfg:
logger.info("Got Alignak global configuration:")
for key, value in sorted(alignak_cfg.items()):
logger.info("- %s = %s", key, value)
# properties starting with an _ character are "transformed" to macro variables
if key.startswith('_'):
key = '$' + key[1:].upper() + '$'
# properties valued as None are filtered
if value is None:
continue
# properties valued as None string are filtered
if value == 'None':
continue
# properties valued as empty strings are filtered
if value == '':
continue
# set properties as legacy Shinken configuration files
params.append("%s=%s" % (key, value))
self.conf.load_params(params) | [
"Load Alignak configuration from the arbiter modules\n If module implements get_alignak_configuration, call this function\n\n :param raw_objects: raw objects we got from reading config files\n :type raw_objects: dict\n :return: None\n "
] |
Please provide a description of the function:def request_stop(self, message='', exit_code=0):
# Only a master arbiter can stop the daemons
if self.is_master:
# Stop the daemons
self.daemons_stop(timeout=self.conf.daemons_stop_timeout)
# Request the daemon stop
super(Arbiter, self).request_stop(message, exit_code) | [
"Stop the Arbiter daemon\n\n :return: None\n "
] |
Please provide a description of the function:def start_daemon(self, satellite):
logger.info(" launching a daemon for: %s/%s...", satellite.type, satellite.name)
# The daemon startup script location may be defined in the configuration
daemon_script_location = getattr(self.conf, 'daemons_script_location', self.bindir)
if not daemon_script_location:
daemon_script_location = "alignak-%s" % satellite.type
else:
daemon_script_location = "%s/alignak-%s" % (daemon_script_location, satellite.type)
# Some extra arguments may be defined in the Alignak configuration
daemon_arguments = getattr(self.conf, 'daemons_arguments', '')
args = [daemon_script_location,
"--name", satellite.name,
"--environment", self.env_filename,
"--host", str(satellite.host),
"--port", str(satellite.port)]
if daemon_arguments:
args.append(daemon_arguments)
logger.info(" ... with some arguments: %s", args)
try:
process = psutil.Popen(args, stdin=None, stdout=None, stderr=None)
# A brief pause...
time.sleep(0.1)
except Exception as exp: # pylint: disable=broad-except
logger.error("Error when launching %s: %s", satellite.name, exp)
logger.error("Command: %s", args)
return False
logger.info(" %s launched (pid=%d, gids=%s)",
satellite.name, process.pid, process.gids())
# My satellites/daemons map
self.my_daemons[satellite.name] = {
'satellite': satellite,
'process': process
}
return True | [
"Manage the list of detected missing daemons\n\n If the daemon does not in exist `my_daemons`, then:\n - prepare daemon start arguments (port, name and log file)\n - start the daemon\n - make sure it started correctly\n\n :param satellite: the satellite for which a daemon is to be started\n :type satellite: SatelliteLink\n\n :return: True if the daemon started correctly\n "
] |
Please provide a description of the function:def daemons_start(self, run_daemons=True):
result = True
if run_daemons:
logger.info("Alignak configured daemons start:")
else:
logger.info("Alignak configured daemons check:")
# Parse the list of the missing daemons and try to run the corresponding processes
for satellites_list in [self.conf.arbiters, self.conf.receivers, self.conf.reactionners,
self.conf.pollers, self.conf.brokers, self.conf.schedulers]:
for satellite in satellites_list:
logger.info("- found %s, to be launched: %s, address: %s",
satellite.name, satellite.alignak_launched, satellite.uri)
if satellite == self.link_to_myself:
# Ignore myself ;)
continue
if satellite.alignak_launched and \
satellite.address not in ['127.0.0.1', 'localhost']:
logger.error("Alignak is required to launch a daemon for %s %s "
"but the satelitte is defined on an external address: %s",
satellite.type, satellite.name, satellite.address)
result = False
continue
if not run_daemons:
# When checking, ignore the daemon launch part...
continue
if not satellite.alignak_launched:
logger.debug("Alignak will not launch '%s'")
continue
if not satellite.active:
logger.warning("- daemon '%s' is declared but not set as active, "
"do not start...", satellite.name)
continue
if satellite.name in self.my_daemons:
logger.warning("- daemon '%s' is already running", satellite.name)
continue
started = self.start_daemon(satellite)
result = result and started
return result | [
"Manage the list of the daemons in the configuration\n\n Check if the daemon needs to be started by the Arbiter.\n\n If so, starts the daemon if `run_daemons` is True\n\n :param run_daemons: run the daemons or make a simple check\n :type run_daemons: bool\n\n :return: True if all daemons are running, else False. always True for a simple check\n "
] |
Please provide a description of the function:def daemons_check(self):
# First look if it's not too early to ping
start = time.time()
if self.daemons_last_check \
and self.daemons_last_check + self.conf.daemons_check_period > start:
logger.debug("Too early to check daemons, check period is %.2f seconds",
self.conf.daemons_check_period)
return True
logger.debug("Alignak launched daemons check")
result = True
procs = [psutil.Process()]
for daemon in list(self.my_daemons.values()):
# Get only the daemon (not useful for its children processes...)
# procs = daemon['process'].children()
procs.append(daemon['process'])
for proc in procs:
try:
logger.debug("Process %s is %s", proc.name(), proc.status())
# logger.debug("Process listening:", proc.name(), proc.status())
# for connection in proc.connections():
# l_addr, l_port = connection.laddr if connection.laddr else ('', 0)
# r_addr, r_port = connection.raddr if connection.raddr else ('', 0)
# logger.debug("- %s:%s <-> %s:%s, %s", l_addr, l_port, r_addr, r_port,
# connection.status)
# Reset the daemon connection if it got broken...
if not daemon['satellite'].con:
if self.daemon_connection_init(daemon['satellite']):
# Set my satellite as alive :)
daemon['satellite'].set_alive()
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
# Probably stopping...
if not self.will_stop and proc == daemon['process']:
logger.warning("Daemon %s/%s is not running!",
daemon['satellite'].type, daemon['satellite'].name)
logger.debug("Access denied - Process %s is %s", proc.name(), proc.status())
if not self.start_daemon(daemon['satellite']):
# Set my satellite as dead :(
daemon['satellite'].set_dead()
result = False
else:
logger.info("I restarted %s/%s",
daemon['satellite'].type, daemon['satellite'].name)
logger.info("Pausing %.2f seconds...", 0.5)
time.sleep(0.5)
else:
logger.info("Child process %s is %s", proc.name(), proc.status())
# Set the last check as now
self.daemons_last_check = start
logger.debug("Checking daemons duration: %.2f seconds", time.time() - start)
return result | [
"Manage the list of Alignak launched daemons\n\n Check if the daemon process is running\n\n :return: True if all daemons are running, else False\n "
] |
Please provide a description of the function:def daemons_stop(self, timeout=30, kill_children=False):
def on_terminate(proc):
logger.debug("process %s terminated with exit code %s", proc.pid, proc.returncode)
result = True
if self.my_daemons:
logger.info("Alignak self-launched daemons stop:")
start = time.time()
for daemon in list(self.my_daemons.values()):
# Terminate the daemon and its children process
procs = []
if kill_children:
procs = daemon['process'].children()
procs.append(daemon['process'])
for process in procs:
try:
logger.info("- terminating process %s", process.name())
process.terminate()
except psutil.AccessDenied:
logger.warning("Process %s is %s", process.name(), process.status())
procs = []
for daemon in list(self.my_daemons.values()):
# Stop the daemon and its children process
if kill_children:
procs = daemon['process'].children()
procs.append(daemon['process'])
_, alive = psutil.wait_procs(procs, timeout=timeout, callback=on_terminate)
if alive:
# Kill processes
for process in alive:
logger.warning("Process %s did not stopped, trying to kill", process.name())
process.kill()
_, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate)
if alive:
# give up
for process in alive:
logger.warning("process %s survived SIGKILL; giving up", process.name())
result = False
logger.debug("Stopping daemons duration: %.2f seconds", time.time() - start)
return result | [
"Stop the Alignak daemons\n\n Iterate over the self-launched daemons and their children list to send a TERM\n Wait for daemons to terminate and then send a KILL for those that are not yet stopped\n\n As a default behavior, only the launched daemons are killed, not their children.\n Each daemon will manage its children killing\n\n :param timeout: delay to wait before killing a daemon\n :type timeout: int\n\n :param kill_children: also kill the children (defaults to False)\n :type kill_children: bool\n\n :return: True if all daemons stopped\n ",
"Process termination callback function"
] |
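The terminate-then-kill escalation used above follows the usual psutil pattern; reduced to its core it looks like this (the process list is hypothetical):

    import psutil

    def stop_processes(procs, timeout=30):
        # Politely terminate the processes, then kill the survivors
        for proc in procs:
            proc.terminate()                      # send SIGTERM first
        _, alive = psutil.wait_procs(procs, timeout=timeout)
        for proc in alive:                        # escalate for anything still running
            proc.kill()
        _, alive = psutil.wait_procs(alive, timeout=timeout)
        return not alive                          # True if everything stopped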
Please provide a description of the function:def daemons_reachability_check(self):
# First look if it's not too early to ping
start = time.time()
if self.daemons_last_reachable_check and \
self.daemons_last_reachable_check + self.conf.daemons_check_period > start:
logger.debug("Too early to check daemons reachability, check period is %.2f seconds",
self.conf.daemons_check_period)
return True
_t0 = time.time()
logger.debug("Alignak daemons reachability check")
result = self.dispatcher.check_reachable()
statsmgr.timer('dispatcher.check-alive', time.time() - _t0)
_t0 = time.time()
logger.debug("Alignak daemons status get")
events = self.dispatcher.check_status_and_get_events()
duration = time.time() - _t0
statsmgr.timer('dispatcher.check-status', duration)
logger.debug("Getting daemons status duration: %.2f seconds", duration)
# Send the collected events to the Alignak logger
for event in events:
event.prepare()
make_monitoring_log(event.data['level'], event.data['message'],
timestamp=event.creation_time, to_logger=True)
# Add to the recent events for the WS endpoint
event.data['timestamp'] = event.creation_time
event.data['date'] = datetime.fromtimestamp(event.creation_time).\
strftime(self.conf.events_date_format)
event.data.pop('instance_id')
self.recent_events.append(event.data)
# Set the last check as now
self.daemons_last_reachable_check = start
logger.debug("Checking daemons reachability duration: %.2f seconds", time.time() - start)
return result | [
"Manage the list of Alignak launched daemons\n\n Check if the daemon process is running\n\n :return: True if all daemons are running, else False\n "
] |
Please provide a description of the function:def setup_new_conf(self):
# pylint: disable=too-many-locals
# Execute the base class treatment...
super(Arbiter, self).setup_new_conf()
with self.conf_lock:
logger.info("I received a new configuration from my master")
# Get the new configuration
self.cur_conf = self.new_conf
# self_conf is our own configuration from the alignak environment
# Arbiters do not have this property in the received configuration because
# they already loaded a configuration on daemon load
self_conf = self.cur_conf.get('self_conf', None)
if not self_conf:
self_conf = self.conf
# whole_conf contains the full configuration load by my master
whole_conf = self.cur_conf['whole_conf']
logger.debug("Received a new configuration, containing:")
for key in self.cur_conf:
logger.debug("- %s: %s", key, self.cur_conf[key])
logger.debug("satellite self configuration part: %s", self_conf)
# Update Alignak name
self.alignak_name = self.cur_conf['alignak_name']
logger.info("My Alignak instance: %s", self.alignak_name)
# This to indicate that the new configuration got managed...
self.new_conf = {}
# Get the whole monitored objects configuration
t00 = time.time()
try:
received_conf_part = unserialize(whole_conf)
except AlignakClassLookupException as exp: # pragma: no cover, simple protection
# This to indicate that the new configuration is not managed...
self.new_conf = {
"_status": "Cannot un-serialize configuration received from arbiter"
}
logger.error(self.new_conf['_status'])
logger.error("Back trace of the error:\n%s", traceback.format_exc())
return
except Exception as exp: # pylint: disable=broad-except
# This to indicate that the new configuration is not managed...
self.new_conf = {
"_status": "Cannot un-serialize configuration received from arbiter"
}
logger.error(self.new_conf['_status'])
logger.error(self.new_conf)
self.exit_on_exception(exp, self.new_conf)
logger.info("Monitored configuration %s received at %d. Un-serialized in %d secs",
received_conf_part, t00, time.time() - t00)
# Now we create our arbiters and schedulers links
my_satellites = getattr(self, 'arbiters', {})
received_satellites = self.cur_conf['arbiters']
for link_uuid in received_satellites:
rs_conf = received_satellites[link_uuid]
logger.debug("- received %s - %s: %s", rs_conf['instance_id'],
rs_conf['type'], rs_conf['name'])
# Must look if we already had a configuration and save our broks
already_got = rs_conf['instance_id'] in my_satellites
broks = []
actions = {}
wait_homerun = {}
external_commands = {}
running_id = 0
if already_got:
logger.warning("I already got: %s", rs_conf['instance_id'])
# Save some information
running_id = my_satellites[link_uuid].running_id
(broks, actions,
wait_homerun, external_commands) = \
my_satellites[link_uuid].get_and_clear_context()
# Delete the former link
del my_satellites[link_uuid]
# My new satellite link...
new_link = SatelliteLink.get_a_satellite_link('arbiter', rs_conf)
my_satellites[new_link.uuid] = new_link
logger.info("I got a new arbiter satellite: %s", new_link)
new_link.running_id = running_id
new_link.external_commands = external_commands
new_link.broks = broks
new_link.wait_homerun = wait_homerun
new_link.actions = actions
# # replacing satellite address and port by those defined in satellite_map
# if new_link.name in self_conf.satellite_map:
# overriding = self_conf.satellite_map[new_link.name]
# # satellite = dict(satellite) # make a copy
# # new_link.update(self_conf.get('satellite_map', {})[new_link.name])
# logger.warning("Do not override the configuration for: %s, with: %s. "
# "Please check whether this is necessary!",
# new_link.name, overriding)
# for arbiter_link in received_conf_part.arbiters:
# logger.info("I have arbiter links in my configuration: %s", arbiter_link.name)
# if arbiter_link.name != self.name and not arbiter_link.spare:
# # Arbiter is not me!
# logger.info("I found my master arbiter in the configuration: %s",
# arbiter_link.name)
# continue
#
# logger.info("I found myself in the received configuration: %s", arbiter_link.name)
# self.link_to_myself = arbiter_link
# # We received a configuration so we are not a master!
# self.is_master = False
# self.link_to_myself.spare = True
# # Set myself as alive ;)
# self.link_to_myself.set_alive()
# Now I have a configuration!
self.have_conf = True | [
" Setup a new configuration received from a Master arbiter.\n\n TODO: perharps we should not accept the configuration or raise an error if we do not\n find our own configuration data in the data. Thus this should never happen...\n :return: None\n "
] |
Please provide a description of the function:def wait_for_master_death(self):
logger.info("Waiting for master death")
timeout = 1.0
self.last_master_ping = time.time()
master_timeout = 300
for arbiter_link in self.conf.arbiters:
if not arbiter_link.spare:
master_timeout = \
arbiter_link.spare_check_interval * arbiter_link.spare_max_check_attempts
logger.info("I'll wait master death for %d seconds", master_timeout)
while not self.interrupted:
# Make a pause and check if the system time changed
_, tcdiff = self.make_a_pause(timeout)
# If there was a system time change then we have to adapt last_master_ping:
if tcdiff:
self.last_master_ping += tcdiff
if self.new_conf:
self.setup_new_conf()
sys.stdout.write(".")
sys.stdout.flush()
# Now check if master is dead or not
now = time.time()
if now - self.last_master_ping > master_timeout:
logger.info("Arbiter Master is dead. The arbiter %s takes the lead!",
self.link_to_myself.name)
for arbiter_link in self.conf.arbiters:
if not arbiter_link.spare:
arbiter_link.alive = False
self.must_run = True
break | [
"Wait for a master timeout and take the lead if necessary\n\n :return: None\n "
] |
Please provide a description of the function:def check_and_log_tp_activation_change(self):
for timeperiod in self.conf.timeperiods:
brok = timeperiod.check_and_log_activation_change()
if brok:
self.add(brok) | [
"Raise log for timeperiod change (useful for debug)\n\n :return: None\n "
] |
Please provide a description of the function:def manage_signal(self, sig, frame):
# Request the arbiter to stop
if sig in [signal.SIGINT, signal.SIGTERM]:
logger.info("received a signal: %s", SIGNALS_TO_NAMES_DICT[sig])
self.kill_request = True
self.kill_timestamp = time.time()
logger.info("request to stop in progress")
else:
Daemon.manage_signal(self, sig, frame) | [
"Manage signals caught by the process\n Specific behavior for the arbiter when it receives a sigkill or sigterm\n\n :param sig: signal caught by the process\n :type sig: str\n :param frame: current stack frame\n :type frame:\n :return: None\n "
] |
Please provide a description of the function:def configuration_dispatch(self, not_configured=None):
if not not_configured:
self.dispatcher = Dispatcher(self.conf, self.link_to_myself)
# I set my own dispatched configuration as the provided one...
# because I will not push a configuration to myself :)
self.cur_conf = self.conf
# Loop for the first configuration dispatching, if the first dispatch fails, bail out!
# Without a correct configuration, Alignak daemons will not run correctly
first_connection_try_count = 0
logger.info("Connecting to my satellites...")
while True:
first_connection_try_count += 1
# Initialize connection with all our satellites
self.all_connected = True
for satellite in self.dispatcher.all_daemons_links:
if satellite == self.link_to_myself:
continue
if not satellite.active:
continue
connected = self.daemon_connection_init(satellite, set_wait_new_conf=True)
logger.debug(" %s is %s", satellite, connected)
self.all_connected = self.all_connected and connected
if self.all_connected:
logger.info("- satellites connection #%s is ok", first_connection_try_count)
break
else:
logger.warning("- satellites connection #%s is not correct; "
"let's give another chance after %d seconds...",
first_connection_try_count,
self.link_to_myself.polling_interval)
if first_connection_try_count >= 3:
self.request_stop("All the daemons connections could not be established "
"despite %d tries! "
"Sorry, I bail out!" % first_connection_try_count,
exit_code=4)
time.sleep(self.link_to_myself.polling_interval)
# Now I have a connection with all the daemons I need to contact them,
# check they are alive and ready to run
_t0 = time.time()
self.all_connected = self.dispatcher.check_reachable()
statsmgr.timer('dispatcher.check-alive', time.time() - _t0)
_t0 = time.time()
# Preparing the configuration for dispatching
logger.info("Preparing the configuration for dispatching...")
self.dispatcher.prepare_dispatch()
statsmgr.timer('dispatcher.prepare-dispatch', time.time() - _t0)
logger.info("- configuration is ready to dispatch")
# Loop for the first configuration dispatching, if the first dispatch fails, bail out!
# Without a correct configuration, Alignak daemons will not run correctly
first_dispatch_try_count = 0
logger.info("Dispatching the configuration to my satellites...")
while True:
first_dispatch_try_count += 1
# Check reachable - if a configuration is prepared, this will force the
# daemons communication, and the dispatching will be launched
_t0 = time.time()
logger.info("- configuration dispatching #%s...", first_dispatch_try_count)
self.dispatcher.check_reachable(forced=True)
statsmgr.timer('dispatcher.dispatch', time.time() - _t0)
# Make a pause to let our satellites get ready...
pause = max(1, max(self.conf.daemons_dispatch_timeout, len(self.my_daemons) * 0.5))
# pause = len(self.my_daemons) * 0.2
logger.info("- pausing %d seconds...", pause)
time.sleep(pause)
_t0 = time.time()
logger.info("- checking configuration dispatch...")
# Checking the dispatch is accepted
self.dispatcher.check_dispatch()
statsmgr.timer('dispatcher.check-dispatch', time.time() - _t0)
if self.dispatcher.dispatch_ok:
logger.info("- configuration dispatching #%s is ok", first_dispatch_try_count)
break
else:
logger.warning("- configuration dispatching #%s is not correct; "
"let's give another chance...", first_dispatch_try_count)
if first_dispatch_try_count >= 3:
self.request_stop("The configuration could not be dispatched despite %d tries! "
"Sorry, I bail out!" % first_connection_try_count,
exit_code=4) | [
"Monitored configuration preparation and dispatch\n\n :return: None\n "
] |
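The two loops above share the same bounded-retry pattern: try, pause, and bail out after three failed attempts. Below is a minimal stand-alone sketch of that pattern; the retry helper and the flaky step are illustrative, not part of the daemon.

import time

def retry(step, attempts=3, pause=1):
    # Try the given step up to 'attempts' times, pausing between tries,
    # and bail out (like request_stop above) when every try has failed.
    for try_count in range(1, attempts + 1):
        if step():
            print("- attempt #%d is ok" % try_count)
            return True
        print("- attempt #%d is not correct; let's give another chance..." % try_count)
        if try_count >= attempts:
            raise SystemExit("Could not succeed despite %d tries! Sorry, I bail out!" % attempts)
        time.sleep(pause)

flaky_step = iter([False, False, True])
retry(lambda: next(flaky_step), attempts=3, pause=0)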
Please provide a description of the function:def do_before_loop(self):
logger.info("I am the arbiter: %s", self.link_to_myself.name)
# If I am a spare, I do not have anything to do here...
if not self.is_master:
logger.debug("Waiting for my master death...")
return
# Arbiter check if some daemons need to be started
if not self.daemons_start(run_daemons=True):
self.request_stop(message="Some Alignak daemons did not started correctly.",
exit_code=4)
if not self.daemons_check():
self.request_stop(message="Some Alignak daemons cannot be checked.",
exit_code=4)
# Make a pause to let our started daemons get ready...
pause = max(1, max(self.conf.daemons_start_timeout, len(self.my_daemons) * 0.5))
if pause:
logger.info("Pausing %.2f seconds...", pause)
time.sleep(pause)
# Prepare and dispatch the monitored configuration
self.configuration_dispatch()
# Now we can get all initial broks for our satellites
_t0 = time.time()
self.get_initial_broks_from_satellites()
statsmgr.timer('broks.get-initial', time.time() - _t0)
# Now create the external commands manager
# We are a dispatcher: our role is to dispatch commands to the schedulers
self.external_commands_manager = ExternalCommandManager(
self.conf, 'dispatcher', self, self.conf.accept_passive_unknown_check_results,
self.conf.log_external_commands) | [
"Called before the main daemon loop.\n\n :return: None\n "
] |
Please provide a description of the function:def do_loop_turn(self):
# pylint: disable=too-many-branches, too-many-statements, too-many-locals
# If I am a spare, I only wait for the master arbiter to die...
if not self.is_master:
logger.debug("Waiting for my master death...")
self.wait_for_master_death()
return
if self.loop_count % self.alignak_monitor_period == 1:
self.get_alignak_status(details=True)
# Maybe an external process requested Alignak stop...
if self.kill_request:
logger.info("daemon stop mode ...")
if not self.dispatcher.stop_request_sent:
logger.info("entering daemon stop mode, time before exiting: %s",
self.conf.daemons_stop_timeout)
self.dispatcher.stop_request()
if time.time() > self.kill_timestamp + self.conf.daemons_stop_timeout:
logger.info("daemon stop mode delay reached, immediate stop")
self.dispatcher.stop_request(stop_now=True)
time.sleep(1)
self.interrupted = True
logger.info("exiting...")
if not self.kill_request:
# Main loop treatment
# Try to see if one of my module is dead, and restart previously dead modules
self.check_and_del_zombie_modules()
# Call modules that manage a starting tick pass
_t0 = time.time()
self.hook_point('tick')
statsmgr.timer('hook.tick', time.time() - _t0)
# Look for logging timeperiods activation change (active/inactive)
self.check_and_log_tp_activation_change()
# Check that my daemons are alive
if not self.daemons_check():
if self.conf.daemons_failure_kill:
self.request_stop(message="Some Alignak daemons cannot be checked.",
exit_code=4)
else:
logger.warning("Should have killed my children if "
"'daemons_failure_kill' were set!")
# Now the dispatcher job - check if all daemons are reachable and have a configuration
if not self.daemons_reachability_check():
logger.warning("A new configuration dispatch is required!")
# Prepare and dispatch the monitored configuration
self.configuration_dispatch(self.dispatcher.not_configured)
# Now get things from our module instances
_t0 = time.time()
self.get_objects_from_from_queues()
statsmgr.timer('get-objects-from-queues', time.time() - _t0)
# Maybe our satellites raised new broks. Reap them...
_t0 = time.time()
self.get_broks_from_satellites()
statsmgr.timer('broks.got.time', time.time() - _t0)
# One broker is responsible for our broks, we give him our broks
_t0 = time.time()
self.push_broks_to_broker()
statsmgr.timer('broks.pushed.time', time.time() - _t0)
# # We push our external commands to our schedulers...
# _t0 = time.time()
# self.push_external_commands_to_schedulers()
# statsmgr.timer('external-commands.pushed.time', time.time() - _t0)
if self.system_health and (self.loop_count % self.system_health_period == 1):
perfdatas = []
cpu_count = psutil.cpu_count()
perfdatas.append("'cpu_count'=%d" % cpu_count)
logger.debug(" . cpu count: %d", cpu_count)
cpu_percents = psutil.cpu_percent(percpu=True)
cpu = 1
for percent in cpu_percents:
perfdatas.append("'cpu_%d_percent'=%.2f%%" % (cpu, percent))
cpu += 1
cpu_times_percent = psutil.cpu_times_percent(percpu=True)
cpu = 1
            for cpu_time_percent in cpu_times_percent:
                logger.debug(" . cpu time percent: %s", cpu_time_percent)
                for key in cpu_time_percent._fields:
                    perfdatas.append(
                        "'cpu_%d_%s_percent'=%.2f%%" % (cpu, key,
                                                        getattr(cpu_time_percent, key)))
cpu += 1
logger.info("%s cpu|%s", self.name, " ".join(perfdatas))
perfdatas = []
disk_partitions = psutil.disk_partitions(all=False)
for disk_partition in disk_partitions:
logger.debug(" . disk partition: %s", disk_partition)
disk = getattr(disk_partition, 'mountpoint')
disk_usage = psutil.disk_usage(disk)
logger.debug(" . disk usage: %s", disk_usage)
for key in disk_usage._fields:
if 'percent' in key:
perfdatas.append("'disk_%s_percent_used'=%.2f%%"
% (disk, getattr(disk_usage, key)))
else:
perfdatas.append("'disk_%s_%s'=%dB"
% (disk, key, getattr(disk_usage, key)))
logger.info("%s disks|%s", self.name, " ".join(perfdatas))
perfdatas = []
virtual_memory = psutil.virtual_memory()
logger.debug(" . memory: %s", virtual_memory)
for key in virtual_memory._fields:
if 'percent' in key:
perfdatas.append("'mem_percent_used_%s'=%.2f%%"
% (key, getattr(virtual_memory, key)))
else:
perfdatas.append("'mem_%s'=%dB"
% (key, getattr(virtual_memory, key)))
swap_memory = psutil.swap_memory()
logger.debug(" . memory: %s", swap_memory)
for key in swap_memory._fields:
if 'percent' in key:
perfdatas.append("'swap_used_%s'=%.2f%%"
% (key, getattr(swap_memory, key)))
else:
perfdatas.append("'swap_%s'=%dB"
% (key, getattr(swap_memory, key)))
logger.info("%s memory|%s", self.name, " ".join(perfdatas)) | [
"Loop turn for Arbiter\n\n If not a master daemon, wait for my master death...\n Else, run:\n * Check satellites are alive\n * Check and dispatch (if needed) the configuration\n * Get broks and external commands from the satellites\n * Push broks and external commands to the satellites\n\n :return: None\n "
] |
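The system-health block at the end of the loop turns psutil named tuples into Nagios-style perfdata strings. Here is a minimal sketch of that formatting, limited to the memory metrics; the daemon name in the print is made up.

import psutil

def memory_perfdata():
    # Each field of the psutil named tuple becomes one perfdata token:
    # percentages as '%.2f%%' and raw counters as bytes, as in the loop above.
    perfdatas = []
    virtual_memory = psutil.virtual_memory()
    for key in virtual_memory._fields:
        value = getattr(virtual_memory, key)
        if 'percent' in key:
            perfdatas.append("'mem_percent_used_%s'=%.2f%%" % (key, value))
        else:
            perfdatas.append("'mem_%s'=%dB" % (key, value))
    return " ".join(perfdatas)

print("arbiter-master memory|%s" % memory_perfdata())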
Please provide a description of the function:def get_daemon_stats(self, details=False): # pylint: disable=too-many-branches
now = int(time.time())
# Call the base Daemon one
res = super(Arbiter, self).get_daemon_stats(details=details)
res.update({
'name': self.link_to_myself.get_name() if self.link_to_myself else self.name,
'type': self.type,
'daemons_states': {}
})
if details:
res['monitoring_objects'] = {}
for _, _, strclss, _, _ in list(self.conf.types_creations.values()):
if strclss in ['hostescalations', 'serviceescalations']:
logger.debug("Ignoring count for '%s'...", strclss)
continue
objects_list = getattr(self.conf, strclss, [])
res['monitoring_objects'][strclss] = {
'count': len(objects_list)
}
res['monitoring_objects'][strclss].update({'items': []})
try:
dump_list = sorted(objects_list, key=lambda k: k.get_name())
except AttributeError: # pragma: no cover, simple protection
dump_list = objects_list
# Dump at DEBUG level because some tests break with INFO level, and it is not
# really necessary to have information about each object ;
for cur_obj in dump_list:
if strclss == 'services':
res['monitoring_objects'][strclss]['items'].append(cur_obj.get_full_name())
else:
res['monitoring_objects'][strclss]['items'].append(cur_obj.get_name())
# Arbiter counters, including the loaded configuration objects and the dispatcher data
counters = res['counters']
counters['external-commands'] = len(self.external_commands)
counters['broks'] = len(self.broks)
for _, _, strclss, _, _ in list(self.conf.types_creations.values()):
if strclss in ['hostescalations', 'serviceescalations']:
logger.debug("Ignoring count for '%s'...", strclss)
continue
objects_list = getattr(self.conf, strclss, [])
counters[strclss] = len(objects_list)
# Configuration dispatch counters
if getattr(self, "dispatcher", None):
for sat_type in ('arbiters', 'schedulers', 'reactionners',
'brokers', 'receivers', 'pollers'):
counters["dispatcher.%s" % sat_type] = len(getattr(self.dispatcher, sat_type))
# Report our daemons states, but only if a dispatcher exists
if getattr(self, 'dispatcher', None):
# Daemon properties that we are interested in
res['daemons_states'] = {}
for satellite in self.dispatcher.all_daemons_links:
if satellite == self.link_to_myself:
continue
# Get the information to be published for a satellite
res['daemons_states'][satellite.name] = satellite.give_satellite_json()
res['livestate'] = {
"timestamp": now,
"daemons": {}
}
state = 0
for satellite in self.dispatcher.all_daemons_links:
if satellite == self.link_to_myself:
continue
livestate = 0
if satellite.active:
if not satellite.reachable:
livestate = 1
elif not satellite.alive:
livestate = 2
state = max(state, livestate)
else:
livestate = 3
res['livestate']['daemons'][satellite.name] = livestate
res['livestate'].update({
"state": state,
"output": [
"all daemons are up and running.",
"warning because some daemons are not reachable.",
"critical because some daemons not responding."
][state],
# "long_output": "Long output...",
# "perf_data": "'counter'=1"
})
return res | [
"Increase the stats provided by the Daemon base class\n\n :return: stats dictionary\n :rtype: dict\n "
] |
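The livestate part of the reply maps each satellite to 0 (ok), 1 (not reachable), 2 (not alive) or 3 (inactive) and keeps the worst value as the overall state. A sketch with hypothetical satellite tuples instead of real SatelliteLink objects:

satellites = [
    # (name, active, alive, reachable) -- example values only
    ("scheduler-master", True, True, True),
    ("poller-master", True, True, False),
    ("receiver-master", False, True, True),
]

state = 0
daemons = {}
for name, active, alive, reachable in satellites:
    livestate = 3
    if active:
        livestate = 0
        if not reachable:
            livestate = 1
        elif not alive:
            livestate = 2
        # Only active satellites contribute to the overall state
        state = max(state, livestate)
    daemons[name] = livestate

output = ["all daemons are up and running.",
          "warning because some daemons are not reachable.",
          "critical because some daemons not responding."][state]
print(state, output, daemons)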
Please provide a description of the function:def get_monitoring_problems(self):
res = self.get_id()
res['problems'] = {}
# Report our schedulers information, but only if a dispatcher exists
if getattr(self, 'dispatcher', None) is None:
return res
for satellite in self.dispatcher.all_daemons_links:
if satellite.type not in ['scheduler']:
continue
if not satellite.active:
continue
if satellite.statistics and 'problems' in satellite.statistics:
res['problems'][satellite.name] = {
'_freshness': satellite.statistics['_freshness'],
'problems': satellite.statistics['problems']
}
return res | [
"Get the schedulers satellites problems list\n\n :return: problems dictionary\n :rtype: dict\n "
] |
Please provide a description of the function:def get_livesynthesis(self):
res = self.get_id()
res['livesynthesis'] = {
'_overall': {
'_freshness': int(time.time()),
'livesynthesis': {
'hosts_total': 0,
'hosts_not_monitored': 0,
'hosts_up_hard': 0,
'hosts_up_soft': 0,
'hosts_down_hard': 0,
'hosts_down_soft': 0,
'hosts_unreachable_hard': 0,
'hosts_unreachable_soft': 0,
'hosts_problems': 0,
'hosts_acknowledged': 0,
'hosts_in_downtime': 0,
'hosts_flapping': 0,
'services_total': 0,
'services_not_monitored': 0,
'services_ok_hard': 0,
'services_ok_soft': 0,
'services_warning_hard': 0,
'services_warning_soft': 0,
'services_critical_hard': 0,
'services_critical_soft': 0,
'services_unknown_hard': 0,
'services_unknown_soft': 0,
'services_unreachable_hard': 0,
'services_unreachable_soft': 0,
'services_problems': 0,
'services_acknowledged': 0,
'services_in_downtime': 0,
'services_flapping': 0,
}
}
}
# Report our schedulers information, but only if a dispatcher exists
if getattr(self, 'dispatcher', None) is None:
return res
for satellite in self.dispatcher.all_daemons_links:
if satellite.type not in ['scheduler']:
continue
if not satellite.active:
continue
if 'livesynthesis' in satellite.statistics:
# Scheduler detailed live synthesis
res['livesynthesis'][satellite.name] = {
'_freshness': satellite.statistics['_freshness'],
'livesynthesis': satellite.statistics['livesynthesis']
}
# Cumulated live synthesis
for prop in res['livesynthesis']['_overall']['livesynthesis']:
if prop in satellite.statistics['livesynthesis']:
res['livesynthesis']['_overall']['livesynthesis'][prop] += \
satellite.statistics['livesynthesis'][prop]
return res | [
"Get the schedulers satellites live synthesis\n\n :return: compiled livesynthesis dictionary\n :rtype: dict\n "
] |
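The '_overall' entry is simply the per-scheduler counters summed together. A reduced sketch with invented scheduler names and numbers, and only three of the counters:

overall = {'hosts_total': 0, 'hosts_up_hard': 0, 'services_total': 0}
schedulers = {
    'scheduler-north': {'hosts_total': 10, 'hosts_up_hard': 9, 'services_total': 50},
    'scheduler-south': {'hosts_total': 5, 'hosts_up_hard': 5, 'services_total': 20},
}
for name, livesynthesis in schedulers.items():
    for prop in overall:
        # Only cumulate the counters the scheduler actually reported
        if prop in livesynthesis:
            overall[prop] += livesynthesis[prop]
print(overall)  # {'hosts_total': 15, 'hosts_up_hard': 14, 'services_total': 70}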
Please provide a description of the function:def main(self):
try:
# Start the daemon
if not self.verify_only and not self.do_daemon_init_and_start():
self.exit_on_error(message="Daemon initialization error", exit_code=3)
if self.verify_only:
self.setup_alignak_logger()
# Load monitoring configuration files
self.load_monitoring_config_file()
# Set my own process title
self.set_proctitle(self.name)
# Now we can start our "external" modules (if any):
self.modules_manager.start_external_instances()
# Now we can load the retention data
self.hook_point('load_retention')
# And go for the main loop
while True:
self.do_main_loop()
logger.info("Exited from the main loop.")
# Exiting the main loop because of a configuration reload
if not self.need_config_reload:
# If no configuration reload is required, stop the arbiter daemon
self.request_stop()
else:
# Loop if a configuration reload is raised while
# still reloading the configuration
while self.need_config_reload:
# Clear the former configuration
self.need_config_reload = False
self.link_to_myself = None
self.conf = Config()
# Load monitoring configuration files
_ts = time.time()
logger.warning('--- Reloading configuration...')
self.load_monitoring_config_file()
duration = int(time.time() - _ts)
self.add(make_monitoring_log('info', 'CONFIGURATION RELOAD;%d' % duration))
logger.warning('--- Configuration reloaded, %d seconds', duration)
# Make a pause to let our satellites get ready...
pause = max(1, self.conf.daemons_new_conf_timeout)
if pause:
logger.info("Pausing %.2f seconds...", pause)
time.sleep(pause)
except Exception as exp: # pragma: no cover, this should never happen indeed ;)
# Only a master arbiter can stop the daemons
if self.is_master:
# Stop the daemons
self.daemons_stop(timeout=self.conf.daemons_stop_timeout)
self.exit_on_exception(raised_exception=exp)
raise | [
"Main arbiter function::\n\n * Set logger\n * Init daemon\n * Launch modules\n * Endless main process loop\n\n :return: None\n "
] |
Please provide a description of the function:def overall_state_id(self):
overall_state = 0
if not self.monitored:
overall_state = 5
elif self.acknowledged:
overall_state = 1
elif self.downtimed:
overall_state = 2
elif self.state_type == 'HARD':
if self.state == 'WARNING':
overall_state = 3
elif self.state == 'CRITICAL':
overall_state = 4
elif self.state == 'UNKNOWN':
overall_state = 3
elif self.state == 'UNREACHABLE':
overall_state = 4
return overall_state | [
"Get the service overall state.\n\n The service overall state identifier is the service status including:\n - the monitored state\n - the acknowledged state\n - the downtime state\n\n The overall state is (prioritized):\n - a service is not monitored (5)\n - a service critical or unreachable (4)\n - a service warning or unknown (3)\n - a service downtimed (2)\n - a service acknowledged (1)\n - a service ok (0)\n\n *Note* that services in unknown state are considered as warning, and unreachable ones\n are considered as critical!\n\n Also note that the service state is considered only for HARD state type!\n\n "
] |
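The priority order documented above can be checked with a stub object run through the same decision ladder. This helper and the SimpleNamespace stand-in are illustrative, not the real Service class.

from types import SimpleNamespace

def overall_state_id(svc):
    # Same ladder as the property above: not monitored wins, then acknowledgement,
    # then downtime, then the HARD state severity.
    if not svc.monitored:
        return 5
    if svc.acknowledged:
        return 1
    if svc.downtimed:
        return 2
    if svc.state_type == 'HARD':
        if svc.state in ('WARNING', 'UNKNOWN'):
            return 3
        if svc.state in ('CRITICAL', 'UNREACHABLE'):
            return 4
    return 0

svc = SimpleNamespace(monitored=True, acknowledged=True, downtimed=True,
                      state_type='HARD', state='CRITICAL')
print(overall_state_id(svc))  # 1: the acknowledgement wins over downtime and state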
Please provide a description of the function:def fill_predictive_missing_parameters(self):
if self.initial_state == 'w':
self.state = u'WARNING'
elif self.initial_state == 'u':
self.state = u'UNKNOWN'
elif self.initial_state == 'c':
self.state = u'CRITICAL'
elif self.initial_state == 'x':
self.state = u'UNREACHABLE' | [
"define state with initial_state\n\n :return: None\n "
] |
Please provide a description of the function:def get_name(self):
if hasattr(self, 'service_description'):
return self.service_description
if hasattr(self, 'name'):
return self.name
return 'SERVICE-DESCRIPTION-MISSING' | [
"Accessor to service_description attribute or name if first not defined\n\n :return: service name\n :rtype: str\n "
] |
Please provide a description of the function:def get_full_name(self):
if self.is_tpl():
return "tpl-%s/%s" % (getattr(self, 'host_name', 'XxX'), self.name)
if hasattr(self, 'host_name') and hasattr(self, 'service_description'):
return "%s/%s" % (self.host_name, self.service_description)
return 'UNKNOWN-SERVICE' | [
"Get the full name for debugging (host_name/service_description)\n\n :return: service full name\n :rtype: str\n "
] |
Please provide a description of the function:def get_groupnames(self, sgs):
return ','.join([sgs[sg].get_name() for sg in self.servicegroups]) | [
"Get servicegroups list\n\n :return: comma separated list of servicegroups\n :rtype: str\n "
] |
Please provide a description of the function:def is_correct(self):
state = True
cls = self.__class__
hname = getattr(self, 'host_name', '')
hgname = getattr(self, 'hostgroup_name', '')
sdesc = getattr(self, 'service_description', '')
        if not sdesc:
            self.add_error("a %s has been defined without service_description, from: %s"
                           % (self.my_type, self.imported_from))
        elif not hname and not hgname:
            self.add_error("a %s has been defined without host_name nor "
                           "hostgroup_name, from: %s" % (self.my_type, self.imported_from))
        elif not hname:
            self.add_error("[%s::%s] not bound to any host."
                           % (self.my_type, self.get_name()))
elif self.host is None:
self.add_error("[%s::%s] unknown host_name '%s'"
% (self.my_type, self.get_name(), self.host_name))
# Set display_name if needed
if not getattr(self, 'display_name', ''):
self.display_name = "%s/%s" % (hname, sdesc)
for char in cls.illegal_object_name_chars:
if char not in self.service_description:
continue
self.add_error("[%s::%s] service_description got an illegal character: %s"
% (self.my_type, self.get_name(), char))
return super(Service, self).is_correct() and state | [
"Check if this object configuration is correct ::\n\n * Check our own specific properties\n * Call our parent class is_correct checker\n\n :return: True if the configuration is correct, otherwise False\n :rtype: bool\n "
] |
Please provide a description of the function:def duplicate(self, host):
# pylint: disable=too-many-locals
duplicates = []
# In macro, it's all in UPPER case
prop = self.duplicate_foreach.strip().upper()
if prop not in host.customs: # If I do not have the property, we bail out
return duplicates
# Get the list entry, and the not one if there is one
entry = host.customs[prop]
# Look at the list of the key we do NOT want maybe,
# for _disks it will be _!disks
not_entry = host.customs.get('_' + '!' + prop[1:], '').split(',')
not_keys = strip_and_uniq(not_entry)
default_value = getattr(self, 'default_value', '')
# Transform the generator string to a list
# Missing values are filled with the default value
try:
key_values = tuple(generate_key_value_sequences(entry, default_value))
except KeyValueSyntaxError as exc:
fmt_dict = {
'prop': self.duplicate_foreach,
'host': host.get_name(),
'svc': self.service_description,
'entry': entry,
'exc': exc,
}
err = (
"The custom property %(prop)r of the "
"host %(host)r is not a valid entry for a service generator: %(exc)s, "
"with entry=%(entry)r") % fmt_dict
logger.warning(err)
host.add_error(err)
return duplicates
for key_value in key_values:
key = key_value['KEY']
# Maybe this key is in the NOT list, if so, skip it
if key in not_keys:
continue
new_s = self.copy()
new_s.host_name = host.get_name()
if self.is_tpl(): # if template, the new one is not
new_s.register = 1
for key in key_value:
if key == 'KEY':
if hasattr(self, 'service_description'):
# We want to change all illegal chars to a _ sign.
# We can't use class.illegal_obj_char
# because in the "explode" phase, we do not have access to this data! :(
safe_key_value = re.sub(r'[' + "`~!$%^&*\"|'<>?,()=" + ']+', '_',
key_value[key])
new_s.service_description = self.service_description.replace(
'$' + key + '$', safe_key_value
)
# Here is a list of property where we will expand the $KEY$ by the value
_the_expandables = ['check_command', 'aggregation', 'event_handler']
for prop in _the_expandables:
if hasattr(self, prop):
# here we can replace VALUE, VALUE1, VALUE2,...
setattr(new_s, prop, getattr(new_s, prop).replace('$' + key + '$',
key_value[key]))
if hasattr(self, 'service_dependencies'):
for i, servicedep in enumerate(new_s.service_dependencies):
new_s.service_dependencies[i] = servicedep.replace(
'$' + key + '$', key_value[key]
)
# And then add in our list this new service
duplicates.append(new_s)
return duplicates | [
"For a given host, look for all copy we must create for for_each property\n\n :param host: alignak host object\n :type host: alignak.objects.host.Host\n :return: list\n :rtype: list\n "
] |
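The core of the duplication is a plain $KEY$ / $VALUE$ substitution over the generated key/value pairs. Below is a self-contained sketch with invented key/value pairs, as if they had been produced by generate_key_value_sequences (the exact generator syntax lives there).

import re

service_description = "Disk $KEY$"
check_command = "check_disk!$VALUE$"
key_values = [
    # Hypothetical pairs, standing in for the generator output
    {'KEY': 'C', 'VALUE': '80%!90%'},
    {'KEY': 'D', 'VALUE': '95%!98%'},
]

for key_value in key_values:
    # Illegal characters in the key are replaced by '_' in the description only
    safe_key = re.sub(r'[' + "`~!$%^&*\"|'<>?,()=" + ']+', '_', key_value['KEY'])
    desc = service_description.replace('$KEY$', safe_key)
    cmd = check_command
    for key in key_value:
        cmd = cmd.replace('$' + key + '$', key_value[key])
    print(desc, '->', cmd)
# Disk C -> check_disk!80%!90%
# Disk D -> check_disk!95%!98%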
Please provide a description of the function:def set_state_from_exit_status(self, status, notif_period, hosts, services):
now = time.time()
# we should put in last_state the good last state:
# if not just change the state by an problem/impact
# we can take current state. But if it's the case, the
# real old state is self.state_before_impact (it's the TRUE
# state in fact)
# but only if the global conf have enable the impact state change
cls = self.__class__
if cls.enable_problem_impacts_states_change \
and self.is_impact \
and not self.state_changed_since_impact:
self.last_state = self.state_before_impact
else: # standard case
self.last_state = self.state
# The last times are kept as integer values rather than float... no need for ms!
if status == 0:
self.state = u'OK'
self.state_id = 0
self.last_time_ok = int(self.last_state_update)
# self.last_time_ok = self.last_state_update
state_code = 'o'
elif status == 1:
self.state = u'WARNING'
self.state_id = 1
self.last_time_warning = int(self.last_state_update)
# self.last_time_warning = self.last_state_update
state_code = 'w'
elif status == 2:
self.state = u'CRITICAL'
self.state_id = 2
self.last_time_critical = int(self.last_state_update)
# self.last_time_critical = self.last_state_update
state_code = 'c'
elif status == 3:
self.state = u'UNKNOWN'
self.state_id = 3
self.last_time_unknown = int(self.last_state_update)
# self.last_time_unknown = self.last_state_update
state_code = 'u'
elif status == 4:
self.state = u'UNREACHABLE'
self.state_id = 4
self.last_time_unreachable = int(self.last_state_update)
# self.last_time_unreachable = self.last_state_update
state_code = 'x'
else:
self.state = u'CRITICAL' # exit code UNDETERMINED
self.state_id = 2
self.last_time_critical = int(self.last_state_update)
# self.last_time_critical = self.last_state_update
state_code = 'c'
if state_code in self.flap_detection_options:
self.add_flapping_change(self.state != self.last_state)
# Now we add a value, we update the is_flapping prop
self.update_flapping(notif_period, hosts, services)
if self.state != self.last_state:
self.last_state_change = self.last_state_update
self.duration_sec = now - self.last_state_change | [
"Set the state in UP, WARNING, CRITICAL, UNKNOWN or UNREACHABLE\n according to the status of a check result.\n\n :param status: integer between 0 and 4\n :type status: int\n :return: None\n "
] |
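The status-to-state mapping applied above, written out as plain data (not the scheduler's own table): exit codes 0..4 map to the five states, anything else falls back to CRITICAL.

EXIT_STATUS_MAP = {
    0: ('OK', 0, 'o'),
    1: ('WARNING', 1, 'w'),
    2: ('CRITICAL', 2, 'c'),
    3: ('UNKNOWN', 3, 'u'),
    4: ('UNREACHABLE', 4, 'x'),
}

def map_exit_status(status):
    # Undetermined exit codes are treated as CRITICAL, as in the method above
    return EXIT_STATUS_MAP.get(status, ('CRITICAL', 2, 'c'))

print(map_exit_status(3))    # ('UNKNOWN', 3, 'u')
print(map_exit_status(42))   # ('CRITICAL', 2, 'c')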
Please provide a description of the function:def is_state(self, status):
# pylint: disable=too-many-return-statements
if status == self.state:
return True
# Now low status
if status == 'o' and self.state == u'OK':
return True
if status == 'c' and self.state == u'CRITICAL':
return True
if status == 'w' and self.state == u'WARNING':
return True
if status == 'u' and self.state == u'UNKNOWN':
return True
if status == 'x' and self.state == u'UNREACHABLE':
return True
return False | [
"Return True if status match the current service status\n\n :param status: status to compare ( \"o\", \"c\", \"w\", \"u\", \"x\"). Usually comes from config files\n :type status: str\n :return: True if status <=> self.status, otherwise False\n :rtype: bool\n "
] |
Please provide a description of the function:def last_time_non_ok_or_up(self):
non_ok_times = [x for x in [self.last_time_warning,
self.last_time_critical,
self.last_time_unknown]
if x > self.last_time_ok]
if not non_ok_times:
last_time_non_ok = 0 # todo: program_start would be better?
else:
last_time_non_ok = min(non_ok_times)
return last_time_non_ok | [
"Get the last time the service was in a non-OK state\n\n :return: the nearest last time the service was not ok\n :rtype: int\n "
] |
Please provide a description of the function:def raise_check_result(self):
if not self.__class__.log_active_checks:
return
log_level = 'info'
if self.state in [u'WARNING', u'UNREACHABLE']:
log_level = 'warning'
elif self.state == u'CRITICAL':
log_level = 'error'
brok = make_monitoring_log(
log_level, 'ACTIVE SERVICE CHECK: %s;%s;%s;%d;%s' % (self.host_name, self.get_name(),
self.state, self.attempt,
self.output)
)
self.broks.append(brok) | [
"Raise ACTIVE CHECK RESULT entry\n Example : \"ACTIVE SERVICE CHECK: server;DOWN;HARD;1;I don't know what to say...\"\n\n :return: None\n "
] |
Please provide a description of the function:def raise_notification_log_entry(self, notif, contact, host_ref):
if self.__class__.log_notifications:
log_level = 'info'
command = notif.command_call
if notif.type in [u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED',
u'CUSTOM', u'ACKNOWLEDGEMENT',
u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED']:
state = '%s (%s)' % (notif.type, self.state)
else:
state = self.state
if self.state == 'WARNING':
log_level = 'warning'
if self.state == 'CRITICAL':
log_level = 'error'
brok = make_monitoring_log(
log_level, "SERVICE NOTIFICATION: %s;%s;%s;%s;%s;%s;%s" % (
contact.get_name(), host_ref.get_name(), self.get_name(), state,
notif.notif_nb, command.get_name(), self.output
)
)
self.broks.append(brok)
if 'ALIGNAK_LOG_NOTIFICATIONS' in os.environ:
if os.environ['ALIGNAK_LOG_NOTIFICATIONS'] == 'WARNING':
logger.warning("SERVICE NOTIFICATION: %s;%s;%s;%s;%s;%s;%s",
contact.get_name(), host_ref.get_name(), self.get_name(), state,
notif.notif_nb, command.get_name(), self.output)
else:
logger.info("SERVICE NOTIFICATION: %s;%s;%s;%s;%s;%s;%s",
contact.get_name(), host_ref.get_name(), self.get_name(), state,
notif.notif_nb, command.get_name(), self.output) | [
"Raise SERVICE NOTIFICATION entry (critical level)\n Format is : \"SERVICE NOTIFICATION: *contact.get_name()*;*host_name*;*self.get_name()*\n ;*state*;*command.get_name()*;*output*\"\n Example : \"SERVICE NOTIFICATION: superadmin;server;Load;UP;notify-by-rss;no output\"\n\n :param notif: notification object created by service alert\n :type notif: alignak.objects.notification.Notification\n :return: None\n "
] |
Please provide a description of the function:def raise_event_handler_log_entry(self, command):
if not self.__class__.log_event_handlers:
return
log_level = 'info'
if self.state == 'WARNING':
log_level = 'warning'
if self.state == 'CRITICAL':
log_level = 'error'
brok = make_monitoring_log(
log_level, "SERVICE EVENT HANDLER: %s;%s;%s;%s;%s;%s" % (
self.host_name, self.get_name(),
self.state, self.state_type,
self.attempt, command.get_name()
)
)
self.broks.append(brok) | [
"Raise SERVICE EVENT HANDLER entry (critical level)\n Format is : \"SERVICE EVENT HANDLER: *host_name*;*self.get_name()*;*state*;*state_type*\n ;*attempt*;*command.get_name()*\"\n Example : \"SERVICE EVENT HANDLER: server;Load;UP;HARD;1;notify-by-rss\"\n\n :param command: Handler launched\n :type command: alignak.objects.command.Command\n :return: None\n "
] |
Please provide a description of the function:def get_data_for_notifications(self, contact, notif, host_ref):
if not host_ref:
return [self, contact, notif]
return [host_ref, self, contact, notif] | [
"Get data for a notification\n\n :param contact: The contact to return\n :type contact:\n :param notif: the notification to return\n :type notif:\n :return: list containing the service, the host and the given parameters\n :rtype: list\n "
] |
Please provide a description of the function:def is_blocking_notifications(self, notification_period, hosts, services, n_type, t_wished):
# pylint: disable=too-many-return-statements
logger.debug("Checking if a service %s (%s) notification is blocked...",
self.get_full_name(), self.state)
host = hosts[self.host]
if t_wished is None:
t_wished = time.time()
# TODO
# forced notification
# pass if this is a custom notification
# Block if notifications are program-wide disabled
# Block if notifications are disabled for this service
# Block if the current status is in the notification_options w,u,c,r,f,s
if not self.enable_notifications or \
not self.notifications_enabled or \
'n' in self.notification_options:
logger.debug("Service: %s, notification %s sending is blocked by configuration",
self.get_name(), n_type)
return True
# Does the notification period allow sending out this notification?
if notification_period is not None and not notification_period.is_time_valid(t_wished):
logger.debug("Service: %s, notification %s sending is blocked by globals",
self.get_name(), n_type)
return True
if n_type in (u'PROBLEM', u'RECOVERY') and (
self.state == u'UNKNOWN' and 'u' not in self.notification_options or
self.state == u'WARNING' and 'w' not in self.notification_options or
self.state == u'CRITICAL' and 'c' not in self.notification_options or
self.state == u'OK' and 'r' not in self.notification_options or
self.state == u'UNREACHABLE' and 'x' not in self.notification_options):
logger.debug("Service: %s, notification %s sending is blocked by options: %s",
self.get_name(), n_type, self.notification_options)
return True
if (n_type in [u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED'] and
'f' not in self.notification_options):
logger.debug("Service: %s, notification %s sending is blocked by options: %s",
n_type, self.get_full_name(), self.notification_options)
return True
if (n_type in [u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED'] and
's' not in self.notification_options):
logger.debug("Service: %s, notification %s sending is blocked by options: %s",
n_type, self.get_full_name(), self.notification_options)
return True
# Acknowledgements make no sense when the status is ok/up
if n_type in [u'ACKNOWLEDGEMENT'] and self.state == self.ok_up:
logger.debug("Host: %s, notification %s sending is blocked by current state",
self.get_name(), n_type)
return True
# Block if host is in a scheduled downtime
if host.scheduled_downtime_depth > 0:
logger.debug("Service: %s, notification %s sending is blocked by downtime",
self.get_name(), n_type)
return True
# When in deep downtime, only allow end-of-downtime notifications
# In depth 1 the downtime just started and can be notified
if self.scheduled_downtime_depth > 1 and n_type not in (u'DOWNTIMEEND',
u'DOWNTIMECANCELLED'):
logger.debug("Service: %s, notification %s sending is blocked by deep downtime",
self.get_name(), n_type)
return True
# Block if in a scheduled downtime and a problem arises, or flapping event
if self.scheduled_downtime_depth > 0 and n_type in \
[u'PROBLEM', u'RECOVERY', u'ACKNOWLEDGEMENT',
u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED']:
logger.debug("Service: %s, notification %s sending is blocked by downtime",
self.get_name(), n_type)
return True
# Block if the status is SOFT
# Block if the problem has already been acknowledged
# Block if flapping
# Block if host is down
if self.state_type == u'SOFT' and n_type == u'PROBLEM' or \
self.problem_has_been_acknowledged and n_type != u'ACKNOWLEDGEMENT' or \
self.is_flapping and n_type not in [u'FLAPPINGSTART',
u'FLAPPINGSTOP',
u'FLAPPINGDISABLED'] or \
host.state != host.ok_up:
logger.debug("Service: %s, notification %s sending is blocked by soft state, "
"acknowledgement, flapping or host DOWN", self.get_name(), n_type)
return True
# Block if business rule smart notifications is enabled and all its
# children have been acknowledged or are under downtime.
if self.got_business_rule is True \
and self.business_rule_smart_notifications is True \
and self.business_rule_notification_is_blocked(hosts, services) is True \
and n_type == u'PROBLEM':
logger.debug("Service: %s, notification %s sending is blocked by business rules",
self.get_name(), n_type)
return True
logger.debug("Service: %s, notification %s sending is not blocked", self.get_name(), n_type)
return False | [
"Check if a notification is blocked by the service.\n Conditions are ONE of the following::\n\n * enable_notification is False (global)\n * not in a notification_period\n * notifications_enable is False (local)\n * notification_options is 'n' or matches the state ('UNKNOWN' <=> 'u' ...)\n (include flapping and downtimes)\n * state goes ok and type is 'ACKNOWLEDGEMENT' (no sense)\n * scheduled_downtime_depth > 0 and flapping (host is in downtime)\n * scheduled_downtime_depth > 1 and not downtime end (deep downtime)\n * scheduled_downtime_depth > 0 and problem or recovery (host is in downtime)\n * SOFT state of a problem (we raise notification ony on HARD state)\n * ACK notification when already ACK (don't raise again ACK)\n * not flapping notification in a flapping state\n * business rule smart notifications is enabled and all its children have been acknowledged\n or are under downtime\n * linked host is not up\n * linked host is in downtime\n\n :param n_type: notification type\n :type n_type:\n :param t_wished: the time we should like to notify the host (mostly now)\n :type t_wished: float\n :return: True if ONE of the above condition was met, otherwise False\n :rtype: bool\n TODO: Refactor this, a lot of code duplication with Host.is_blocking_notifications\n "
] |
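Just the notification_options part of the blocking logic, extracted as a small stand-alone helper. This is a simplification of the method above, not a drop-in replacement for it.

STATE_TO_OPTION = {'WARNING': 'w', 'CRITICAL': 'c', 'UNKNOWN': 'u',
                   'OK': 'r', 'UNREACHABLE': 'x'}

def blocked_by_options(state, n_type, notification_options):
    # 'n' disables everything; otherwise the option matching the state or
    # the notification type must be present in notification_options.
    if 'n' in notification_options:
        return True
    if n_type in ('PROBLEM', 'RECOVERY'):
        return STATE_TO_OPTION.get(state) not in notification_options
    if n_type in ('FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED'):
        return 'f' not in notification_options
    if n_type in ('DOWNTIMESTART', 'DOWNTIMEEND', 'DOWNTIMECANCELLED'):
        return 's' not in notification_options
    return False

print(blocked_by_options('WARNING', 'PROBLEM', ['w', 'c', 'r']))  # False
print(blocked_by_options('UNKNOWN', 'PROBLEM', ['w', 'c', 'r']))  # True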
Please provide a description of the function:def get_short_status(self, hosts, services):
mapping = {
0: "O",
1: "W",
2: "C",
3: "U",
4: "N",
}
if self.got_business_rule:
return mapping.get(self.business_rule.get_state(hosts, services), "n/a")
return mapping.get(self.state_id, "n/a") | [
"Get the short status of this host\n\n :return: \"O\", \"W\", \"C\", \"U', or \"n/a\" based on service state_id or business_rule state\n :rtype: str\n "
] |
Please provide a description of the function:def get_status(self, hosts, services):
if self.got_business_rule:
mapping = {
0: u'OK',
1: u'WARNING',
2: u'CRITICAL',
3: u'UNKNOWN',
4: u'UNREACHABLE',
}
return mapping.get(self.business_rule.get_state(hosts, services), "n/a")
return self.state | [
"Get the status of this host\n\n :return: \"OK\", \"WARNING\", \"CRITICAL\", \"UNKNOWN\" or \"n/a\" based on\n service state_id or business_rule state\n :rtype: str\n "
] |
Please provide a description of the function:def add_template(self, tpl):
objcls = self.inner_class.my_type
name = getattr(tpl, 'name', '')
sdesc = getattr(tpl, 'service_description', '')
hname = getattr(tpl, 'host_name', '')
logger.debug("Adding a %s template: host_name: %s, name: %s, service_description: %s",
objcls, hname, name, sdesc)
if not name and not hname:
msg = "a %s template has been defined without name nor host_name. from: %s" \
% (objcls, tpl.imported_from)
tpl.add_error(msg)
elif not name and not sdesc:
msg = "a %s template has been defined without name nor service_description. from: %s" \
% (objcls, tpl.imported_from)
tpl.add_error(msg)
elif not name:
# If name is not defined, use the host_name_service_description as name (fix #791)
setattr(tpl, 'name', "%s_%s" % (hname, sdesc))
tpl = self.index_template(tpl)
elif name:
tpl = self.index_template(tpl)
self.templates[tpl.uuid] = tpl
logger.debug('\tAdded service template #%d %s', len(self.templates), tpl) | [
"\n Adds and index a template into the `templates` container.\n\n This implementation takes into account that a service has two naming\n attribute: `host_name` and `service_description`.\n\n :param tpl: The template to add\n :type tpl:\n :return: None\n "
] |
Please provide a description of the function:def apply_inheritance(self):
super(Services, self).apply_inheritance()
# add_item only ensure we can build a key for services later (after explode)
for item in list(self.items.values()):
self.add_item(item, False) | [
" For all items and templates inherit properties and custom\n variables.\n\n :return: None\n "
] |
Please provide a description of the function:def find_srvs_by_hostname(self, host_name):
if hasattr(self, 'hosts'):
host = self.hosts.find_by_name(host_name)
if host is None:
return None
return host.get_services()
return None | [
"Get all services from a host based on a host_name\n\n :param host_name: the host name we want services\n :type host_name: str\n :return: list of services\n :rtype: list[alignak.objects.service.Service]\n "
] |
Please provide a description of the function:def find_srv_by_name_and_hostname(self, host_name, sdescr):
key = (host_name, sdescr)
return self.name_to_item.get(key, None) | [
"Get a specific service based on a host_name and service_description\n\n :param host_name: host name linked to needed service\n :type host_name: str\n :param sdescr: service name we need\n :type sdescr: str\n :return: the service found or None\n :rtype: alignak.objects.service.Service\n "
] |
Please provide a description of the function:def linkify(self, hosts, commands, timeperiods, contacts, # pylint: disable=R0913
resultmodulations, businessimpactmodulations, escalations,
servicegroups, checkmodulations, macromodulations):
self.linkify_with_timeperiods(timeperiods, 'notification_period')
self.linkify_with_timeperiods(timeperiods, 'check_period')
self.linkify_with_timeperiods(timeperiods, 'maintenance_period')
self.linkify_with_timeperiods(timeperiods, 'snapshot_period')
self.linkify_s_by_hst(hosts)
self.linkify_s_by_sg(servicegroups)
self.linkify_one_command_with_commands(commands, 'check_command')
self.linkify_one_command_with_commands(commands, 'event_handler')
self.linkify_one_command_with_commands(commands, 'snapshot_command')
self.linkify_with_contacts(contacts)
self.linkify_with_resultmodulations(resultmodulations)
self.linkify_with_business_impact_modulations(businessimpactmodulations)
# WARNING: all escalations will not be link here
# (just the escalation here, not serviceesca or hostesca).
# This last one will be link in escalations linkify.
self.linkify_with_escalations(escalations)
self.linkify_with_checkmodulations(checkmodulations)
self.linkify_with_macromodulations(macromodulations) | [
"Create link between objects::\n\n * service -> host\n * service -> command\n * service -> timeperiods\n * service -> contacts\n\n :param hosts: hosts to link\n :type hosts: alignak.objects.host.Hosts\n :param timeperiods: timeperiods to link\n :type timeperiods: alignak.objects.timeperiod.Timeperiods\n :param commands: commands to link\n :type commands: alignak.objects.command.Commands\n :param contacts: contacts to link\n :type contacts: alignak.objects.contact.Contacts\n :param resultmodulations: resultmodulations to link\n :type resultmodulations: alignak.objects.resultmodulation.Resultmodulations\n :param businessimpactmodulations: businessimpactmodulations to link\n :type businessimpactmodulations:\n alignak.objects.businessimpactmodulation.Businessimpactmodulations\n :param escalations: escalations to link\n :type escalations: alignak.objects.escalation.Escalations\n :param servicegroups: servicegroups to link\n :type servicegroups: alignak.objects.servicegroup.Servicegroups\n :param checkmodulations: checkmodulations to link\n :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations\n :param macromodulations: macromodulations to link\n :type macromodulations: alignak.objects.macromodulation.Macromodulations\n :return: None\n "
] |
Please provide a description of the function:def override_properties(self, hosts):
ovr_re = re.compile(r'^([^,]+),([^\s]+)\s+(.*)$')
ovr_hosts = [h for h in hosts if getattr(h, 'service_overrides', None)]
for host in ovr_hosts:
# We're only looking for hosts having service overrides defined
if isinstance(host.service_overrides, list):
service_overrides = host.service_overrides
else:
service_overrides = [host.service_overrides]
for ovr in service_overrides:
# Checks service override syntax
match = ovr_re.search(ovr)
if match is None:
host.add_error("Error: invalid service override syntax: %s" % ovr)
continue
sdescr, prop, value = match.groups()
# Looks for corresponding service
service = self.find_srv_by_name_and_hostname(getattr(host, "host_name", ""), sdescr)
if service is None:
host.add_error("Error: trying to override property '%s' on service '%s' "
"but it's unknown for this host" % (prop, sdescr))
continue
# Checks if override is allowed
excludes = ['host_name', 'service_description', 'use',
'servicegroups', 'trigger_name']
if prop in excludes:
host.add_error("Error: trying to override '%s', "
"a forbidden property for service '%s'" % (prop, sdescr))
continue
# Pythonize the value because here value is str.
setattr(service, prop, service.properties[prop].pythonize(value)) | [
"Handle service_overrides property for hosts\n ie : override properties for relevant services\n\n :param hosts: hosts we need to apply override properties\n :type hosts: alignak.objects.host.Hosts\n :return: None\n "
] |
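The override syntax parsed above is "<service_description>,<property> <value>". The regular expression below is the one used by the method; the override strings themselves are examples.

import re

ovr_re = re.compile(r'^([^,]+),([^\s]+)\s+(.*)$')
for ovr in ("Load,notification_interval 120",
            "Disk C,check_period workhours",
            "broken-override"):
    match = ovr_re.search(ovr)
    if match is None:
        print("invalid service override syntax: %s" % ovr)
        continue
    sdescr, prop, value = match.groups()
    print("override %s on service '%s' with '%s'" % (prop, sdescr, value))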
Please provide a description of the function:def linkify_s_by_hst(self, hosts):
for serv in self:
# If we do not have a host_name, we set it as
# a template element to delete. (like Nagios)
if not hasattr(serv, 'host_name'):
serv.host = None
continue
try:
hst_name = serv.host_name
# The new member list, in id
hst = hosts.find_by_name(hst_name)
# Let the host know we are his service
if hst is not None:
serv.host = hst.uuid
hst.add_service_link(serv.uuid)
else: # Ok, the host do not exists!
err = "Warning: the service '%s' got an invalid host_name '%s'" % \
(serv.get_name(), hst_name)
serv.configuration_warnings.append(err)
continue
except AttributeError:
pass | [
"Link services with their parent host\n\n :param hosts: Hosts to look for simple host\n :type hosts: alignak.objects.host.Hosts\n :return: None\n "
] |
Please provide a description of the function:def linkify_s_by_sg(self, servicegroups):
for serv in self:
new_servicegroups = []
if hasattr(serv, 'servicegroups') and serv.servicegroups != '':
for sg_name in serv.servicegroups:
sg_name = sg_name.strip()
servicegroup = servicegroups.find_by_name(sg_name)
if servicegroup is not None:
new_servicegroups.append(servicegroup.uuid)
else:
err = "Error: the servicegroup '%s' of the service '%s' is unknown" %\
(sg_name, serv.get_dbg_name())
serv.add_error(err)
serv.servicegroups = new_servicegroups | [
"Link services with servicegroups\n\n :param servicegroups: Servicegroups\n :type servicegroups: alignak.objects.servicegroup.Servicegroups\n :return: None\n "
] |
Please provide a description of the function:def apply_implicit_inheritance(self, hosts):
for prop in ('contacts', 'contact_groups', 'notification_interval',
'notification_period', 'resultmodulations', 'business_impact_modulations',
'escalations', 'poller_tag', 'reactionner_tag', 'check_period',
'business_impact', 'maintenance_period'):
for serv in self:
if hasattr(serv, 'host_name') and not getattr(serv, prop, None):
host = hosts.find_by_name(serv.host_name)
if host is not None and hasattr(host, prop):
logger.debug("Implicit inheritance for %s/%s: %s = %s",
serv.host_name, serv, prop, getattr(host, prop))
setattr(serv, prop, getattr(host, prop)) | [
"Apply implicit inheritance for special properties:\n contact_groups, notification_interval , notification_period\n So service will take info from host if necessary\n\n :param hosts: hosts list needed to look for a simple host\n :type hosts: alignak.objects.host.Hosts\n :return: None\n "
] |
Please provide a description of the function:def apply_dependencies(self, hosts):
for service in self:
if service.host and service.host_dependency_enabled:
host = hosts[service.host]
if host.active_checks_enabled:
service.act_depend_of.append(
(service.host, ['d', 'x', 's', 'f'], '', True)
)
host.act_depend_of_me.append(
(service.uuid, ['d', 'x', 's', 'f'], '', True)
)
host.child_dependencies.add(service.uuid)
service.parent_dependencies.add(service.host) | [
"Wrapper to loop over services and call Service.fill_daddy_dependency()\n\n :return: None\n "
] |
Please provide a description of the function:def clean(self):
to_del = []
for serv in self:
if not serv.host:
to_del.append(serv.uuid)
for service_uuid in to_del:
del self.items[service_uuid] | [
"Remove services without host object linked to\n\n Note that this should not happen!\n\n :return: None\n "
] |
Please provide a description of the function:def explode_services_from_hosts(self, hosts, service, hnames):
duplicate_for_hosts = [] # get the list of our host_names if more than 1
not_hosts = [] # the list of !host_name so we remove them after
for hname in hnames:
hname = hname.strip()
# If the name begin with a !, we put it in
# the not list
if hname.startswith('!'):
not_hosts.append(hname[1:])
else: # the standard list
duplicate_for_hosts.append(hname)
# remove duplicate items from duplicate_for_hosts:
duplicate_for_hosts = list(set(duplicate_for_hosts))
# Ok now we clean the duplicate_for_hosts with all hosts
# of the not
for hname in not_hosts:
            try:
                duplicate_for_hosts.remove(hname)
            except ValueError:
                pass
# Now we duplicate the service for all host_names
for hname in duplicate_for_hosts:
host = hosts.find_by_name(hname)
if host is None:
service.add_error("Error: The hostname %s is unknown for the service %s!"
% (hname, service.get_name()))
continue
if host.is_excluded_for(service):
continue
new_s = service.copy()
new_s.host_name = hname
self.add_item(new_s) | [
"\n Explodes a service based on a list of hosts.\n\n :param hosts: The hosts container\n :type hosts:\n :param service: The base service to explode\n :type service:\n :param hnames: The host_name list to explode service on\n :type hnames: str\n :return: None\n "
] |
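A minimal sketch of the host_name list handling above: names prefixed with '!' are excluded from the duplication list. The host names are examples.

hnames = ["srv01", "srv02", "!srv02", "srv03", "srv01"]

duplicate_for_hosts, not_hosts = [], []
for hname in hnames:
    hname = hname.strip()
    if hname.startswith('!'):
        not_hosts.append(hname[1:])
    else:
        duplicate_for_hosts.append(hname)

# De-duplicate, then drop the excluded names
duplicate_for_hosts = list(set(duplicate_for_hosts))
for hname in not_hosts:
    if hname in duplicate_for_hosts:
        duplicate_for_hosts.remove(hname)

print(sorted(duplicate_for_hosts))  # ['srv01', 'srv03']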
Please provide a description of the function:def _local_create_service(self, hosts, host_name, service):
host = hosts.find_by_name(host_name.strip())
if host.is_excluded_for(service):
return None
# Creates a real service instance from the template
new_s = service.copy()
new_s.host_name = host_name
new_s.register = 1
self.add_item(new_s)
return new_s | [
"Create a new service based on a host_name and service instance.\n\n :param hosts: The hosts items instance.\n :type hosts: alignak.objects.host.Hosts\n :param host_name: The host_name to create a new service.\n :type host_name: str\n :param service: The service to be used as template.\n :type service: Service\n :return: The new service created.\n :rtype: alignak.objects.service.Service\n "
] |
Please provide a description of the function:def explode_services_from_templates(self, hosts, service_template):
hname = getattr(service_template, "host_name", None)
if not hname:
logger.debug("Service template %s is declared without an host_name",
service_template.get_name())
return
logger.debug("Explode services %s for the host: %s", service_template.get_name(), hname)
# Now really create the services
if is_complex_expr(hname):
hnames = self.evaluate_hostgroup_expression(
hname.strip(), hosts, hosts.templates, look_in='templates')
for name in hnames:
self._local_create_service(hosts, name, service_template)
else:
hnames = [n.strip() for n in hname.split(',') if n.strip()]
for hname in hnames:
for name in hosts.find_hosts_that_use_template(hname):
self._local_create_service(hosts, name, service_template) | [
"\n Explodes services from templates. All hosts holding the specified\n templates are bound with the service.\n\n :param hosts: The hosts container.\n :type hosts: alignak.objects.host.Hosts\n :param service_template: The service to explode.\n :type service_template: alignak.objects.service.Service\n :return: None\n "
] |
Please provide a description of the function:def explode_services_duplicates(self, hosts, service):
hname = getattr(service, "host_name", None)
if hname is None:
return
# the generator case, we must create several new services
# we must find our host, and get all key:value we need
host = hosts.find_by_name(hname.strip())
if host is None:
service.add_error('Error: The hostname %s is unknown for the service %s!'
% (hname, service.get_name()))
return
# Duplicate services
for new_s in service.duplicate(host):
if host.is_excluded_for(new_s):
continue
# Adds concrete instance
self.add_item(new_s) | [
"\n Explodes services holding a `duplicate_foreach` clause.\n\n :param hosts: The hosts container\n :type hosts: alignak.objects.host.Hosts\n :param service: The service to explode\n :type service: alignak.objects.service.Service\n "
] |