Code
stringlengths 103
85.9k
| Summary
sequencelengths 0
94
|
---|---|
def linkify_templates(self):
    """Link all templates, and create the template graph too.

    :return: None
    """
    # Resolve the template links of every item and of every template
    for obj in itertools.chain(list(self.items.values()), list(self.templates.values())):
        self.linkify_item_templates(obj)
    # Tag each item with its full set of templates
    for obj in self:
        obj.tags = self.get_all_tags(obj)
def is_correct(self):
    """Check if the items list configuration is correct.

    Each item is checked individually; its errors and warnings are
    collected into the list-level error/warning containers, which are
    then logged.

    :return: True if the configuration is correct, otherwise False
    :rtype: bool
    """
    valid = True
    # Check individual items before displaying the global errors/warnings
    for obj in self:
        if not obj.is_correct():
            valid = False
            obj.add_error("Configuration in %s::%s is incorrect; from: %s"
                          % (obj.my_type, obj.get_name(), obj.imported_from))
        if obj.configuration_errors:
            self.configuration_errors += obj.configuration_errors
        if obj.configuration_warnings:
            self.configuration_warnings += obj.configuration_warnings
    # Log all the collected warnings
    for msg in self.configuration_warnings:
        logger.warning("[items] %s", msg)
    # Log all the collected errors
    if self.configuration_errors:
        valid = False
        for msg in self.configuration_errors:
            logger.error("[items] %s", msg)
    return valid
def serialize(self):
    """Serialize the items into a simple dict object.

    Used when transferring data to other daemons over the network (http).

    :return: dictionary with item uuid as key and serialized item as value
    :rtype: dict
    """
    return {key: obj.serialize() for key, obj in self.items.items()}
def apply_partial_inheritance(self, prop):
    """Define the property on each item/template from its inheritance chain.

    :param prop: property name
    :type prop: str
    :return: None
    """
    for obj in itertools.chain(list(self.items.values()), list(self.templates.values())):
        self.get_property_by_inheritance(obj, prop)
        # An inherited "null" value means: drop the property
        if getattr(obj, prop, None) == 'null':
            delattr(obj, prop)
def apply_inheritance(self):
    """For all items and templates, inherit properties and custom variables.

    :return: None
    """
    # For every class property, look it up on the item or in its templates
    cls = self.inner_class
    for prop in cls.properties:
        self.apply_partial_inheritance(prop)
    # Then inherit the custom variables (_XXX)
    for obj in itertools.chain(list(self.items.values()), list(self.templates.values())):
        self.get_customs_properties_by_inheritance(obj)
def linkify_with_contacts(self, contacts):
    """Link items with contacts items.

    The contact names of each item are replaced with the matching
    contact uuids; unknown names raise an item configuration error.

    :param contacts: all contacts object
    :type contacts: alignak.objects.contact.Contacts
    :return: None
    """
    for obj in self:
        if not hasattr(obj, 'contacts'):
            continue
        resolved = []
        for name in [entry for entry in strip_and_uniq(obj.contacts) if entry]:
            contact = contacts.find_by_name(name)
            if contact is None or contact.uuid in resolved:
                obj.add_error("the contact '%s' defined for '%s' is unknown"
                              % (name, obj.get_name()))
            else:
                resolved.append(contact.uuid)
        obj.contacts = resolved
def linkify_with_escalations(self, escalations):
    """Link items with escalations.

    The escalation names of each item are replaced with the matching
    escalation uuids; unknown names raise an item configuration error.

    :param escalations: all escalations object
    :type escalations: alignak.objects.escalation.Escalations
    :return: None
    """
    for obj in self:
        if not hasattr(obj, 'escalations'):
            continue
        resolved = []
        for name in [entry for entry in strip_and_uniq(obj.escalations) if entry]:
            escalation = escalations.find_by_name(name)
            if escalation is None or escalation.uuid in resolved:
                obj.add_error("the escalation '%s' defined for '%s' is unknown"
                              % (name, obj.get_name()))
            else:
                resolved.append(escalation.uuid)
        obj.escalations = resolved
def explode_contact_groups_into_contacts(item, contactgroups):
    """Get all contacts of the item's contact_groups and add them to its contacts.

    :param item: item having a contact_groups property
    :type item: object
    :param contactgroups: all contactgroups object
    :type contactgroups: alignak.objects.contactgroup.Contactgroups
    :return: None
    """
    if not hasattr(item, 'contact_groups'):
        return
    if not item.contact_groups:
        return
    # contact_groups may be a list or a comma separated string
    if isinstance(item.contact_groups, list):
        group_names = item.contact_groups
    else:
        group_names = item.contact_groups.split(',')
    for group_name in strip_and_uniq(group_names):
        group = contactgroups.find_by_name(group_name)
        if not group:
            item.add_error("The contact group '%s' defined on the %s '%s' do not exist"
                           % (group_name, item.__class__.my_type, item.get_name()))
            continue
        members = contactgroups.get_members_of_group(group_name)
        if members:
            if hasattr(item, 'contacts'):
                # Fix #1054 - build a new list, do not extend a possibly shared one
                item.contacts = item.contacts + members
            else:
                item.contacts = members
def linkify_with_timeperiods(self, timeperiods, prop):
    """Link items with timeperiods items.

    The timeperiod name stored in `prop` is replaced with the matching
    timeperiod uuid; empty values are normalized to ''.

    :param timeperiods: all timeperiods object
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :param prop: property name holding the timeperiod name
    :type prop: str
    :return: None
    """
    for obj in self:
        if not hasattr(obj, prop):
            continue
        tp_name = getattr(obj, prop).strip()
        if not tp_name:
            # Some default values are '', so normalize to empty
            setattr(obj, prop, '')
            continue
        # We have a real name, search for the timeperiod
        timeperiod = timeperiods.find_by_name(tp_name)
        if timeperiod is None:
            obj.add_error("The %s of the %s '%s' named '%s' is unknown!"
                          % (prop, obj.__class__.my_type, obj.get_name(), tp_name))
        else:
            setattr(obj, prop, timeperiod.uuid)
def linkify_with_checkmodulations(self, checkmodulations):
    """Link items with check modulation objects.

    The checkmodulation names of each item are replaced with the matching
    uuids; unknown names raise an item configuration error.

    :param checkmodulations: checkmodulations object
    :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
    :return: None
    """
    for obj in self:
        if not hasattr(obj, 'checkmodulations'):
            continue
        resolved = []
        for name in [entry for entry in strip_and_uniq(obj.checkmodulations) if entry]:
            modulation = checkmodulations.find_by_name(name)
            if modulation is None or modulation.uuid in resolved:
                obj.add_error("The checkmodulations of the %s '%s' named "
                              "'%s' is unknown!" % (obj.__class__.my_type, obj.get_name(), name))
            else:
                resolved.append(modulation.uuid)
        obj.checkmodulations = resolved
def linkify_s_by_module(self, modules):
    """Link modules to items.

    Each item's `modules` name list is replaced with the list of the
    matching module objects. Unknown module names raise an item
    configuration error.

    Bug fix: the previous duplicate check tested ``module.uuid not in new``
    whereas ``new`` contains module objects (not uuids), so the check never
    matched and duplicated modules were appended. The seen uuids are now
    tracked separately.

    :param modules: Modules object (list of all the modules found in the configuration)
    :type modules: alignak.objects.module.Modules
    :return: None
    """
    for i in self:
        links_list = strip_and_uniq(i.modules)
        new = []
        seen_uuids = set()
        for name in [e for e in links_list if e]:
            module = modules.find_by_name(name)
            if module is None:
                i.add_error("Error: the module %s is unknown for %s" % (name, i.get_name()))
            elif module.uuid not in seen_uuids:
                seen_uuids.add(module.uuid)
                new.append(module)
        i.modules = new
def evaluate_hostgroup_expression(expr, hosts, hostgroups, look_in='hostgroups'):
    """Evaluate a hostgroup expression.

    :param expr: an expression (or a list of expressions, OR-ed together)
    :type expr: str
    :param hosts: hosts object (all hosts)
    :type hosts: alignak.objects.host.Hosts
    :param hostgroups: hostgroups object (all hostgroups)
    :type hostgroups: alignak.objects.hostgroup.Hostgroups
    :param look_in: item type searched ('hostgroups' or templates)
    :type look_in: str
    :return: list of hostgroups
    :rtype: list
    """
    # A list (e.g. numerous hostgroups entries in a service) is OR-ed
    if isinstance(expr, list):
        expr = '|'.join(expr)
    if look_in == 'hostgroups':
        factory = ComplexExpressionFactory(look_in, hostgroups, hosts)
    else:
        # Looking into templates
        factory = ComplexExpressionFactory(look_in, hosts, hosts)
    expression_tree = factory.eval_cor_pattern(expr)
    return list(expression_tree.resolve_elements())
def get_hosts_from_hostgroups(hgname, hostgroups):
    """Get the host names of the given hostgroup(s).

    :param hgname: hostgroup name(s), a list or a comma separated string
    :type hgname: str | list
    :param hostgroups: hostgroups object (all hostgroups)
    :type hostgroups: alignak.objects.hostgroup.Hostgroups
    :return: list of host names
    :rtype: list
    :raises ValueError: when one of the hostgroup names is unknown
    """
    if not isinstance(hgname, list):
        hgname = [e.strip() for e in hgname.split(',') if e.strip()]
    host_names = []
    for name in hgname:
        hostgroup = hostgroups.find_by_name(name)
        if hostgroup is None:
            # Bug fix: report the unknown name, not the whole requested list
            raise ValueError("the hostgroup '%s' is unknown" % name)
        members = [h.strip() for h in hostgroup.get_hosts() if h.strip()]
        host_names.extend(members)
    return host_names
def explode_host_groups_into_hosts(self, item, hosts, hostgroups):
    """Get all hosts of the item's hostgroups and add them to its host_name.

    :param item: the item object
    :type item: alignak.objects.item.Item
    :param hosts: hosts object
    :type hosts: alignak.objects.host.Hosts
    :param hostgroups: hostgroups object
    :type hostgroups: alignak.objects.hostgroup.Hostgroups
    :return: None
    """
    expanded = []
    hgnames = getattr(item, "hostgroup_name", '') or ''
    # Expand the hostgroups (which may be a complex expression)
    if is_complex_expr(hgnames):
        expanded.extend(self.evaluate_hostgroup_expression(
            item.hostgroup_name, hosts, hostgroups))
    elif hgnames:
        try:
            expanded.extend(
                self.get_hosts_from_hostgroups(hgnames, hostgroups))
        except ValueError as err:  # pragma: no cover, simple protection
            item.add_error(str(err))
    # Expand the explicit host names
    hname = getattr(item, "host_name", '')
    expanded.extend([n.strip() for n in hname.split(',') if n.strip()])
    result = set()
    for name in expanded:
        if name.startswith('!'):
            # '!name' removes a host previously added from a hostgroup
            result.discard(name[1:].strip())
        elif name == '*':
            # '*' means every known host having a host_name
            result.update([h.host_name for h in hosts.items.values()
                           if getattr(h, 'host_name', '')])
        else:
            result.add(name)
    item.host_name = ','.join(result)
def no_loop_in_parents(self, attr1, attr2):
    # pylint: disable=too-many-branches
    """Find loops in the dependency graph built from attr1 -> attr2 relations.

    Used with the following attribute couples:
    - (self, parents): host dependencies from the host objects
    - (host_name, dependent_host_name): from hostdependencies objects
    - (service_description, dependent_service_description): from
      servicedependencies objects

    :param attr1: attribute name (or "self" to use the item uuid)
    :type attr1: str
    :param attr2: attribute name
    :type attr2: str
    :return: the nodes that are part of a loop
    :rtype: list
    """
    parents = Graph()

    def _source(item):
        # "self" is a hack meaning: use the item uuid itself
        return item.uuid if attr1 == "self" else getattr(item, attr1, None)

    # Register all the source nodes
    for item in self:
        obj = _source(item)
        if obj is None:
            continue
        if isinstance(obj, list):
            for sub in obj:
                parents.add_node(sub)
        else:
            parents.add_node(obj)
    # And now fill the edges
    for item in self:
        obj1 = _source(item)
        obj2 = getattr(item, attr2, None)
        if obj2 is None:
            continue
        sources = obj1 if isinstance(obj1, list) else [obj1]
        targets = obj2 if isinstance(obj2, list) else [obj2]
        for target in targets:
            for source in sources:
                parents.add_edge(source, target)
    return parents.loop_check()
def get_property_by_inheritance(self, obj, prop):
    # pylint: disable=too-many-branches, too-many-nested-blocks
    """Get the requested property of the object, resolving it through its
    templates if the object does not define it.

    Templates keep a leading '+' marker in list values so that their own
    children know the value is additive; plain objects get the marker
    stripped.

    :param obj: the object to search the property for
    :type obj: alignak.objects.item.Item
    :param prop: name of the property
    :type prop: str
    :return: value of the property for this object or one of its templates
    :rtype: str or None
    """
    if prop == 'register':
        # The register property is never inherited
        return None

    def _cleaned(value):
        """Deduplicate a list value (keeping order); for non-template
        objects also drop every '+' marker."""
        if isinstance(value, list):
            uniq = []
            for elt in value:
                if elt not in uniq:
                    uniq.append(elt)
            value = uniq
            if not obj.is_tpl():
                value = [elt for elt in value if elt != '+']
        return value

    # The object owns the property: merge an eventual "plus" addition
    if hasattr(obj, prop):
        value = getattr(obj, prop)
        if obj.has_plus(prop):
            if not isinstance(value, list):
                value = [value]
            value.insert(0, obj.get_plus_and_delete(prop))
            value = list(set(value))
            # A template keeps the '+' at the beginning of the chain
            if obj.is_tpl():
                value.insert(0, '+')
        return _cleaned(value)

    # The object does not have the property: walk the templates.
    # The least defined template wins (if the property is set).
    for t_id in obj.templates:
        template = self.templates[t_id]
        value = self.get_property_by_inheritance(template, prop)
        if value is None or (isinstance(value, list) and not value):
            continue
        # A '+' value from the template means we must keep looping
        still_loop = False
        if isinstance(value, list) and value[0] == '+':
            # Templates keep the '+' inherited from their parents
            if not obj.is_tpl():
                value = list(value)[1:]
            still_loop = True
        # Maybe a previous loop iteration already set a value: merge it
        if hasattr(obj, prop):
            if isinstance(value, (list, string_types)) and value and value[0] == '+':
                # The '+' can be dropped: our value will be final
                merged = list(getattr(obj, prop))
                merged.extend(value[1:])
                value = merged
            else:
                merged = list(getattr(obj, prop))
                merged.extend(value)
                value = merged
        value = _cleaned(value)
        setattr(obj, prop, value)
        if not still_loop:
            # Append our own "plus" value in the end, if any
            if obj.has_plus(prop):
                value = list(value)
                value.extend(obj.get_plus_and_delete(prop))
                # A template keeps its '+'
                if obj.is_tpl() and value[0] != '+':
                    value.insert(0, '+')
            value = _cleaned(value)
            setattr(obj, prop, value)
            return value

    # Templates only gave us '+' values, but the loop above may already
    # have set a value on the object
    template_with_only_plus = hasattr(obj, prop)
    # No final value found: maybe a "plus" remains on the object itself
    if obj.has_plus(prop):
        if template_with_only_plus:
            value = list(getattr(obj, prop))
            value.extend(obj.get_plus_and_delete(prop))
        else:
            value = obj.get_plus_and_delete(prop)
        # A template keeps its '+' chain so its children keep looping
        if obj.is_tpl() and value != [] and value[0] != '+':
            value.insert(0, '+')
        value = _cleaned(value)
        setattr(obj, prop, value)
        return value

    # In the end, return the value we got if any, or None
    return getattr(obj, prop, None)
def get_customs_properties_by_inheritance(self, obj):
    """Get the custom properties (_XXX) of the object, merged with the ones
    inherited from its templates.

    :param obj: the object to search the properties for
    :type obj: alignak.objects.item.Item
    :return: the object's custom properties
    :rtype: dict
    """
    for t_id in obj.templates:
        template = self.templates[t_id]
        inherited = self.get_customs_properties_by_inheritance(template)
        for prop in inherited:
            # The object's own value wins over the template's one
            value = obj.customs[prop] if prop in obj.customs else inherited[prop]
            if obj.has_plus(prop):
                value.insert(0, obj.get_plus_and_delete(prop))
            obj.customs[prop] = value
    for prop in obj.customs:
        value = obj.customs[prop]
        if obj.has_plus(prop):
            value.insert(0, obj.get_plus_and_delete(prop))
            obj.customs[prop] = value
    # Custom properties that only exist as "plus" entries become
    # plain custom properties
    leftover = obj.get_all_plus_and_delete()
    for prop in leftover:
        obj.customs[prop] = leftover[prop]
    return obj.customs
def add_edge(self, from_node, to_node):
    """Add an oriented edge between two nodes.

    Unknown nodes are registered on the fly.

    :param from_node: node where the edge starts
    :type from_node: object
    :param to_node: node where the edge ends
    :type to_node: object
    :return: None
    """
    # The destination may be unknown yet
    if to_node not in self.nodes:
        self.add_node(to_node)
    try:
        sons = self.nodes[from_node]["sons"]
    except KeyError:
        # The source does not exist: register it with its son
        self.nodes[from_node] = {"dfs_loop_status": "", "sons": [to_node]}
    else:
        sons.append(to_node)
def loop_check(self):
    """Check if the graph contains a loop.

    :return: the nodes that are part of a loop
    :rtype: list
    """
    # Tag every node as unchecked before running the DFS
    for node in list(self.nodes.values()):
        node['dfs_loop_status'] = 'DFS_UNCHECKED'
    looped = []
    for node_id, node in self.nodes.items():
        # Only run the DFS on nodes that were not already visited
        if node['dfs_loop_status'] == 'DFS_UNCHECKED':
            self.dfs_loop_search(node_id)
        if node['dfs_loop_status'] == 'DFS_LOOP_INSIDE':
            looped.append(node_id)
    # Drop the temporary tags
    for node in list(self.nodes.values()):
        del node['dfs_loop_status']
    return looped
def dfs_loop_search(self, root):
    """Depth-first search tagging the nodes stuck in a loop.

    Node statuses:
    * DFS_UNCHECKED: not visited yet
    * DFS_TEMPORARY_CHECKED: currently on the DFS stack
    * DFS_OK: this node (and all its sons) are fine
    * DFS_NEAR_LOOP: a problem was found in one of the sons
    * DFS_LOOP_INSIDE: this node is part of a loop

    :param root: root of the dependency tree
    :type root:
    :return: None
    """
    # Mark the root as being on the current DFS path
    self.nodes[root]['dfs_loop_status'] = 'DFS_TEMPORARY_CHECKED'
    for child in self.nodes[root]["sons"]:
        status = self.nodes[child]['dfs_loop_status']
        if status == 'DFS_UNCHECKED':
            # Unseen child: recurse, then re-read its status
            self.dfs_loop_search(child)
            status = self.nodes[child]['dfs_loop_status']
        if status == 'DFS_TEMPORARY_CHECKED':
            # Back-edge: both ends belong to a loop
            self.nodes[child]['dfs_loop_status'] = 'DFS_LOOP_INSIDE'
            self.nodes[root]['dfs_loop_status'] = 'DFS_LOOP_INSIDE'
        if status in ('DFS_NEAR_LOOP', 'DFS_LOOP_INSIDE'):
            # A node known to be part of a loop must not be downgraded
            if self.nodes[root]['dfs_loop_status'] != 'DFS_LOOP_INSIDE':
                self.nodes[root]['dfs_loop_status'] = 'DFS_NEAR_LOOP'
            self.nodes[child]['dfs_loop_status'] = 'DFS_LOOP_INSIDE'
    # A node is OK if and only if it was not flagged while scanning its sons
    if self.nodes[root]['dfs_loop_status'] == 'DFS_TEMPORARY_CHECKED':
        self.nodes[root]['dfs_loop_status'] = 'DFS_OK'
def get_accessibility_packs(self):
    """Get the accessibility packs of the graph: nodes within a pack are
    related in some way, nodes of different packs are not related at all.

    TODO: make it work for a directional graph too; for now an edge must
    exist both as father->son AND son->father.

    :return: packs of nodes
    :rtype: list
    """
    # Tag every node as unchecked before collecting the packs
    for node in list(self.nodes.values()):
        node['dfs_loop_status'] = 'DFS_UNCHECKED'
    packs = []
    for node_id, node in self.nodes.items():
        # Only start from nodes that are not in an already collected pack
        if node['dfs_loop_status'] == 'DFS_UNCHECKED':
            packs.append(self.dfs_get_all_childs(node_id))
    # Drop the temporary tags
    for node in list(self.nodes.values()):
        del node['dfs_loop_status']
    return packs
def dfs_get_all_childs(self, root):
    """Recursively get all the sons reachable from this node (the node
    itself included).

    :param root: node to get the sons of
    :type root:
    :return: reachable nodes
    :rtype: list
    """
    self.nodes[root]['dfs_loop_status'] = 'DFS_CHECKED'
    # The node itself and its direct sons
    reached = {root}
    reached.update(self.nodes[root]['sons'])
    for child in self.nodes[root]['sons']:
        # Skip children that have already been visited
        if self.nodes[child]['dfs_loop_status'] == 'DFS_UNCHECKED':
            reached.update(self.dfs_get_all_childs(child))
    return list(reached)
def identity(self):
    """Get the daemon identity.

    Returns the application identification completed with the
    `start_time` and `running_id` of this interface.

    :return: daemon identity
    :rtype: dict
    """
    res = self.app.get_id()
    res.update({"start_time": self.start_time,
                "running_id": self.running_id})
    return res
def api(self):
    """List the methods available on the daemon Web service interface.

    Bug fix: default values used to be zipped against the FIRST arguments
    (``dict(zip(args, spec.defaults))``) whereas Python defaults apply to
    the LAST arguments; arguments without a default were also silently
    dropped from the result. Defaults are now aligned with the trailing
    arguments and every argument is reported.

    :return: a list of endpoints (name, doc, uri, args) per daemon
    :rtype: dict
    """
    functions = [x[0] for x in inspect.getmembers(self, predicate=inspect.ismethod)
                 if not x[0].startswith('_')]
    full_api = {
        'doc': u"When posting data you have to use the JSON format.",
        'api': []
    }
    my_daemon_type = "%s" % getattr(self.app, 'type', 'unknown')
    my_address = getattr(self.app, 'host_name', getattr(self.app, 'name', 'unknown'))
    if getattr(self.app, 'address', '127.0.0.1') not in ['127.0.0.1']:
        # If an address is explicitly specified, I must use it!
        my_address = self.app.address
    for fun in functions:
        method = getattr(self, fun)
        try:
            spec = inspect.getfullargspec(method)
        except Exception:  # pylint: disable=broad-except
            # Python 2 fallback
            # pylint: disable=deprecated-method
            spec = inspect.getargspec(method)
        args = [a for a in spec.args if a not in ('self', 'cls')]
        a_dict = {a: "No default value" for a in args}
        if spec.defaults:
            # Defaults apply to the trailing arguments
            for arg, default in zip(args[-len(spec.defaults):], spec.defaults):
                a_dict[arg] = default
        full_api['api'].append({
            'daemon': my_daemon_type,
            'name': fun,
            'doc': method.__doc__,
            'uri': '%s://%s:%s/%s' % (getattr(self.app, 'scheme', 'http'),
                                      my_address, self.app.port, fun),
            'args': a_dict
        })
    return full_api
def stop_request(self, stop_now='0'):
    """Request the daemon to stop.

    With `stop_now` set to '1' the daemon stops immediately; otherwise it
    enters the stop-wait mode and waits for a later immediate request.

    :param stop_now: '1' to stop now, else enter stop wait mode
    :type stop_now: str
    :return: True
    :rtype: bool
    """
    immediate = (stop_now == '1')
    self.app.interrupted = immediate
    self.app.will_stop = True
    return True
def get_log_level(self):
    """Get the current daemon log level.

    Returns the daemon identity completed with `log_level` and
    `log_level_name` properties.

    :return: daemon identity and current log level
    :rtype: dict
    """
    level_names = {
        logging.DEBUG: 'DEBUG', logging.INFO: 'INFO', logging.WARNING: 'WARNING',
        logging.ERROR: 'ERROR', logging.CRITICAL: 'CRITICAL'
    }
    alignak_logger = logging.getLogger(ALIGNAK_LOGGER_NAME)
    level = alignak_logger.getEffectiveLevel()
    res = self.identity()
    res.update({"log_level": level,
                "log_level_name": level_names[level]})
    return res
def set_log_level(self, log_level=None):
    """Set the current log level for the daemon.

    The `log_level` must be one of DEBUG, INFO, WARNING, ERROR, CRITICAL;
    when None it is read from the posted JSON body. On error a dict with
    '_status': 'ERR' and an explanation message is returned, else the new
    log level information.

    :param log_level: requested log level name
    :type log_level: str
    :return: see above
    :rtype: dict
    """
    if log_level is None:
        log_level = cherrypy.request.json['log_level']
    if log_level not in ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'):
        return {'_status': u'ERR',
                '_message': u"Required log level is not allowed: %s" % log_level}
    alignak_logger = logging.getLogger(ALIGNAK_LOGGER_NAME)
    alignak_logger.setLevel(log_level)
    return self.get_log_level()
def stats(self, details=False):
    """Get statistics and information from the daemon.

    Returns the daemon identity completed with the daemon statistics.

    :param details: details are required (different from the default False)
    :type details: str
    :return: daemon stats
    :rtype: dict
    """
    if details is not False:
        # Anything but the default is coerced to a boolean
        details = bool(details)
    res = self.identity()
    res.update(self.app.get_daemon_stats(details=details))
    return res
def _wait_new_conf(self):
    """Ask the daemon to drop its configuration and wait for a new one.

    :return: None
    """
    with self.app.conf_lock:
        logger.debug("My Arbiter wants me to wait for a new configuration.")
        # Clearing while a new configuration is being set up can lead to errors
        self.app.schedulers.clear()
        self.app.cur_conf = {}
Please provide a description of the function:def _push_configuration(self, pushed_configuration=None):
if pushed_configuration is None:
confs = cherrypy.request.json
pushed_configuration = confs['conf']
# It is safer to lock this part
with self.app.conf_lock:
self.app.new_conf = pushed_configuration
return True | [
"Send a new configuration to the daemon\n\n This function is not intended for external use. It is quite complex to\n build a configuration for a daemon and it is the arbiter dispatcher job ;)\n\n :param pushed_configuration: new conf to send\n :return: None\n "
] |
Please provide a description of the function:def _have_conf(self, magic_hash=None):
self.app.have_conf = getattr(self.app, 'cur_conf', None) not in [None, {}]
if magic_hash is not None:
# Beware, we got an str in entry, not an int
magic_hash = int(magic_hash)
# I've got a conf and a good one
return self.app.have_conf and self.app.cur_conf.magic_hash == magic_hash
return self.app.have_conf | [
"Get the daemon current configuration state\n\n If the daemon has received a configuration from its arbiter, this will\n return True\n\n If a `magic_hash` is provided it is compared with the one included in the\n daemon configuration and this function returns True only if they match!\n\n :return: boolean indicating if the daemon has a configuration\n :rtype: bool\n "
] |
def _push_actions(self):
    """Push actions to the poller/reactionner.

    Used by the scheduler to send the actions to execute, as a JSON body:
    {'actions': actions, 'scheduler_instance_id': scheduler_instance_id}

    :return: None
    """
    payload = cherrypy.request.json
    with self.app.lock:
        self.app.add_actions(payload['actions'], payload['scheduler_instance_id'])
def _results(self, scheduler_instance_id):
    """Get the results of the executed actions for the given scheduler.

    Only meaningful on passive poller/reactionner daemons.

    :param scheduler_instance_id: instance id of the scheduler
    :type scheduler_instance_id: string
    :return: serialized results list
    :rtype: str
    """
    with self.app.lock:
        results = self.app.get_results_from_passive(scheduler_instance_id)
    return serialize(results, True)
def _broks(self, broker_name):  # pylint: disable=unused-argument
    """Get the broks from the daemon.

    Used by the brokers to fetch the broks list of a daemon.

    :return: serialized brok list
    :rtype: dict
    """
    with self.app.broks_lock:
        broks = self.app.get_broks()
    return serialize(broks, True)
def _events(self):
    """Get the monitoring events from the daemon.

    Used by the arbiter to collect the events from all its satellites.

    :return: serialized events list
    :rtype: list
    """
    with self.app.events_lock:
        events = self.app.get_events()
    return serialize(events, True)
def serialize(self):
    """Serialize this node into a simple dict object.

    Used when transferring data to other daemons over the network (http);
    all the attributes are returned directly, sons being serialized
    recursively.

    :return: json representation of a DependencyNode
    :rtype: dict
    """
    return {
        'operand': self.operand,
        'sons': [serialize(son) for son in self.sons],
        'of_values': self.of_values,
        'is_of_mul': self.is_of_mul,
        'not_value': self.not_value,
    }
Please provide a description of the function:def get_state(self, hosts, services):
# If we are a host or a service, we just got the host/service
# hard state
if self.operand == 'host':
host = hosts[self.sons[0]]
return self.get_host_node_state(host.last_hard_state_id,
host.problem_has_been_acknowledged,
host.in_scheduled_downtime)
if self.operand == 'service':
service = services[self.sons[0]]
return self.get_service_node_state(service.last_hard_state_id,
service.problem_has_been_acknowledged,
service.in_scheduled_downtime)
if self.operand == '|':
return self.get_complex_or_node_state(hosts, services)
if self.operand == '&':
return self.get_complex_and_node_state(hosts, services)
# It's an Xof rule
if self.operand == 'of:':
return self.get_complex_xof_node_state(hosts, services)
# We have an unknown node. Code is not reachable because we validate operands
return 4 | [
"Get node state by looking recursively over sons and applying operand\n\n :param hosts: list of available hosts to search for\n :param services: list of available services to search for\n :return: Node state\n :rtype: int\n "
] |
def get_host_node_state(self, state, problem_has_been_acknowledged, in_scheduled_downtime):
    """Get a host node state, the simplest case.

    DOWN (1) is considered as CRITICAL (2); acknowledged or downtimed
    problems count as UP; a NOT rule reverts the result.

    :return: 0, 1 or 2
    :rtype: int
    """
    # Make DOWN look like CRITICAL (2 instead of 1)
    effective = 2 if state == 1 else state
    # Acknowledged or downtimed problems count as ok/up
    if problem_has_been_acknowledged or in_scheduled_downtime:
        effective = 0
    if self.not_value:
        # Keep the logic of returning Down on NOT rules
        return 0 if effective else 2
    return effective
def get_service_node_state(self, state, problem_has_been_acknowledged, in_scheduled_downtime):
    """Get a service node state, the simplest case.

    Acknowledged or downtimed problems count as OK; a NOT rule swaps OK
    and CRITICAL while WARNING is left untouched.

    :return: 0, 1 or 2
    :rtype: int
    """
    if problem_has_been_acknowledged or in_scheduled_downtime:
        state = 0
    if not self.not_value:
        return state
    # NOT rule: Critical -> OK
    if state == 2:
        return 0
    # NOT rule: OK -> Critical (warning is untouched)
    if state == 0:
        return 2
    return state
def get_complex_or_node_state(self, hosts, services):
    """Get the state of an OR aggregation node.

    The best state (min of the sons) is kept, then reverted when this is
    a NOT node.

    :param hosts: host objects
    :param services: service objects
    :return: 0, 1 or 2
    :rtype: int
    """
    best_state = min(son.get_state(hosts, services) for son in self.sons)
    if self.not_value:
        return self.get_reverse_state(best_state)
    return best_state
def get_complex_and_node_state(self, hosts, services):
    """Get the state of an AND aggregation node.

    The worst state is kept: CRITICAL (2) always wins, otherwise the max
    of the sons (so UNKNOWN 3 loses against CRITICAL 2). The result is
    reverted when this is a NOT node.

    :param hosts: host objects
    :param services: service objects
    :return: 0, 1 or 2
    :rtype: int
    """
    states = [son.get_state(hosts, services) for son in self.sons]
    worst_state = 2 if 2 in states else max(states)
    if self.not_value:
        return self.get_reverse_state(worst_state)
    return worst_state
def get_complex_xof_node_state(self, hosts, services):
    # pylint: disable=too-many-locals, too-many-return-statements, too-many-branches
    """Get the state of an "X of" aggregation node.

    The numbers of OK, WARNING and CRITICAL sons are counted and the
    Critical, Warning and OK rules are tried in that order; the first
    matching one gives the state. If no rule applies, a simple Xof: gives
    OK and a multiple A,B,Cof: gives the worst state.

    :param hosts: host objects
    :param services: service objects
    :return: 0, 1 or 2
    :rtype: int
    TODO: Looks like the last if does the opposite of what the comment says
    """
    states = [son.get_state(hosts, services) for son in self.sons]
    nb_sons = len(states)
    nb_ok = states.count(0)
    nb_warn = states.count(1)
    nb_crit = states.count(2)

    def _rule_applies(nb_real, nb_search):
        """Check if there are enough matching values to trigger the rule.

        nb_search may be an absolute count, a percentage ('50%'), or a
        negative value meaning "all but N" (absolute or percent).
        """
        if nb_search.endswith('%'):
            threshold = int(nb_search[:-1])
            if threshold < 0:
                threshold = max(100 + threshold, 0)
            return float(nb_real) / nb_sons * 100 >= threshold
        threshold = int(nb_search)
        if threshold < 0:
            threshold = max(nb_sons + threshold, 0)
        return nb_real >= threshold

    ok_apply = _rule_applies(nb_ok, self.of_values[0])
    warn_apply = _rule_applies(nb_warn + nb_crit, self.of_values[1])
    crit_apply = _rule_applies(nb_crit, self.of_values[2])

    # Return the worst state that applies
    for applies, state in ((crit_apply, 2), (warn_apply, 1), (ok_apply, 0)):
        if applies:
            return self.get_reverse_state(state) if self.not_value else state

    # No rule applied: a simple Xof: gives OK, a multiple A,B,Cof: gives
    # the worst state of the sons
    if self.is_of_mul:
        return self.get_reverse_state(0) if self.not_value else 0
    worst_state = 2 if 2 in states else max(states)
    return self.get_reverse_state(worst_state) if self.not_value else worst_state
] |
Please provide a description of the function:def list_all_elements(self):
res = []
# We are a host/service
if self.operand in ['host', 'service']:
return [self.sons[0]]
for son in self.sons:
res.extend(son.list_all_elements())
# and returns a list of unique uuids
return list(set(res)) | [
"Get all host/service uuid in our node and below\n\n :return: list of hosts/services uuids\n :rtype: list\n "
] |
Please provide a description of the function:def switch_zeros_of_values(self):
nb_sons = len(self.sons)
# Need a list for assignment
new_values = list(self.of_values)
for i in [0, 1, 2]:
if new_values[i] == '0':
new_values[i] = str(nb_sons)
self.of_values = tuple(new_values) | [
"If we are a of: rule, we can get some 0 in of_values,\n if so, change them with NB sons instead\n\n :return: None\n "
] |
Please provide a description of the function:def is_valid(self):
valid = True
if not self.sons:
valid = False
else:
for son in self.sons:
if isinstance(son, DependencyNode) and not son.is_valid():
self.configuration_errors.extend(son.configuration_errors)
valid = False
return valid | [
"Check if all leaves are correct (no error)\n\n :return: True if correct, otherwise False\n :rtype: bool\n "
] |
Please provide a description of the function:def eval_cor_pattern(self, pattern, hosts, services, hostgroups, servicegroups, running=False):
pattern = pattern.strip()
complex_node = False
# Look if it's a complex pattern (with rule) or
# if it's a leaf of it, like a host/service
for char in '()&|':
if char in pattern:
complex_node = True
# If it's a simple node, evaluate it directly
if complex_node is False:
return self.eval_simple_cor_pattern(pattern, hosts, services,
hostgroups, servicegroups, running)
return self.eval_complex_cor_pattern(pattern, hosts, services,
hostgroups, servicegroups, running) | [
"Parse and build recursively a tree of DependencyNode from pattern\n\n :param pattern: pattern to parse\n :type pattern: str\n :param hosts: hosts list, used to find a specific host\n :type hosts: alignak.objects.host.Host\n :param services: services list, used to find a specific service\n :type services: alignak.objects.service.Service\n :param running: rules are evaluated at run time and parsing. True means runtime\n :type running: bool\n :return: root node of parsed tree\n :rtype: alignak.dependencynode.DependencyNode\n "
] |
Please provide a description of the function:def eval_xof_pattern(node, pattern):
xof_pattern = r"^(-?\d+%?),*(-?\d*%?),*(-?\d*%?) *of: *(.+)"
regex = re.compile(xof_pattern)
matches = regex.search(pattern)
if matches is not None:
node.operand = 'of:'
groups = matches.groups()
# We can have a Aof: rule, or a multiple A,B,Cof: rule.
mul_of = (groups[1] != '' and groups[2] != '')
# If multi got (A,B,C)
if mul_of:
node.is_of_mul = True
node.of_values = (groups[0], groups[1], groups[2])
else: # if not, use A,0,0, we will change 0 after to put MAX
node.of_values = (groups[0], '0', '0')
pattern = matches.groups()[3]
return pattern | [
"Parse a X of pattern\n * Set is_of_mul attribute\n * Set of_values attribute\n\n :param node: node to edit\n :type node:\n :param pattern: line to match\n :type pattern: str\n :return: end of the line (without X of :)\n :rtype: str\n "
] |
Please provide a description of the function:def eval_complex_cor_pattern(self, pattern, hosts, services,
hostgroups, servicegroups, running=False):
# pylint: disable=too-many-branches
node = DependencyNode()
pattern = self.eval_xof_pattern(node, pattern)
in_par = False
tmp = ''
son_is_not = False # We keep is the next son will be not or not
stacked_parenthesis = 0
for char in pattern:
if char == '(':
stacked_parenthesis += 1
in_par = True
tmp = tmp.strip()
# Maybe we just start a par, but we got some things in tmp
# that should not be good in fact !
if stacked_parenthesis == 1 and tmp != '':
# TODO : real error
print("ERROR : bad expression near", tmp)
continue
# If we are already in a par, add this (
# but not if it's the first one so
if stacked_parenthesis > 1:
tmp += char
elif char == ')':
stacked_parenthesis -= 1
if stacked_parenthesis < 0:
# TODO : real error
print("Error : bad expression near", tmp, "too much ')'")
continue
if stacked_parenthesis == 0:
tmp = tmp.strip()
son = self.eval_cor_pattern(tmp, hosts, services,
hostgroups, servicegroups, running)
# Maybe our son was notted
if son_is_not:
son.not_value = True
son_is_not = False
node.sons.append(son)
in_par = False
# OK now clean the tmp so we start clean
tmp = ''
continue
# ok here we are still in a huge par, we just close one sub one
tmp += char
# Expressions in par will be parsed in a sub node after. So just
# stack pattern
elif in_par:
tmp += char
# Until here, we're not in par
# Manage the NOT for an expression. Only allow ! at the beginning
# of a host or a host,service expression.
elif char == '!':
tmp = tmp.strip()
if tmp and tmp[0] != '!':
print("Error : bad expression near", tmp, "wrong position for '!'")
continue
# Flags next node not state
son_is_not = True
# DO NOT keep the c in tmp, we consumed it
elif char in ['&', '|']:
# Oh we got a real cut in an expression, if so, cut it
tmp = tmp.strip()
# Look at the rule viability
if node.operand is not None and node.operand != 'of:' and char != node.operand:
# Should be logged as a warning / info? :)
return None
if node.operand != 'of:':
node.operand = char
if tmp != '':
son = self.eval_cor_pattern(tmp, hosts, services,
hostgroups, servicegroups, running)
# Maybe our son was notted
if son_is_not:
son.not_value = True
son_is_not = False
node.sons.append(son)
tmp = ''
# Maybe it's a classic character or we're in par, if so, continue
else:
tmp += char
# Be sure to manage the trainling part when the line is done
tmp = tmp.strip()
if tmp != '':
son = self.eval_cor_pattern(tmp, hosts, services,
hostgroups, servicegroups, running)
# Maybe our son was notted
if son_is_not:
son.not_value = True
son_is_not = False
node.sons.append(son)
# We got our nodes, so we can update 0 values of of_values
# with the number of sons
node.switch_zeros_of_values()
return node | [
"Parse and build recursively a tree of DependencyNode from a complex pattern\n\n :param pattern: pattern to parse\n :type pattern: str\n :param hosts: hosts list, used to find a specific host\n :type hosts: alignak.objects.host.Host\n :param services: services list, used to find a specific service\n :type services: alignak.objects.service.Service\n :param running: rules are evaluated at run time and parsing. True means runtime\n :type running: bool\n :return: root node of parsed tree\n :rtype: alignak.dependencynode.DependencyNode\n "
] |
Please provide a description of the function:def eval_simple_cor_pattern(self, pattern, hosts, services,
hostgroups, servicegroups, running=False):
node = DependencyNode()
pattern = self.eval_xof_pattern(node, pattern)
# If it's a not value, tag the node and find
# the name without this ! operator
if pattern.startswith('!'):
node.not_value = True
pattern = pattern[1:]
# Is the pattern an expression to be expanded?
if re.search(r"^([%s]+|\*):" % self.host_flags, pattern) or \
re.search(r",\s*([%s]+:.*|\*)$" % self.service_flags, pattern):
# o is just extracted its attributes, then trashed.
son = self.expand_expression(pattern, hosts, services,
hostgroups, servicegroups, running)
if node.operand != 'of:':
node.operand = '&'
node.sons.extend(son.sons)
node.configuration_errors.extend(son.configuration_errors)
node.switch_zeros_of_values()
else:
node.operand = 'object'
obj, error = self.find_object(pattern, hosts, services)
# here we have Alignak SchedulingItem object (Host/Service)
if obj is not None:
# Set host or service
# pylint: disable=E1101
node.operand = obj.__class__.my_type
node.sons.append(obj.uuid) # Only store the uuid, not the full object.
else:
if running is False:
node.configuration_errors.append(error)
else:
# As business rules are re-evaluated at run time on
# each scheduling loop, if the rule becomes invalid
# because of a badly written macro modulation, it
# should be notified upper for the error to be
# displayed in the check output.
raise Exception(error)
return node | [
"Parse and build recursively a tree of DependencyNode from a simple pattern\n\n :param pattern: pattern to parse\n :type pattern: str\n :param hosts: hosts list, used to find a specific host\n :type hosts: alignak.objects.host.Host\n :param services: services list, used to find a specific service\n :type services: alignak.objects.service.Service\n :param running: rules are evaluated at run time and parsing. True means runtime\n :type running: bool\n :return: root node of parsed tree\n :rtype: alignak.dependencynode.DependencyNode\n "
] |
Please provide a description of the function:def find_object(self, pattern, hosts, services):
obj = None
error = None
is_service = False
# h_name, service_desc are , separated
elts = pattern.split(',')
host_name = elts[0].strip()
# If host_name is empty, use the host_name the business rule is bound to
if not host_name:
host_name = self.bound_item.host_name
# Look if we have a service
if len(elts) > 1:
is_service = True
service_description = elts[1].strip()
if is_service:
obj = services.find_srv_by_name_and_hostname(host_name, service_description)
if not obj:
error = "Business rule uses unknown service %s/%s"\
% (host_name, service_description)
else:
obj = hosts.find_by_name(host_name)
if not obj:
error = "Business rule uses unknown host %s" % (host_name,)
return obj, error | [
"Find object from pattern\n\n :param pattern: text to search (host1,service1)\n :type pattern: str\n :param hosts: hosts list, used to find a specific host\n :type hosts: alignak.objects.host.Host\n :param services: services list, used to find a specific service\n :type services: alignak.objects.service.Service\n :return: tuple with Host or Service object and error\n :rtype: tuple\n "
] |
Please provide a description of the function:def expand_expression(self, pattern, hosts, services, hostgroups, servicegroups, running=False):
# pylint: disable=too-many-locals
error = None
node = DependencyNode()
node.operand = '&'
elts = [e.strip() for e in pattern.split(',')]
# If host_name is empty, use the host_name the business rule is bound to
if not elts[0]:
elts[0] = self.bound_item.host_name
filters = []
# Looks for hosts/services using appropriate filters
try:
all_items = {
"hosts": hosts,
"hostgroups": hostgroups,
"servicegroups": servicegroups
}
if len(elts) > 1:
# We got a service expression
host_expr, service_expr = elts
filters.extend(self.get_srv_host_filters(host_expr))
filters.extend(self.get_srv_service_filters(service_expr))
items = services.find_by_filter(filters, all_items)
else:
# We got a host expression
host_expr = elts[0]
filters.extend(self.get_host_filters(host_expr))
items = hosts.find_by_filter(filters, all_items)
except re.error as regerr:
error = "Business rule uses invalid regex %s: %s" % (pattern, regerr)
else:
if not items:
error = "Business rule got an empty result for pattern %s" % pattern
# Checks if we got result
if error:
if running is False:
node.configuration_errors.append(error)
else:
# As business rules are re-evaluated at run time on
# each scheduling loop, if the rule becomes invalid
# because of a badly written macro modulation, it
# should be notified upper for the error to be
# displayed in the check output.
raise Exception(error)
return node
# Creates dependency node subtree
# here we have Alignak SchedulingItem object (Host/Service)
for item in items:
# Creates a host/service node
son = DependencyNode()
son.operand = item.__class__.my_type
son.sons.append(item.uuid) # Only store the uuid, not the full object.
# Appends it to wrapping node
node.sons.append(son)
node.switch_zeros_of_values()
return node | [
"Expand a host or service expression into a dependency node tree\n using (host|service)group membership, regex, or labels as item selector.\n\n :param pattern: pattern to parse\n :type pattern: str\n :param hosts: hosts list, used to find a specific host\n :type hosts: alignak.objects.host.Host\n :param services: services list, used to find a specific service\n :type services: alignak.objects.service.Service\n :param running: rules are evaluated at run time and parsing. True means runtime\n :type running: bool\n :return: root node of parsed tree\n :rtype: alignak.dependencynode.DependencyNode\n "
] |
Please provide a description of the function:def get_host_filters(self, expr):
# pylint: disable=too-many-return-statements
if expr == "*":
return [filter_any]
match = re.search(r"^([%s]+):(.*)" % self.host_flags, expr)
if match is None:
return [filter_host_by_name(expr)]
flags, expr = match.groups()
if "g" in flags:
return [filter_host_by_group(expr)]
if "r" in flags:
return [filter_host_by_regex(expr)]
if "l" in flags:
return [filter_host_by_bp_rule_label(expr)]
if "t" in flags:
return [filter_host_by_tag(expr)]
return [filter_none] | [
"Generates host filter list corresponding to the expression ::\n\n * '*' => any\n * 'g' => group filter\n * 'r' => regex name filter\n * 'l' => bp rule label filter\n * 't' => tag filter\n * '' => none filter\n * No flag match => host name filter\n\n :param expr: expression to parse\n :type expr: str\n :return: filter list\n :rtype: list\n "
] |
Please provide a description of the function:def get_srv_host_filters(self, expr):
# pylint: disable=too-many-return-statements
if expr == "*":
return [filter_any]
match = re.search(r"^([%s]+):(.*)" % self.host_flags, expr)
if match is None:
return [filter_service_by_host_name(expr)]
flags, expr = match.groups()
if "g" in flags:
return [filter_service_by_hostgroup_name(expr)]
if "r" in flags:
return [filter_service_by_regex_host_name(expr)]
if "l" in flags:
return [filter_service_by_host_bp_rule_label(expr)]
if "t" in flags:
return [filter_service_by_host_tag_name(expr)]
return [filter_none] | [
"Generates service filter list corresponding to the expression ::\n\n * '*' => any\n * 'g' => hostgroup filter\n * 'r' => host regex name filter\n * 'l' => host bp rule label filter\n * 't' => tag filter\n * '' => none filter\n * No flag match => host name filter\n\n :param expr: expression to parse\n :type expr: str\n :return: filter list\n :rtype: list\n "
] |
Please provide a description of the function:def get_srv_service_filters(self, expr):
if expr == "*":
return [filter_any]
match = re.search(r"^([%s]+):(.*)" % self.service_flags, expr)
if match is None:
return [filter_service_by_name(expr)]
flags, expr = match.groups()
if "g" in flags:
return [filter_service_by_servicegroup_name(expr)]
if "r" in flags:
return [filter_service_by_regex_name(expr)]
if "l" in flags:
return [filter_service_by_bp_rule_label(expr)]
return [filter_none] | [
"Generates service filter list corresponding to the expression ::\n\n * '*' => any\n * 'g' => servicegroup filter\n * 'r' => service regex name filter\n * 'l' => service bp rule label filter\n * 't' => tag filter\n * '' => none filter\n * No flag match => service name filter\n\n :param expr: expression to parse\n :type expr: str\n :return: filter list\n :rtype: list\n "
] |
Please provide a description of the function:def serialize(self):
res = super(Timeperiod, self).serialize()
res['dateranges'] = []
for elem in self.dateranges:
res['dateranges'].append({'__sys_python_module__': "%s.%s" % (elem.__module__,
elem.__class__.__name__),
'content': elem.serialize()})
return res | [
"This function serialize into a simple dict object.\n It is used when transferring data to other daemons over the network (http)\n\n Here we directly return all attributes\n\n :return: json representation of a Timeperiod\n :rtype: dict\n "
] |
Please provide a description of the function:def get_raw_import_values(self): # pragma: no cover, deprecation
properties = ['timeperiod_name', 'alias', 'use', 'register']
res = {}
for prop in properties:
if hasattr(self, prop):
val = getattr(self, prop)
res[prop] = val
# Now the unresolved one. The only way to get ride of same key things is to put
# directly the full value as the key
for other in self.unresolved:
res[other] = ''
return res | [
"\n Get some properties of timeperiod (timeperiod is a bit different\n from classic item)\n\n TODO: never called anywhere, still useful?\n\n :return: a dictionnary of some properties\n :rtype: dict\n "
] |
Please provide a description of the function:def is_time_valid(self, timestamp):
if hasattr(self, 'exclude'):
for daterange in self.exclude:
if daterange.is_time_valid(timestamp):
return False
for daterange in self.dateranges:
if daterange.is_time_valid(timestamp):
return True
return False | [
"\n Check if a time is valid or not\n\n :return: time is valid or not\n :rtype: bool\n "
] |
Please provide a description of the function:def get_min_from_t(self, timestamp):
mins_incl = []
for daterange in self.dateranges:
mins_incl.append(daterange.get_min_from_t(timestamp))
return min(mins_incl) | [
"\n Get the first time > timestamp which is valid\n\n :param timestamp: number of seconds\n :type timestamp: int\n :return: number of seconds\n :rtype: int\n TODO: not used, so delete it\n "
] |
Please provide a description of the function:def check_and_log_activation_change(self):
now = int(time.time())
was_active = self.is_active
self.is_active = self.is_time_valid(now)
# If we got a change, log it!
if self.is_active != was_active:
_from = 0
_to = 0
# If it's the start, get a special value for was
if not self.activated_once:
_from = -1
self.activated_once = True
if was_active:
_from = 1
if self.is_active:
_to = 1
# Now raise the log
brok = make_monitoring_log(
'info', 'TIMEPERIOD TRANSITION: %s;%d;%d' % (self.get_name(), _from, _to)
)
return brok
return None | [
"\n Will look for active/un-active change of timeperiod.\n In case it change, we log it like:\n [1327392000] TIMEPERIOD TRANSITION: <name>;<from>;<to>\n\n States of is_active:\n -1: default value when start\n 0: when timeperiod end\n 1: when timeperiod start\n\n :return: None or a brok if TP changed\n "
] |
Please provide a description of the function:def clean_cache(self):
now = int(time.time())
t_to_del = []
for timestamp in self.cache:
if timestamp < now:
t_to_del.append(timestamp)
for timestamp in t_to_del:
del self.cache[timestamp]
# same for the invalid cache
t_to_del = []
for timestamp in self.invalid_cache:
if timestamp < now:
t_to_del.append(timestamp)
for timestamp in t_to_del:
del self.invalid_cache[timestamp] | [
"\n Clean cache with entries older than now because not used in future ;)\n\n :return: None\n "
] |
Please provide a description of the function:def get_next_valid_time_from_t(self, timestamp):
# pylint: disable=too-many-branches
timestamp = int(timestamp)
original_t = timestamp
res_from_cache = self.find_next_valid_time_from_cache(timestamp)
if res_from_cache is not None:
return res_from_cache
still_loop = True
# Loop for all minutes...
while still_loop:
local_min = None
# Ok, not in cache...
dr_mins = []
for daterange in self.dateranges:
dr_mins.append(daterange.get_next_valid_time_from_t(timestamp))
s_dr_mins = sorted([d for d in dr_mins if d is not None])
for t01 in s_dr_mins:
if not self.exclude and still_loop:
# No Exclude so we are good
local_min = t01
still_loop = False
else:
for timeperiod in self.exclude:
if not timeperiod.is_time_valid(t01) and still_loop:
# OK we found a date that is not valid in any exclude timeperiod
local_min = t01
still_loop = False
if local_min is None:
# Looking for next invalid date
exc_mins = []
if s_dr_mins != []:
for timeperiod in self.exclude:
exc_mins.append(timeperiod.get_next_invalid_time_from_t(s_dr_mins[0]))
s_exc_mins = sorted([d for d in exc_mins if d is not None])
if s_exc_mins != []:
local_min = s_exc_mins[0]
if local_min is None:
still_loop = False
else:
timestamp = local_min
# No loop more than one year
if timestamp > original_t + 3600 * 24 * 366 + 1:
still_loop = False
local_min = None
# Ok, we update the cache...
self.cache[original_t] = local_min
return local_min | [
"\n Get next valid time. If it's in cache, get it, otherwise define it.\n The limit to find it is 1 year.\n\n :param timestamp: number of seconds\n :type timestamp: int or float\n :return: Nothing or time in seconds\n :rtype: None or int\n "
] |
Please provide a description of the function:def get_next_invalid_time_from_t(self, timestamp):
# pylint: disable=too-many-branches
timestamp = int(timestamp)
original_t = timestamp
dr_mins = []
for daterange in self.dateranges:
timestamp = original_t
cont = True
while cont:
start = daterange.get_next_valid_time_from_t(timestamp)
if start is not None:
end = daterange.get_next_invalid_time_from_t(start)
dr_mins.append((start, end))
timestamp = end
else:
cont = False
if timestamp > original_t + (3600 * 24 * 365):
cont = False
periods = merge_periods(dr_mins)
# manage exclude periods
dr_mins = []
for exclude in self.exclude:
for daterange in exclude.dateranges:
timestamp = original_t
cont = True
while cont:
start = daterange.get_next_valid_time_from_t(timestamp)
if start is not None:
end = daterange.get_next_invalid_time_from_t(start)
dr_mins.append((start, end))
timestamp = end
else:
cont = False
if timestamp > original_t + (3600 * 24 * 365):
cont = False
if not dr_mins:
periods_exclude = []
else:
periods_exclude = merge_periods(dr_mins)
if len(periods) >= 1:
# if first valid period is after original timestamp, the first invalid time
# is the original timestamp
if periods[0][0] > original_t:
return original_t
# check the first period + first period of exclude
if len(periods_exclude) >= 1:
if periods_exclude[0][0] < periods[0][1]:
return periods_exclude[0][0]
return periods[0][1]
return original_t | [
"\n Get the next invalid time\n\n :param timestamp: timestamp in seconds (of course)\n :type timestamp: int or float\n :return: timestamp of next invalid time\n :rtype: int or float\n "
] |
Please provide a description of the function:def is_correct(self):
state = True
for daterange in self.dateranges:
good = daterange.is_correct()
if not good:
self.add_error("[timeperiod::%s] invalid daterange '%s'"
% (self.get_name(), daterange))
state &= good
# Warn about non correct entries
for entry in self.invalid_entries:
self.add_error("[timeperiod::%s] invalid entry '%s'" % (self.get_name(), entry))
return super(Timeperiod, self).is_correct() and state | [
"Check if this object configuration is correct ::\n\n * Check if dateranges of timeperiod are valid\n * Call our parent class is_correct checker\n\n :return: True if the configuration is correct, otherwise False if at least one daterange\n is not correct\n :rtype: bool\n "
] |
Please provide a description of the function:def resolve_daterange(self, dateranges, entry):
# pylint: disable=too-many-return-statements,too-many-statements,
# pylint: disable=too-many-branches,too-many-locals
res = re.search(
r'(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2}) / (\d+)[\s\t]*([0-9:, -]+)', entry
)
if res is not None:
(syear, smon, smday, eyear, emon, emday, skip_interval, other) = res.groups()
data = {'syear': syear, 'smon': smon, 'smday': smday, 'swday': 0,
'swday_offset': 0, 'eyear': eyear, 'emon': emon, 'emday': emday,
'ewday': 0, 'ewday_offset': 0, 'skip_interval': skip_interval,
'other': other}
dateranges.append(CalendarDaterange(data))
return
res = re.search(r'(\d{4})-(\d{2})-(\d{2}) / (\d+)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
(syear, smon, smday, skip_interval, other) = res.groups()
eyear = syear
emon = smon
emday = smday
data = {'syear': syear, 'smon': smon, 'smday': smday, 'swday': 0,
'swday_offset': 0, 'eyear': eyear, 'emon': emon, 'emday': emday,
'ewday': 0, 'ewday_offset': 0, 'skip_interval': skip_interval,
'other': other}
dateranges.append(CalendarDaterange(data))
return
res = re.search(
r'(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2})[\s\t]*([0-9:, -]+)', entry
)
if res is not None:
(syear, smon, smday, eyear, emon, emday, other) = res.groups()
data = {'syear': syear, 'smon': smon, 'smday': smday, 'swday': 0,
'swday_offset': 0, 'eyear': eyear, 'emon': emon, 'emday': emday,
'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0,
'other': other}
dateranges.append(CalendarDaterange(data))
return
res = re.search(r'(\d{4})-(\d{2})-(\d{2})[\s\t]*([0-9:, -]+)', entry)
if res is not None:
(syear, smon, smday, other) = res.groups()
eyear = syear
emon = smon
emday = smday
data = {'syear': syear, 'smon': smon, 'smday': smday, 'swday': 0,
'swday_offset': 0, 'eyear': eyear, 'emon': emon, 'emday': emday,
'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0,
'other': other}
dateranges.append(CalendarDaterange(data))
return
res = re.search(
r'([a-z]*) ([\d-]+) ([a-z]*) - ([a-z]*) ([\d-]+) ([a-z]*) / (\d+)[\s\t]*([0-9:, -]+)',
entry
)
if res is not None:
(swday, swday_offset, smon, ewday,
ewday_offset, emon, skip_interval, other) = res.groups()
smon_id = Daterange.get_month_id(smon)
emon_id = Daterange.get_month_id(emon)
swday_id = Daterange.get_weekday_id(swday)
ewday_id = Daterange.get_weekday_id(ewday)
data = {'syear': 0, 'smon': smon_id, 'smday': 0, 'swday': swday_id,
'swday_offset': swday_offset, 'eyear': 0, 'emon': emon_id, 'emday': 0,
'ewday': ewday_id, 'ewday_offset': ewday_offset, 'skip_interval': skip_interval,
'other': other}
dateranges.append(MonthWeekDayDaterange(data))
return
res = re.search(r'([a-z]*) ([\d-]+) - ([a-z]*) ([\d-]+) / (\d+)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
(t00, smday, t01, emday, skip_interval, other) = res.groups()
if t00 in Daterange.weekdays and t01 in Daterange.weekdays:
swday = Daterange.get_weekday_id(t00)
ewday = Daterange.get_weekday_id(t01)
swday_offset = smday
ewday_offset = emday
data = {'syear': 0, 'smon': 0, 'smday': 0, 'swday': swday,
'swday_offset': swday_offset, 'eyear': 0, 'emon': 0, 'emday': 0,
'ewday': ewday, 'ewday_offset': ewday_offset,
'skip_interval': skip_interval, 'other': other}
dateranges.append(WeekDayDaterange(data))
return
if t00 in Daterange.months and t01 in Daterange.months:
smon = Daterange.get_month_id(t00)
emon = Daterange.get_month_id(t01)
data = {'syear': 0, 'smon': smon, 'smday': smday, 'swday': 0, 'swday_offset': 0,
'eyear': 0, 'emon': emon, 'emday': emday, 'ewday': 0, 'ewday_offset': 0,
'skip_interval': skip_interval, 'other': other}
dateranges.append(MonthDateDaterange(data))
return
if t00 == 'day' and t01 == 'day':
data = {'syear': 0, 'smon': 0, 'smday': smday, 'swday': 0, 'swday_offset': 0,
'eyear': 0, 'emon': 0, 'emday': emday, 'ewday': 0, 'ewday_offset': 0,
'skip_interval': skip_interval, 'other': other}
dateranges.append(MonthDayDaterange(data))
return
res = re.search(r'([a-z]*) ([\d-]+) - ([\d-]+) / (\d+)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
(t00, smday, emday, skip_interval, other) = res.groups()
if t00 in Daterange.weekdays:
swday = Daterange.get_weekday_id(t00)
swday_offset = smday
ewday = swday
ewday_offset = emday
data = {'syear': 0, 'smon': 0, 'smday': 0, 'swday': swday,
'swday_offset': swday_offset, 'eyear': 0, 'emon': 0, 'emday': 0,
'ewday': ewday, 'ewday_offset': ewday_offset,
'skip_interval': skip_interval, 'other': other}
dateranges.append(WeekDayDaterange(data))
return
if t00 in Daterange.months:
smon = Daterange.get_month_id(t00)
emon = smon
data = {'syear': 0, 'smon': smon, 'smday': smday, 'swday': 0, 'swday_offset': 0,
'eyear': 0, 'emon': emon, 'emday': emday, 'ewday': 0, 'ewday_offset': 0,
'skip_interval': skip_interval, 'other': other}
dateranges.append(MonthDateDaterange(data))
return
if t00 == 'day':
data = {'syear': 0, 'smon': 0, 'smday': smday, 'swday': 0, 'swday_offset': 0,
'eyear': 0, 'emon': 0, 'emday': emday, 'ewday': 0, 'ewday_offset': 0,
'skip_interval': skip_interval, 'other': other}
dateranges.append(MonthDayDaterange(data))
return
res = re.search(
r'([a-z]*) ([\d-]+) ([a-z]*) - ([a-z]*) ([\d-]+) ([a-z]*) [\s\t]*([0-9:, -]+)', entry
)
if res is not None:
(swday, swday_offset, smon, ewday, ewday_offset, emon, other) = res.groups()
smon_id = Daterange.get_month_id(smon)
emon_id = Daterange.get_month_id(emon)
swday_id = Daterange.get_weekday_id(swday)
ewday_id = Daterange.get_weekday_id(ewday)
data = {'syear': 0, 'smon': smon_id, 'smday': 0, 'swday': swday_id,
'swday_offset': swday_offset, 'eyear': 0, 'emon': emon_id, 'emday': 0,
'ewday': ewday_id, 'ewday_offset': ewday_offset, 'skip_interval': 0,
'other': other}
dateranges.append(MonthWeekDayDaterange(data))
return
res = re.search(r'([a-z]*) ([\d-]+) - ([\d-]+)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
(t00, smday, emday, other) = res.groups()
if t00 in Daterange.weekdays:
swday = Daterange.get_weekday_id(t00)
swday_offset = smday
ewday = swday
ewday_offset = emday
data = {'syear': 0, 'smon': 0, 'smday': 0, 'swday': swday,
'swday_offset': swday_offset, 'eyear': 0, 'emon': 0, 'emday': 0,
'ewday': ewday, 'ewday_offset': ewday_offset, 'skip_interval': 0,
'other': other}
dateranges.append(WeekDayDaterange(data))
return
if t00 in Daterange.months:
smon = Daterange.get_month_id(t00)
emon = smon
data = {'syear': 0, 'smon': smon, 'smday': smday, 'swday': 0,
'swday_offset': 0, 'eyear': 0, 'emon': emon, 'emday': emday,
'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0,
'other': other}
dateranges.append(MonthDateDaterange(data))
return
if t00 == 'day':
data = {'syear': 0, 'smon': 0, 'smday': smday, 'swday': 0,
'swday_offset': 0, 'eyear': 0, 'emon': 0, 'emday': emday,
'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0,
'other': other}
dateranges.append(MonthDayDaterange(data))
return
res = re.search(r'([a-z]*) ([\d-]+) - ([a-z]*) ([\d-]+)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
(t00, smday, t01, emday, other) = res.groups()
if t00 in Daterange.weekdays and t01 in Daterange.weekdays:
swday = Daterange.get_weekday_id(t00)
ewday = Daterange.get_weekday_id(t01)
swday_offset = smday
ewday_offset = emday
data = {'syear': 0, 'smon': 0, 'smday': 0, 'swday': swday,
'swday_offset': swday_offset, 'eyear': 0, 'emon': 0, 'emday': 0,
'ewday': ewday, 'ewday_offset': ewday_offset, 'skip_interval': 0,
'other': other}
dateranges.append(WeekDayDaterange(data))
return
if t00 in Daterange.months and t01 in Daterange.months:
smon = Daterange.get_month_id(t00)
emon = Daterange.get_month_id(t01)
data = {'syear': 0, 'smon': smon, 'smday': smday, 'swday': 0,
'swday_offset': 0, 'eyear': 0, 'emon': emon, 'emday': emday,
'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0,
'other': other}
dateranges.append(MonthDateDaterange(data))
return
if t00 == 'day' and t01 == 'day':
data = {'syear': 0, 'smon': 0, 'smday': smday, 'swday': 0,
'swday_offset': 0, 'eyear': 0, 'emon': 0, 'emday': emday,
'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0,
'other': other}
dateranges.append(MonthDayDaterange(data))
return
res = re.search(r'([a-z]*) ([\d-]+) ([a-z]*)[\s\t]*([0-9:, -]+)', entry)
if res is not None:
(t00, t02, t01, other) = res.groups()
if t00 in Daterange.weekdays and t01 in Daterange.months:
swday = Daterange.get_weekday_id(t00)
smon = Daterange.get_month_id(t01)
emon = smon
ewday = swday
ewday_offset = t02
data = {'syear': 0, 'smon': smon, 'smday': 0, 'swday': swday,
'swday_offset': t02, 'eyear': 0, 'emon': emon, 'emday': 0,
'ewday': ewday, 'ewday_offset': ewday_offset, 'skip_interval': 0,
'other': other}
dateranges.append(MonthWeekDayDaterange(data))
return
if not t01:
if t00 in Daterange.weekdays:
swday = Daterange.get_weekday_id(t00)
swday_offset = t02
ewday = swday
ewday_offset = swday_offset
data = {'syear': 0, 'smon': 0, 'smday': 0, 'swday': swday,
'swday_offset': swday_offset, 'eyear': 0, 'emon': 0, 'emday': 0,
'ewday': ewday, 'ewday_offset': ewday_offset, 'skip_interval': 0,
'other': other}
dateranges.append(WeekDayDaterange(data))
return
if t00 in Daterange.months:
smon = Daterange.get_month_id(t00)
emon = smon
emday = t02
data = {'syear': 0, 'smon': smon, 'smday': t02, 'swday': 0,
'swday_offset': 0, 'eyear': 0, 'emon': emon, 'emday': emday,
'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0,
'other': other}
dateranges.append(MonthDateDaterange(data))
return
if t00 == 'day':
emday = t02
data = {'syear': 0, 'smon': 0, 'smday': t02, 'swday': 0,
'swday_offset': 0, 'eyear': 0, 'emon': 0, 'emday': emday,
'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0,
'other': other}
dateranges.append(MonthDayDaterange(data))
return
res = re.search(r'([a-z]*)[\s\t]+([0-9:, -]+)', entry)
if res is not None:
(t00, other) = res.groups()
if t00 in Daterange.weekdays:
day = t00
data = {'day': day, 'other': other}
dateranges.append(StandardDaterange(data))
return
logger.info("[timeentry::%s] no match for %s", self.get_name(), entry)
self.invalid_entries.append(entry) | [
"\n Try to solve dateranges (special cases)\n\n :param dateranges: dateranges\n :type dateranges: list\n :param entry: property of timeperiod\n :type entry: string\n :return: None\n "
] |
def explode(self):
    """Resolve every pending (unresolved) daterange entry of this timeperiod.

    Each raw entry is handed to ``resolve_daterange`` which appends the
    resulting daterange object to ``self.dateranges``; the pending list is
    then emptied.

    :return: None
    """
    for raw_entry in self.unresolved:
        self.resolve_daterange(self.dateranges, raw_entry)
    self.unresolved = []
def linkify(self, timeperiods):
    """Replace the timeperiod names of the exclude list with their uuids.

    Unknown timeperiod names are reported through ``add_error``. In all
    cases ``self.exclude`` ends up as a list of uuids (possibly empty).

    :param timeperiods: Timeperiods object used to resolve names
    :return: None
    """
    resolved = []
    if getattr(self, 'exclude', None):
        logger.debug("[timeentry::%s] have excluded %s", self.get_name(), self.exclude)
        for raw_name in self.exclude:
            excluded_tp = timeperiods.find_by_name(raw_name.strip())
            if excluded_tp is None:
                self.add_error("[timeentry::%s] unknown %s timeperiod"
                               % (self.get_name(), raw_name))
            else:
                resolved.append(excluded_tp.uuid)
    self.exclude = resolved
def check_exclude_rec(self):
    # pylint: disable=access-member-before-definition
    """Detect exclusion loops by tagging visited timeperiods.

    If this timeperiod is already tagged, an exclusion loop exists: an
    error is recorded and False is returned. Otherwise it is tagged and
    all its excluded timeperiods are visited recursively.

    :return: False when a loop is detected on this node, True otherwise
    :rtype: bool
    """
    if self.rec_tag:
        self.add_error("[timeentry::%s] is in a loop in exclude parameter"
                       % (self.get_name()))
        return False

    self.rec_tag = True
    for excluded_tp in self.exclude:
        excluded_tp.check_exclude_rec()
    return True
def explode(self):
    """Ask every timeperiod of the collection to resolve its entries.

    :return: None
    """
    for timeperiod in self.items.values():
        timeperiod.explode()
def linkify(self):
    """Resolve the exclusion references of every timeperiod in the collection.

    :return: None
    """
    for timeperiod in self.items.values():
        timeperiod.linkify(self)
def get_unresolved_properties_by_inheritance(self, timeperiod):
    """Inherit the unresolved entries (raw dateranges) from the templates.

    The unresolved entries of each template of *timeperiod* are appended
    to the timeperiod's own unresolved list.

    :param timeperiod: timeperiod to fill from its templates
    :return: None
    """
    for template_id in timeperiod.templates:
        timeperiod.unresolved.extend(self.templates[template_id].unresolved)
def apply_inheritance(self):
    """Apply templates inheritance to all timeperiods.

    The only interesting standard property to inherit is 'exclude';
    customs and the unresolved raw dateranges are then inherited too.

    :return: None
    """
    self.apply_partial_inheritance('exclude')
    for timeperiod in self:
        self.get_customs_properties_by_inheritance(timeperiod)
    # The unresolved properties are the raw daterange entries
    for timeperiod in self:
        self.get_unresolved_properties_by_inheritance(timeperiod)
def is_correct(self):
    """Check that every timeperiod of the collection is valid.

    Exclusion loops are searched first (using a temporary ``rec_tag``
    attribute on each item), then each timeperiod is checked individually
    and its error/warning messages are collected.

    :return: True if everything is correct, otherwise False
    :rtype: bool
    """
    valid = True
    all_timeperiods = list(self.items.values())

    # We do not want a same timeperiod to be explored again and again,
    # so we tag the visited ones
    for timeperiod in all_timeperiods:
        timeperiod.rec_tag = False

    for timeperiod in all_timeperiods:
        # Reset all tags before each root exploration
        for other in all_timeperiods:
            other.rec_tag = False
        valid = timeperiod.check_exclude_rec() and valid

    # Clean the tags and collect the warning/error messages
    for timeperiod in all_timeperiods:
        del timeperiod.rec_tag

        # Now the item own checks
        if not timeperiod.is_correct():
            valid = False
            source = getattr(timeperiod, 'imported_from', "unknown source")
            self.add_error("Configuration in %s::%s is incorrect; from: %s" % (
                timeperiod.my_type, timeperiod.get_name(), source
            ))
            self.configuration_errors += timeperiod.configuration_errors
            self.configuration_warnings += timeperiod.configuration_warnings

    # And check all timeperiods for correct (sunday is false)
    for timeperiod in self:
        valid = timeperiod.is_correct() and valid

    return valid
def get_override_configuration(self):
    """Collect the properties flagged as 'override'.

    Some parameters are 'overridden parameters' (e.g. use_timezone): the
    scheduler mixes them with the standard configuration sent by the
    arbiter.

    :return: mapping of overridable property names to their current value
    :rtype: dict
    """
    # Dict comprehension instead of the manual loop over properties.items()
    return {prop: getattr(self, prop)
            for prop, entry in self.__class__.properties.items()
            if entry.override}
def check_reachable(self, forced=False, test=False):
    # pylint: disable=too-many-branches
    """Check all daemons state (reachable or not).

    ``update_infos`` returns None when no ping has been executed (too
    early...), or True / False according to the ping and managed
    configuration result. When some daemons are found unconfigured and a
    configuration is (or was) available, a dispatch is triggered.

    If *test* is True, the satellites are set alive without any real dialog.

    NOTE(review): ``all_ok`` is never set to False in this body, so the
    function currently always returns True — confirm whether callers rely
    on the returned value.

    :return: True if all daemons are reachable
    """
    all_ok = True
    self.not_configured = []
    for daemon_link in self.all_daemons_links:
        if daemon_link == self.arbiter_link:
            # I exclude myself from the polling, sure I am reachable ;)
            continue

        if not daemon_link.active:
            # I exclude the daemons that are not active
            continue

        if test:
            # Unit tests: set the satellite as alive without any dialog
            daemon_link.set_alive()
            daemon_link.running_id = time.time()

        # Force the daemon communication only if a configuration is prepared
        result = False
        try:
            result = daemon_link.update_infos(forced=(forced or self.new_to_dispatch),
                                              test=test)
        except LinkError:
            logger.warning("Daemon connection failed, I could not get fresh information.")

        if result is False:
            # Got a timeout !
            self.not_configured.append(daemon_link)
            continue

        if result is None:
            # Come back later ... too recent daemon connection!
            continue

        if result:
            # Got a managed configuration
            logger.debug("The %s '%s' manages %s",
                         daemon_link.type, daemon_link.name, daemon_link.cfg_managed)
            if not self.first_dispatch_done:
                # I just (re)started the arbiter
                self.not_configured.append(daemon_link)
        else:
            # No managed configuration - a new dispatching is necessary but only
            # if we already dispatched a configuration
            # Probably a freshly restarted daemon ;)
            logger.debug("The %s %s do not have a configuration",
                         daemon_link.type, daemon_link.name)
            # the daemon is not yet configured
            self.not_configured.append(daemon_link)
            daemon_link.configuration_sent = False

    if self.not_configured and self.new_to_dispatch and not self.first_dispatch_done:
        logger.info("Dispatcher, these daemons are not configured: %s, "
                    "and a configuration is ready to dispatch, run the dispatching...",
                    ','.join(d.name for d in self.not_configured))
        self.dispatch_ok = False
        self.dispatch(test=test)
    elif self.not_configured and self.first_dispatch_done:
        logger.info("Dispatcher, these daemons are not configured: %s, "
                    "and a configuration has yet been dispatched dispatch, "
                    "a new dispatch is required...",
                    ','.join(d.name for d in self.not_configured))
        self.dispatch_ok = False
        # Avoid exception because dispatch is not accepted!
        self.new_to_dispatch = True
        self.first_dispatch_done = False
        self.dispatch(test=test)

    return all_ok
def check_status_and_get_events(self):
    # pylint: disable=too-many-branches
    """Poll every active satellite for its statistics and its events.

    Statistics are stored on each daemon link (with a '_freshness'
    timestamp); the collected events of all the daemons are returned.

    NOTE(review): the local ``statistics`` dict is filled but never
    returned nor read afterwards — confirm whether it is still needed.

    :return: list of events collected from all the daemons
    :rtype: list
    """
    statistics = {}
    events = []
    for daemon_link in self.all_daemons_links:
        if daemon_link == self.arbiter_link:
            # I exclude myself from the polling, sure I am reachable ;)
            continue

        if not daemon_link.active:
            # I exclude the daemons that are not active
            continue

        try:
            # Do not get the details to avoid overloading the communication
            daemon_link.statistics = daemon_link.get_daemon_stats(details=False)
            if daemon_link.statistics:
                daemon_link.statistics['_freshness'] = int(time.time())
                statistics[daemon_link.name] = daemon_link.statistics
                logger.debug("Daemon %s statistics: %s",
                             daemon_link.name, daemon_link.statistics)
        except LinkError:
            logger.warning("Daemon connection failed, I could not get statistics.")

        try:
            fetched = daemon_link.get_events()
            if fetched:
                events.extend(fetched)
                logger.debug("Daemon %s has %d events: %s",
                             daemon_link.name, len(fetched), fetched)
        except LinkError:
            logger.warning("Daemon connection failed, I could not get events.")

    return events
def check_dispatch(self):  # pylint: disable=too-many-branches
    """Check that all active satellites have a configuration dispatched.

    A DispatcherError exception is raised if no arbiter link exists or if
    no configuration was dispatched yet. When an assigned daemon no longer
    manages its configuration part, a new dispatch is requested.

    :return: True when the dispatching is fine
    """
    if not self.arbiter_link:
        raise DispatcherError("Dispatcher configuration problem: no valid arbiter link!")

    if not self.first_dispatch_done:
        raise DispatcherError("Dispatcher cannot check the dispatching, "
                              "because no configuration is dispatched!")

    # We check for configuration parts to be dispatched on alive schedulers.
    # If not dispatched, we need a dispatch :) and if dispatched on a failed node,
    # remove the association, and need a new dispatch
    self.dispatch_ok = True
    some_satellites_are_missing = False

    # Get fresh information about the satellites
    logger.info("Getting fresh information")
    self.check_reachable(forced=True)

    logger.info("Checking realms dispatch:")
    for realm in self.alignak_conf.realms:
        logger.info("- realm %s:", realm.name)
        for cfg_part in list(realm.parts.values()):
            logger.info("  .configuration %s", cfg_part)

            # This should never happen, logically!
            if not cfg_part.scheduler_link:
                self.dispatch_ok = False
                logger.error("- realm %s:", realm.name)
                logger.error("  .configuration %s", cfg_part)
                logger.error("  not managed by any scheduler!")
                continue

            logger.debug("  checking scheduler %s configuration: %s",
                         cfg_part.scheduler_link.name, cfg_part.instance_id)

            # Maybe the scheduler restarts, so it is alive but without
            # the expected configuration; set the configuration part as unmanaged
            # and ask for a new configuration dispatch
            if not cfg_part.scheduler_link.manages(cfg_part):
                # We ask for a new dispatching
                self.dispatch_ok = False
                if cfg_part.scheduler_link.cfg_managed is None:
                    logger.warning("  %s not yet !.",
                                   cfg_part.scheduler_link.name)
                else:
                    logger.warning("  the assigned scheduler %s does not manage the "
                                   "configuration; asking for a new configuration dispatch.",
                                   cfg_part.scheduler_link.name)
                cfg_part.scheduler_link.cfg_to_manage = None
                cfg_part.scheduler_link.push_flavor = ''
                cfg_part.scheduler_link.hash = ''
                cfg_part.scheduler_link.need_conf = True

            for sat_type in ('reactionner', 'poller', 'broker', 'receiver'):
                logger.debug("  checking %ss configuration", sat_type)
                # We must have the correct number of satellites or we are not happy
                # So we are sure to raise a dispatch every loop a satellite is missing
                if (len(realm.to_satellites_managed_by[sat_type][cfg_part.instance_id]) <
                        realm.get_nb_of_must_have_satellites(sat_type)):
                    some_satellites_are_missing = True
                    logger.warning("  missing %s satellites: %s / %s!", sat_type,
                                   realm.to_satellites_managed_by[sat_type][
                                       cfg_part.instance_id],
                                   realm.get_nb_of_must_have_satellites(sat_type))

                    # TODO: less violent! Must only resend to the one needing?
                    # must be caught by satellite who sees that
                    # it already has the conf and do nothing
                    self.dispatch_ok = False  # so we will redispatch all
                    realm.to_satellites_need_dispatch[sat_type][cfg_part.instance_id] = True
                    realm.to_satellites_managed_by[sat_type][cfg_part.instance_id] = []

                for satellite in realm.to_satellites_managed_by[sat_type][cfg_part.instance_id]:
                    # Maybe the sat was marked as not alive, but still in
                    # to_satellites_managed_by. That means that a new dispatch
                    # is needed
                    # Or maybe it is alive but I thought that this satellite
                    # managed the conf and it doesn't.
                    # I ask a full redispatch of these cfg for both cases
                    if not satellite.reachable:
                        logger.info("  the %s %s is not reachable; "
                                    "assuming a correct configuration dispatch.",
                                    sat_type, satellite.name)
                        continue

                    if not satellite.manages(cfg_part):
                        logger.warning("  the %s %s does not manage "
                                       "the correct configuration; "
                                       "asking for a new configuration dispatch.",
                                       sat_type, satellite.name)
                        self.dispatch_ok = False
                        realm.to_satellites_need_dispatch[sat_type][cfg_part.instance_id] = True
                        realm.to_satellites_managed_by[sat_type][cfg_part.instance_id] = []

    if some_satellites_are_missing:
        logger.warning("Some satellites are not available for the current configuration")

    return self.dispatch_ok
def get_satellites_list(self, sat_type):
    """Get a sorted satellite list: master links first, then spares.

    :param sat_type: type of the required satellites (arbiters, schedulers, ...)
    :type sat_type: str
    :return: sorted satellites list (empty for an unknown type)
    :rtype: list
    """
    known_types = ('arbiters', 'schedulers', 'reactionners',
                   'brokers', 'receivers', 'pollers')
    if sat_type not in known_types:
        # Unknown satellite type: nothing to return
        return []
    return master_then_spare([satellite for satellite in getattr(self, sat_type)])
def get_scheduler_ordered_list(self, realm):
    """Get a sorted scheduler link list for a specific realm.

    The returned list is reversed so it can be consumed with ``pop()``:
    dead links first, then spares, then alive non-spare links last.

    :param realm: realm we want schedulers from
    :return: ordered scheduler links list
    :rtype: list
    """
    # Partition the realm schedulers into three buckets
    alive, spare, dead = [], [], []
    for scheduler_uuid in realm.schedulers:
        link = self.schedulers[scheduler_uuid]
        if not link.alive:
            dead.append(link)
        elif link.spare:
            spare.append(link)
        else:
            alive.append(link)

    # Alive first, then spare, then dead...
    ordered = alive + spare + dead
    # ... reversed because the caller pops from the end of the list
    ordered.reverse()
    return ordered
def prepare_dispatch(self):
    # pylint:disable=too-many-branches, too-many-statements, too-many-locals
    """Prepare the configuration for every daemon.

    Builds and hashes the configuration of the arbiters, then, realm per
    realm, assigns each configuration part to a scheduler and prepares the
    satellites (reactionner, poller, broker, receiver) configurations.

    This resets the first_dispatch_done flag and sets new_to_dispatch.
    A DispatcherError exception is raised if a configuration is already
    prepared! Unset the new_to_dispatch flag before calling!

    :return: None
    """
    if self.new_to_dispatch:
        raise DispatcherError("A configuration is already prepared!")

    # So we are preparing a new dispatching...
    self.new_to_dispatch = True
    self.first_dispatch_done = False

    # Update Alignak name for all the satellites
    for daemon_link in self.all_daemons_links:
        daemon_link.cfg.update({'alignak_name': self.alignak_conf.alignak_name})

    logger.info("Preparing realms dispatch:")

    # --- Arbiters configuration ---
    master_arbiter_cfg = arbiters_cfg = {}
    for arbiter_link in self.get_satellites_list('arbiters'):
        if not arbiter_link.active:
            # I exclude the daemons that are not active
            continue

        arbiter_cfg = arbiter_link.cfg
        arbiter_cfg.update({
            'managed_hosts_names': [h.get_name() for h in self.alignak_conf.hosts],
            'modules': serialize(arbiter_link.modules, True),
            'managed_conf_id': self.alignak_conf.instance_id,
            'push_flavor': ''
        })

        # Hash the configuration
        cfg_string = json.dumps(arbiter_cfg, sort_keys=True).encode('utf-8')
        arbiter_cfg['hash'] = hashlib.sha1(cfg_string).hexdigest()

        # Update the arbiters list, but do not include the whole conf
        arbiters_cfg[arbiter_link.uuid] = arbiter_cfg['self_conf']

        # A spare arbiter also receives the master list and the whole conf
        if arbiter_link != self.arbiter_link:
            arbiter_cfg.update({
                'arbiters': master_arbiter_cfg,
                'whole_conf': self.alignak_conf.spare_arbiter_conf,
            })

            # Hash the whole configuration
            try:
                s_conf_part = json.dumps(arbiter_cfg, sort_keys=True).encode('utf-8')
            except UnicodeDecodeError:
                pass
            arbiter_cfg['hash'] = hashlib.sha1(s_conf_part).hexdigest()

        # Dump the configuration part size
        pickled_conf = pickle.dumps(arbiter_cfg)
        logger.info(' arbiter configuration size: %d bytes', sys.getsizeof(pickled_conf))

        # The configuration is assigned to the arbiter
        # todo: perhaps this should be done in the realms (like schedulers and satellites)?
        arbiter_link.cfg = arbiter_cfg
        arbiter_link.cfg_to_manage = self.alignak_conf
        arbiter_link.push_flavor = arbiter_cfg['push_flavor']
        arbiter_link.hash = arbiter_cfg['hash']
        arbiter_link.need_conf = False
        arbiter_link.configuration_sent = False

        if arbiter_link == self.arbiter_link:
            # The master arbiter configuration for the other satellites
            master_arbiter_cfg = {self.arbiter_link.uuid: arbiter_cfg['self_conf']}

        logger.info(' arbiter configuration prepared for %s', arbiter_link.name)

    # --- Realms: schedulers then satellites ---
    for realm in self.alignak_conf.realms:
        logger.info("- realm %s: %d configuration part(s)", realm.name, len(realm.parts))

        # parts_to_dispatch is a list of configuration parts built when
        # the configuration is split into parts for the realms and their schedulers
        # Only get the parts that are not yet assigned to a scheduler
        parts_to_dispatch = [cfg for cfg in list(realm.parts.values()) if not cfg.is_assigned]
        if not parts_to_dispatch:
            logger.info(' no configuration to dispatch for this realm!')
            continue

        logger.info(" preparing the dispatch for schedulers:")

        # Now we get all the schedulers of this realm and upper
        schedulers = realm.get_potential_satellites_by_type(
            self.get_satellites_list('schedulers'), 'scheduler')
        if not schedulers:
            logger.error(' no available schedulers in this realm (%s)!', realm)
            continue
        logger.info(" realm schedulers: %s",
                    ','.join([s.get_name() for s in schedulers]))

        for cfg_part in parts_to_dispatch:
            logger.info(" .assigning configuration part %s (%s), name:%s",
                        cfg_part.instance_id, cfg_part.uuid, cfg_part.config_name)

            # we need to loop until the configuration part is assigned to a scheduler
            # or no more scheduler is available
            while True:
                try:
                    scheduler_link = schedulers.pop()
                except IndexError:  # No more schedulers.. not good, no loop
                    # The configuration part do not need to be dispatched anymore
                    # todo: should be managed inside the Realm class!
                    logger.error("No more scheduler link: %s", realm)
                    for sat_type in ('reactionner', 'poller', 'broker', 'receiver'):
                        realm.to_satellites[sat_type][cfg_part.instance_id] = None
                        realm.to_satellites_need_dispatch[sat_type][
                            cfg_part.instance_id] = False
                        realm.to_satellites_managed_by[sat_type][cfg_part.instance_id] = []
                    break

                if not scheduler_link.need_conf:
                    logger.info('[%s] The scheduler %s do not need any configuration, sorry',
                                realm.name, scheduler_link.name)
                    continue

                logger.debug(" preparing configuration part '%s' for the scheduler '%s'",
                             cfg_part.instance_id, scheduler_link.name)
                logger.debug(" - %d hosts, %d services",
                             len(cfg_part.hosts), len(cfg_part.services))

                # Serialization and hashing
                s_conf_part = serialize(realm.parts[cfg_part.instance_id])
                try:
                    s_conf_part = s_conf_part.encode('utf-8')
                except UnicodeDecodeError:
                    pass
                cfg_part.push_flavor = hashlib.sha1(s_conf_part).hexdigest()

                # We generate the scheduler configuration for the satellites:
                # ---
                sat_scheduler_cfg = scheduler_link.give_satellite_cfg()
                sat_scheduler_cfg.update({
                    'managed_hosts_names': [h.get_name() for h in cfg_part.hosts],
                    'managed_conf_id': cfg_part.instance_id,
                    'push_flavor': cfg_part.push_flavor
                })
                # Generate a configuration hash
                cfg_string = json.dumps(sat_scheduler_cfg, sort_keys=True).encode('utf-8')
                sat_scheduler_cfg['hash'] = hashlib.sha1(cfg_string).hexdigest()

                logger.debug(' satellite scheduler configuration: %s', sat_scheduler_cfg)
                for sat_type in ('reactionner', 'poller', 'broker', 'receiver'):
                    realm.to_satellites[sat_type][cfg_part.instance_id] = sat_scheduler_cfg
                    realm.to_satellites_need_dispatch[sat_type][cfg_part.instance_id] = True
                    realm.to_satellites_managed_by[sat_type][cfg_part.instance_id] = []
                # ---

                scheduler_link.cfg.update({
                    # Global instance configuration
                    'instance_id': scheduler_link.instance_id,
                    'instance_name': scheduler_link.name,
                    'schedulers': {scheduler_link.uuid: sat_scheduler_cfg},
                    'arbiters': arbiters_cfg if scheduler_link.manage_arbiters else {},
                    'satellites': realm.get_links_for_a_scheduler(self.pollers,
                                                                  self.reactionners,
                                                                  self.brokers),
                    'modules': serialize(scheduler_link.modules, True),
                    'conf_part': serialize(realm.parts[cfg_part.instance_id]),
                    'managed_conf_id': cfg_part.instance_id,
                    'push_flavor': cfg_part.push_flavor,
                    'override_conf': scheduler_link.get_override_configuration()
                })

                # Hash the whole configuration
                cfg_string = json.dumps(scheduler_link.cfg, sort_keys=True).encode('utf-8')
                scheduler_link.cfg['hash'] = hashlib.sha1(cfg_string).hexdigest()

                # Dump the configuration part size
                pickled_conf = pickle.dumps(scheduler_link.cfg)
                logger.info(" scheduler configuration size: %d bytes",
                            sys.getsizeof(pickled_conf))

                logger.info(" scheduler satellites:")
                satellites = realm.get_links_for_a_scheduler(self.pollers,
                                                             self.reactionners,
                                                             self.brokers)
                for sat_type in satellites:
                    logger.info(" - %s", sat_type)
                    for sat_link_uuid in satellites[sat_type]:
                        satellite = satellites[sat_type][sat_link_uuid]
                        logger.info("  %s", satellite['name'])

                # The configuration part is assigned to a scheduler
                cfg_part.is_assigned = True
                cfg_part.scheduler_link = scheduler_link
                scheduler_link.cfg_to_manage = cfg_part
                scheduler_link.push_flavor = cfg_part.push_flavor
                scheduler_link.hash = scheduler_link.cfg['hash']
                scheduler_link.need_conf = False
                scheduler_link.configuration_sent = False

                logger.info(' configuration %s (%s) assigned to %s',
                            cfg_part.instance_id, cfg_part.push_flavor, scheduler_link.name)

                # The configuration part is assigned to a scheduler, no need to go further ;)
                break

        logger.info(" preparing the dispatch for satellites:")
        for cfg_part in list(realm.parts.values()):
            logger.info(" .configuration part %s (%s), name:%s",
                        cfg_part.instance_id, cfg_part.uuid, cfg_part.config_name)
            for sat_type in ('reactionner', 'poller', 'broker', 'receiver'):
                if cfg_part.instance_id not in realm.to_satellites_need_dispatch[sat_type]:
                    logger.warning(" nothing to dispatch for %ss", sat_type)
                    return

                if not realm.to_satellites_need_dispatch[sat_type][cfg_part.instance_id]:
                    logger.warning(" no need to dispatch to %ss", sat_type)
                    return

                # Get the list of the concerned satellites
                satellites = realm.get_potential_satellites_by_type(self.satellites, sat_type)
                if satellites:
                    logger.info(" realm %ss: %s",
                                sat_type, ','.join([s.get_name() for s in satellites]))
                else:
                    logger.info(" no %s satellites", sat_type)

                # Now we dispatch cfg to every one ask for it
                nb_cfg_prepared = 0
                for sat_link in satellites:
                    if not sat_link.active:
                        # I exclude the daemons that are not active
                        continue

                    if nb_cfg_prepared > realm.get_nb_of_must_have_satellites(sat_type):
                        logger.warning("Too much configuration parts prepared "
                                       "for the expected satellites count. "
                                       "Realm: %s, satellite: %s - prepared: %d out of %d",
                                       realm.name, sat_link.name, nb_cfg_prepared,
                                       realm.get_nb_of_must_have_satellites(sat_type))
                        # Fred - 2018-07-20 - temporary disable this error raising!
                        # raise DispatcherError("Too much configuration parts prepared "
                        #                       "for the expected satellites count. "
                        #                       "This should never happen!")

                    logger.info(" preparing configuration part '%s' for the %s '%s'",
                                cfg_part.instance_id, sat_type, sat_link.name)

                    sat_link.cfg.update({
                        # Global instance configuration
                        'arbiters': arbiters_cfg if sat_link.manage_arbiters else {},
                        'modules': serialize(sat_link.modules, True),
                        'managed_conf_id': 'see_my_schedulers',
                        'global_conf': self.global_conf
                    })
                    sat_link.cfg['schedulers'].update({
                        cfg_part.uuid: realm.to_satellites[sat_type][cfg_part.instance_id]})

                    # Brokers should have pollers and reactionners links too
                    if sat_type == "broker":
                        sat_link.cfg.update({'satellites': realm.get_links_for_a_broker(
                            self.pollers, self.reactionners, self.receivers,
                            self.alignak_conf.realms, sat_link.manage_sub_realms)})

                    # Hash the whole configuration
                    cfg_string = json.dumps(sat_link.cfg, sort_keys=True).encode('utf-8')
                    sat_link.cfg['hash'] = hashlib.sha1(cfg_string).hexdigest()

                    # Dump the configuration part size
                    pickled_conf = pickle.dumps(sat_link.cfg)
                    logger.info(' %s configuration size: %d bytes',
                                sat_type, sys.getsizeof(pickled_conf))

                    # The configuration part is assigned to a satellite
                    sat_link.cfg_to_manage = cfg_part
                    sat_link.push_flavor = cfg_part.push_flavor
                    sat_link.hash = sat_link.cfg['hash']
                    sat_link.need_conf = False
                    sat_link.configuration_sent = False

                    logger.info(' configuration %s (%s) assigned to %s',
                                cfg_part.instance_id, cfg_part.push_flavor, sat_link.name)
                    nb_cfg_prepared += 1
                    realm.to_satellites_managed_by[sat_type][
                        cfg_part.instance_id].append(sat_link)

                    # I've got enough satellite, the next ones are considered unuseful!
                    if nb_cfg_prepared == realm.get_nb_of_must_have_satellites(sat_type):
                        logger.info(" no more %s needed in this realm.", sat_type)
                        realm.to_satellites_need_dispatch[sat_type][
                            cfg_part.instance_id] = False

    nb_missed = len([cfg for cfg in list(
        self.alignak_conf.parts.values()) if not cfg.is_assigned])
    if nb_missed > 0:
        logger.warning("Some configuration parts are not dispatched, %d are missing", nb_missed)
    else:
        logger.info("All configuration parts are assigned "
                    "to schedulers and their satellites :)")

    # Schedulers without a configuration in a dispatch ok do not need a configuration
    # so they do not raise dispatching errors if they are not used
    for scheduler_link in self.schedulers:
        if not scheduler_link.cfg_to_manage:
            # "so it do not ask anymore for conf"
            logger.warning('The scheduler %s do not need a configuration!', scheduler_link.name)
            scheduler_link.need_conf = False
def dispatch(self, test=False):  # pylint: disable=too-many-branches
    """Send the prepared configuration to the satellites.

    A DispatcherError exception is raised when no configuration is
    prepared or when the prepared one was already dispatched.

    :return: None
    """
    if not self.new_to_dispatch:
        raise DispatcherError("Dispatcher cannot dispatch, "
                              "because no configuration is prepared!")
    if self.first_dispatch_done:
        raise DispatcherError("Dispatcher cannot dispatch, "
                              "because the configuration is still dispatched!")

    if self.dispatch_ok:
        logger.info("Dispatching is already done and ok...")
        return

    logger.info("Trying to send configuration to the satellites...")

    self.dispatch_ok = True

    # todo: the 3 loops hereunder may be factorized
    for link in self.arbiters:
        if link == self.arbiter_link:
            # I exclude myself from the dispatching, I have my configuration ;)
            continue

        if not link.active:
            # I exclude the daemons that are not active
            continue

        if not link.spare:
            # Do not dispatch to a master arbiter!
            continue

        if link.configuration_sent:
            logger.debug("Arbiter %s already sent!", link.name)
            continue

        if not link.reachable:
            logger.debug("Arbiter %s is not reachable to receive its configuration",
                         link.name)
            continue

        logger.info("Sending configuration to the arbiter %s", link.name)
        logger.debug("- %s", link.cfg)
        link.put_conf(link.cfg, test=test)
        link.configuration_sent = True
        logger.info("- sent")

        # Now that the spare arbiter has a configuration, tell him it must not run,
        # because I'm not dead ;)
        link.do_not_run()

    for link in self.schedulers:
        if link.configuration_sent:
            logger.debug("Scheduler %s already sent!", link.name)
            continue

        if not link.active:
            # I exclude the daemons that are not active
            continue

        if not link.reachable:
            logger.debug("Scheduler %s is not reachable to receive its configuration",
                         link.name)
            continue

        logger.info("Sending configuration to the scheduler %s", link.name)
        logger.debug("- %s", link.cfg)
        link.put_conf(link.cfg, test=test)
        link.configuration_sent = True
        logger.info("- sent")

    for link in self.satellites:
        if link.configuration_sent:
            logger.debug("%s %s already sent!", link.type, link.name)
            continue

        if not link.active:
            # I exclude the daemons that are not active
            continue

        if not link.reachable:
            logger.warning("%s %s is not reachable to receive its configuration",
                           link.type, link.name)
            continue

        logger.info("Sending configuration to the %s %s", link.type, link.name)
        logger.debug("- %s", link.cfg)
        link.put_conf(link.cfg, test=test)
        link.configuration_sent = True
        logger.info("- sent")

    if self.dispatch_ok:
        # Newly prepared configuration got dispatched correctly
        self.new_to_dispatch = False
        self.first_dispatch_done = True
def stop_request(self, stop_now=False):
    """Send a stop request to all the daemons.

    A daemon that fails to answer is assumed to be already stopped.

    :param stop_now: stop now or go to stop wait mode
    :type stop_now: bool
    :return: True if all the stop requests succeeded
    """
    all_ok = True
    for daemon_link in self.all_daemons_links:
        logger.debug("Stopping: %s (%s)", daemon_link, stop_now)
        if daemon_link == self.arbiter_link:
            # I exclude myself from the process, I know we are going to stop ;)
            continue

        if not daemon_link.active:
            # I exclude the daemons that are not active
            continue

        # Send a stop request to the daemon
        try:
            stop_ok = daemon_link.stop_request(stop_now=stop_now)
        except LinkError:
            # Assume the daemon already stopped when it cannot be reached
            stop_ok = True
            logger.warning("Daemon stop request failed, %s probably stopped!", daemon_link)

        all_ok = all_ok and stop_ok

        daemon_link.stopping = True

    self.stop_request_sent = all_ok
    return self.stop_request_sent
def pythonize(self, val):
    """Convert value into a boolean.

    :param val: value to convert
    :type val: bool, int, str
    :return: boolean corresponding to value ::

        {'1': True, 'yes': True, 'true': True, 'on': True,
         '0': False, 'no': False, 'false': False, 'off': False}

    :rtype: bool
    :raise PythonizeError: when the value cannot be converted
    """
    __boolean_states__ = {'1': True, 'yes': True, 'true': True, 'on': True,
                          '0': False, 'no': False, 'false': False, 'off': False}
    if isinstance(val, bool):
        return val
    val = unique_value(val).lower()
    # Membership test directly on the dict instead of list(d.keys())
    if val in __boolean_states__:
        return __boolean_states__[val]
    raise PythonizeError("Cannot convert '%s' to a boolean value" % val)
def pythonize(self, val):
    """Convert value into a list::

    * split value (or each element if value is a list) on coma char
    * strip split values

    :param val: value to convert
    :type val: str
    :return: list corresponding to value
    :rtype: list
    """
    # Same comprehension for both cases, only the splitter differs
    splitter = list_split if isinstance(val, list) else to_split
    return [item.strip() if hasattr(item, "strip") else item
            for item in splitter(val, self.split_on_comma)
            if (hasattr(item, "strip") and item.strip() != '') or self.keep_empty]
def pythonize(self, val):
    """Convert value into a dict::

    * If value is a list, try to take the last element
    * split "key=value" string and convert to { key:value }

    :param val: value to convert
    :return: dict corresponding to value
    :rtype: dict
    """
    val = unique_value(val)
    if val is None:
        return dict()
    if self.elts_prop is None:
        return val

    def _split_kv(keyval):
        """Split a 'key=value' token into a (key, pythonized value) pair.

        :raise ValueError: when the token does not match key=value
        """
        matches = re.match(r"^\s*([^\s]+)\s*=\s*([^\s]+)\s*$", keyval)
        if matches is None:
            raise ValueError
        # self.elts_prop is not None here (early-returned above), so the
        # value is always pythonized with it
        return matches.group(1), self.elts_prop.pythonize(matches.group(2))

    # val is in the form "key1=addr:[port],key2=addr:[port],..."
    return dict(_split_kv(keyval) for keyval in to_split(val))
def pythonize(self, val):
    """Convert value into an address dict::

    * If value is a list, try to take the last element
    * match the ip address and the port (when present)

    :param val: value to convert
    :type val: str
    :return: address/port corresponding to value
    :rtype: dict
    """
    val = unique_value(val)
    parsed = re.match(r"^([^:]*)(?::(\d+))?$", val)
    if parsed is None:
        raise ValueError
    result = {'address': parsed.group(1)}
    port = parsed.group(2)
    if port is not None:
        result['port'] = int(port)
    return result
def pythonize(self, val):
    """Collapse a single-valued list to its element.

    If value is a list whose elements are all equal, return that unique
    element; any other value is returned unchanged.

    :param val: value to convert
    :type val:
    :return: converted value
    :rtype:
    """
    if not isinstance(val, list):
        # Well, can't choose to remove something.
        return val
    distinct = set(val)
    if len(distinct) == 1:
        # A list holding a unique value: just use that value
        return val[0]
    return val
def pythonize(self, val):
    """Convert value into an integer list::

    * Try to convert into a list (parent class)
    * Convert each element into an int

    :param val: value to convert
    :type val:
    :return: integer list corresponding to value
    :rtype: list[int]
    :raises PythonizeError: when an element cannot be converted to int
    """
    values = super(IntListProp, self).pythonize(val)
    try:
        return list(map(int, values))
    except ValueError as value_except:
        raise PythonizeError(str(value_except))
def get_response(self, method, endpoint, headers=None, json=None, params=None, data=None):
    # pylint: disable=too-many-arguments
    """Return the response from the requested endpoint with the requested method.

    :param method: str. one of the methods accepted by Requests ('POST', 'GET', ...)
    :param endpoint: str. the relative endpoint to access
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param json: (optional) json to send in the body of the :class:`Request`.
    :param params: (optional) Dictionary or bytes to be sent in the query string
       for the :class:`Request`.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body
       of the :class:`Request`.
    :return: Requests.response, or an error dict when the connection failed
    """
    logger.debug("Parameters for get_response:")
    logger.debug("\t - endpoint: %s", endpoint)
    logger.debug("\t - method: %s", method)
    logger.debug("\t - headers: %s", headers)
    logger.debug("\t - json: %s", json)
    logger.debug("\t - params: %s", params)
    logger.debug("\t - data: %s", data)

    url = self.get_url(endpoint)

    # First stage. Errors are connection errors (timeout, no session, ...)
    try:
        response = self.session.request(
            method=method, url=url, headers=headers, json=json,
            params=params, data=data, timeout=self.timeout)
        logger.debug("response headers: %s", response.headers)
        logger.debug("response content: %s", response.content)
    except RequestException as exp:
        # Build a backend-like error structure so callers get a uniform shape
        response = {"_status": "ERR",
                    "_error": {"message": exp},
                    "_issues": {"message": exp}}
    return response
def decode(response):
    """Decode the response and return it as JSON (dict).

    Second stage error handling: errors trapped here are backend errors
    (bad login, bad url, ...) reported through the HTTP status code.

    :param response: requests.response object
    :return: dict (decoded JSON, or an error structure on HTTP error)
    """
    try:
        response.raise_for_status()
    except requests.HTTPError as exp:
        return {"_status": "ERR",
                "_error": {"message": exp, "code": response.status_code},
                "_issues": {"message": exp, "code": response.status_code}}
    return response.json()
def login(self, username, password):
    """Log into the WS interface and get the authentication token.

    if login is:
    - accepted, returns True
    - refused, returns False

    :param username: login name
    :type username: str
    :param password: password
    :type password: str
    :return: True if authentication is successful, otherwise False
    :rtype: bool
    """
    logger.debug("login for: %s", username)

    # Configured as not authenticated WS
    if not username and not password:
        self.set_token(token=None)
        return False

    if not username or not password:
        logger.error("Username or password cannot be None!")
        self.set_token(token=None)
        return False

    endpoint = 'login'
    json = {'username': username, 'password': password}
    response = self.get_response(method='POST', endpoint=endpoint, json=json)

    # BUG FIX: on a connection error, get_response returns an error dict and
    # not a requests Response; accessing .status_code would then raise
    # AttributeError. Fail cleanly instead.
    if isinstance(response, dict):
        logger.error("Backend connection failed: %s",
                     response.get('_error', {}).get('message', 'unknown error'))
        self.set_token(token=None)
        return False

    if response.status_code == 401:
        logger.error("Access denied to %s", self.url_endpoint_root)
        self.set_token(token=None)
        return False

    resp = self.decode(response=response)
    if 'token' in resp:
        self.set_token(token=resp['token'])
        return True
    return False
def logout(self):
    """Logout from the backend: close the session and forget the token.

    :return: return True if logout is successful, otherwise False
    :rtype: bool
    """
    logger.debug("request backend logout")
    if not self.authenticated:
        logger.warning("Unnecessary logout ...")
        return True

    _ = self.get_response(method='POST', endpoint='logout')
    self.session.close()
    self.set_token(token=None)
    return True
def get(self, endpoint, params=None):
    """Get items or item in alignak backend.

    This method builds a response as a dictionary that always contains
    ``_items`` and ``_status``::

        {
            u'_items': [...],
            u'_status': u'OK'
        }

    :param endpoint: endpoint (API URL) relative from root endpoint
    :type endpoint: str
    :param params: parameters for the backend API
    :type params: dict
    :return: dictionary as specified upper
    :rtype: dict
    """
    raw = self.get_response(method='GET', endpoint=endpoint, params=params)
    decoded = self.decode(response=raw)
    if '_status' not in decoded:  # pragma: no cover - need specific backend tests
        decoded['_status'] = u'OK'  # TODO: Sure??
    return decoded
def post(self, endpoint, data, files=None, headers=None):
    # pylint: disable=unused-argument
    """Create a new item.

    :param endpoint: endpoint (API URL)
    :type endpoint: str
    :param data: properties of item to create
    :type data: dict
    :param files: Not used. To be implemented
    :type files: None
    :param headers: headers (example: Content-Type)
    :type headers: dict
    :return: response (creation information)
    :rtype: dict
    """
    # We let Requests encode data to json
    raw = self.get_response(method='POST', endpoint=endpoint, json=data, headers=headers)
    return self.decode(response=raw)
def patch(self, endpoint, data):
    """Update an item.

    The data dictionary contains the fields that must be modified.
    The backend response is decoded when the update succeeds (HTTP 200);
    otherwise the raw response is returned.

    :param endpoint: endpoint (API URL)
    :type endpoint: str
    :param data: properties of item to update
    :type data: dict
    :return: dictionary containing patch response from the backend
    :rtype: dict
    """
    response = self.get_response(method='PATCH', endpoint=endpoint, json=data,
                                 headers={'Content-Type': 'application/json'})
    if response.status_code != 200:
        return response
    return self.decode(response=response)
def sanitize_name(field_name):
    """Sanitize a field name for a TSDB (Graphite or Influx).

    Remove or replace the characters that are not allowed in a metric name:
    a leading '/' becomes '_', '+' becomes '_', '/' becomes '-',
    spaces become '_', '%' becomes '_pct', and every character outside
    [a-zA-Z_-0-9.$] is dropped.

    :param field_name: field name to clean
    :type field_name: str
    :return: sanitized field name
    """
    if not field_name:
        return field_name
    cleaned = field_name.strip()
    if cleaned.startswith('/'):
        cleaned = '_' + cleaned[1:]
    # Single-pass character substitution instead of chained replace() calls
    cleaned = cleaned.translate(str.maketrans({'+': '_', '/': '-',
                                               ' ': '_', '%': '_pct'}))
    # Drop every character not in [a-zA-Z_-0-9.$]
    return re.sub(r'[^a-zA-Z_\-0-9\.\$]', '', cleaned)
def init(self, conf):
    """Initialize MacroResolver instance with conf.
    Must be called at least once.

    :param conf: configuration to load
    :type conf: alignak.objects.Config
    :return: None
    """
    # Keep a link to the configuration: on-demand macro resolution needs
    # to search classes and elements through its object lists.
    self.my_conf = conf

    self.hosts = self.my_conf.hosts
    # For special void host_name handling...
    self.host_class = self.hosts.inner_class
    self.services = self.my_conf.services
    self.contacts = self.my_conf.contacts
    self.hostgroups = self.my_conf.hostgroups
    self.commands = self.my_conf.commands
    self.servicegroups = self.my_conf.servicegroups
    self.contactgroups = self.my_conf.contactgroups

    # The object lists that can be searched for on-demand macros
    self.lists_on_demand = [self.hosts, self.contacts, self.hostgroups,
                            self.servicegroups, self.contactgroups]

    self.illegal_macro_output_chars = self.my_conf.illegal_macro_output_chars
    self.env_prefix = self.my_conf.env_variables_prefix
Please provide a description of the function:def _get_macros(chain):
regex = re.compile(r'(\$)')
elts = regex.split(chain)
macros = {}
in_macro = False
for elt in elts:
if elt == '$':
in_macro = not in_macro
elif in_macro:
macros[elt] = {'val': '', 'type': 'unknown'}
return macros | [
"Get all macros of a chain\n Cut '$' char and create a dict with the following structure::\n\n { 'MacroSTR1' : {'val': '', 'type': 'unknown'}\n 'MacroSTR2' : {'val': '', 'type': 'unknown'}\n }\n\n :param chain: chain to parse\n :type chain: str\n :return: dict with macro parsed as key\n :rtype: dict\n "
] |
Please provide a description of the function:def _get_value_from_element(self, elt, prop):
# pylint: disable=too-many-return-statements
args = None
# We have args to provide to the function
if isinstance(prop, tuple):
prop, args = prop
value = getattr(elt, prop, None)
if value is None:
return 'n/a'
try:
# If the macro is set to a list property
if isinstance(value, list):
# Return the list items, comma separated and bracketed
return "[%s]" % ','.join(value)
# If the macro is not set as a function to call
if not isinstance(value, collections.Callable):
return value
# Case of a function call with no arguments
if not args:
return value()
# Case where we need args to the function
# ex : HOSTGROUPNAME (we need hostgroups)
# ex : SHORTSTATUS (we need hosts and services if bp_rule)
real_args = []
for arg in args:
real_args.append(getattr(self, arg, None))
return value(*real_args)
except AttributeError:
# Commented because there are many unresolved macros and this log is spamming :/
# # Raise a warning and return a strange value when macro cannot be resolved
# warnings.warn(
# 'Error when getting the property value for a macro: %s',
# MacroWarning, stacklevel=2)
# Return a strange value when macro cannot be resolved
return 'n/a'
except UnicodeError:
if isinstance(value, string_types):
return str(value, 'utf8', errors='ignore')
return 'n/a' | [
"Get value from an element's property.\n\n the property may be a function to call.\n\n If the property is not resolved (because not implemented), this function will return 'n/a'\n\n :param elt: element\n :type elt: object\n :param prop: element property\n :type prop: str\n :return: getattr(elt, prop) or getattr(elt, prop)() (call)\n :rtype: str\n "
] |
Please provide a description of the function:def _delete_unwanted_caracters(self, chain):
try:
chain = chain.decode('utf8', 'replace')
except UnicodeEncodeError:
# If it is still encoded correctly, ignore...
pass
except AttributeError:
# Python 3 will raise an exception because the line is still unicode
pass
for char in self.illegal_macro_output_chars:
chain = chain.replace(char, '')
return chain | [
"Remove not wanted char from chain\n unwanted char are illegal_macro_output_chars attribute\n\n :param chain: chain to remove char from\n :type chain: str\n :return: chain cleaned\n :rtype: str\n "
] |
def get_env_macros(self, data):
    """Get all environment macros from data.

    For each object in data::

    * Fetch all macros in object.__class__.macros
    * Fetch all customs macros in object.customs

    :param data: data to get macros from
    :type data: list
    :return: dict with macro name as key and macro value as value
    :rtype: dict
    """
    env = {}
    for obj in data:
        # Class-level macros; USERn macros are skipped (resource macros)
        for macro, prop in obj.__class__.macros.items():
            if macro.startswith("USER"):
                continue
            env['%s%s' % (self.env_prefix, macro)] = \
                self._get_value_from_element(obj, prop)

        if hasattr(obj, 'customs'):
            # make NAGIOS__HOSTMACADDR from _MACADDR
            for cmacro in obj.customs:
                key = '%s_%s%s' % (self.env_prefix,
                                   obj.__class__.__name__.upper(),
                                   cmacro[1:].upper())
                env[key] = obj.customs[cmacro]
    return env
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.