Code | Summary |
---|---|
Please provide a description of the function:def resolve_simple_macros_in_string(self, c_line, data, macromodulations, timeperiods,
args=None):
# pylint: disable=too-many-locals, too-many-branches, too-many-nested-blocks
# Now we prepare the classes for looking at the class.macros
data.append(self) # For getting global MACROS
if hasattr(self, 'my_conf'):
data.append(self.my_conf) # For USERN macros
# We should loop over nested macros: a $USER1$ may be hiding
# like a ninja inside an $ARG2$ macro, and $USER1$ may itself
# point to $USER34$, etc. So we loop until no macros remain.
still_got_macros = True
nb_loop = 0
while still_got_macros:
nb_loop += 1
# Ok, we want the macros in the command line
macros = self._get_macros(c_line)
# Tag each macro with its macro type
self._get_type_of_macro(macros, data)
# We can get out if we do not have macros this loop
still_got_macros = False
if macros:
still_got_macros = True
# Now we get values from elements
for macro in macros:
# If type ARGN, look at ARGN cutting
if macros[macro]['type'] == 'ARGN' and args is not None:
macros[macro]['val'] = self._resolve_argn(macro, args)
macros[macro]['type'] = 'resolved'
# If object type, get value from a property
if macros[macro]['type'] == 'object':
obj = macros[macro]['object']
if obj not in data:
continue
prop = obj.macros[macro]
if not prop:
continue
macros[macro]['val'] = self._get_value_from_element(obj, prop)
# Now check whether this is an 'output' macro. If so, we must
# strip any special characters that could be dangerous
if macro in self.output_macros:
logger.debug("-> macro from: %s, %s = %s", obj, macro, macros[macro])
macros[macro]['val'] = self._delete_unwanted_caracters(macros[macro]['val'])
# If custom type, get value from an object custom variables
if macros[macro]['type'] == 'CUSTOM':
cls_type = macros[macro]['class']
# Beware: only cut the first _HOST / _SERVICE / _CONTACT prefix,
# since the macro name itself may contain it...
macro_name = re.split('_' + cls_type, macro, 1)[1].upper()
logger.debug(" ->: %s - %s", cls_type, macro_name)
# Ok, we've got the macro like MAC_ADDRESS for _HOSTMAC_ADDRESS
# Now we get the elements in data that have the type HOST
# and we check whether they hold the custom value
for elt in data:
if not elt or elt.__class__.my_type.upper() != cls_type:
continue
logger.debug(" : for %s: %s", elt, elt.customs)
if not getattr(elt, 'customs', None):
continue
if '_' + macro_name in elt.customs:
macros[macro]['val'] = elt.customs['_' + macro_name]
logger.debug(" : macro %s = %s", macro, macros[macro]['val'])
# Then look at the macromodulations, in reverse order, so
# the last defined will be the first applied
mms = getattr(elt, 'macromodulations', [])
for macromodulation_id in mms[::-1]:
macromodulation = macromodulations[macromodulation_id]
if not macromodulation.is_active(timeperiods):
continue
# Look if the modulation got the value,
# but also if it's currently active
if "_%s" % macro_name in macromodulation.customs:
macros[macro]['val'] = macromodulation.customs["_%s" % macro_name]
# If on-demand type, get the value from dynamically provided data objects
if macros[macro]['type'] == 'ONDEMAND':
macros[macro]['val'] = self._resolve_ondemand(macro, data)
# We resolved all we can, now replace the macros in the command call
for macro in macros:
c_line = c_line.replace("$%s$" % macro, "%s" % (macros[macro]['val']))
# A $$ means we want a literal $, it's not a macro!
# We replace $$ by a big dirty token to be sure not to misinterpret it
c_line = c_line.replace("$$", "DOUBLEDOLLAR")
if nb_loop > 32: # too many loops, we exit
still_got_macros = False
# We now replace the big dirty token we made with a simple $
c_line = c_line.replace("DOUBLEDOLLAR", "$")
return c_line.strip() | [
"Replace macro in the command line with the real value\n\n :param c_line: command line to modify\n :type c_line: str\n :param data: objects list, use to look for a specific macro\n :type data:\n :param macromodulations: the available macro modulations\n :type macromodulations: dict\n :param timeperiods: the available timeperiods\n :type timeperiods: dict\n :param args: args given to the command line, used to get \"ARGN\" macros.\n :type args:\n :return: command line with '$MACRO$' replaced with values\n :rtype: str\n "
] |
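The nested-resolution loop above is easier to see in isolation. Below is a minimal, self-contained sketch of the same idea (a hypothetical flat `macros` dict, not Alignak's typed macro handling): substitute `$NAME$` tokens repeatedly, hide `$$` escapes so they are never read as macro delimiters, and cap the number of passes.

```python
import re

def resolve(line, macros, max_loops=32):
    """Substitute $NAME$ tokens until none remain; $$ escapes a literal $."""
    for _ in range(max_loops):
        # Hide literal $$ so it can never be misread as a macro delimiter
        line = line.replace('$$', '\x00')
        names = re.findall(r'\$(\w+)\$', line)
        if not names:
            break
        for name in names:
            # A macro value may itself contain macros: the next pass resolves them
            line = line.replace('$%s$' % name, str(macros.get(name, '')))
    return line.replace('\x00', '$').strip()

macros = {'USER1': '/usr/lib/plugins', 'CHECK': '$USER1$/check_ping', 'HOST': 'srv-1'}
print(resolve('$CHECK$ -H $HOST$ -w 100$$', macros))
# -> /usr/lib/plugins/check_ping -H srv-1 -w 100$
```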
Please provide a description of the function:def resolve_command(self, com, data, macromodulations, timeperiods):
logger.debug("Resolving: macros in: %s, arguments: %s",
com.command.command_line, com.args)
return self.resolve_simple_macros_in_string(com.command.command_line, data,
macromodulations, timeperiods,
args=com.args) | [
"Resolve command macros with data\n\n :param com: check / event handler or command call object\n :type com: object\n :param data: objects list, used to search for a specific macro (custom or object related)\n :type data:\n :return: command line with '$MACRO$' replaced with values\n :param macromodulations: the available macro modulations\n :type macromodulations: dict\n :param timeperiods: the available timeperiods\n :type timeperiods: dict\n :rtype: str\n "
] |
Please provide a description of the function:def _get_type_of_macro(macros, objs):
for macro in macros:
# ARGN Macros
if re.match(r'ARG\d', macro):
macros[macro]['type'] = 'ARGN'
continue
# USERN macros
# are managed in the Config class, so no
# need to look for them here
elif re.match(r'_HOST\w', macro):
macros[macro]['type'] = 'CUSTOM'
macros[macro]['class'] = 'HOST'
continue
elif re.match(r'_SERVICE\w', macro):
macros[macro]['type'] = 'CUSTOM'
macros[macro]['class'] = 'SERVICE'
# value of macro: re.split('_HOST', '_HOSTMAC_ADDRESS')[1]
continue
elif re.match(r'_CONTACT\w', macro):
macros[macro]['type'] = 'CUSTOM'
macros[macro]['class'] = 'CONTACT'
continue
# On demand macro
elif len(macro.split(':')) > 1:
macros[macro]['type'] = 'ONDEMAND'
continue
# OK, classical macro...
for obj in objs:
if macro in obj.macros:
macros[macro]['type'] = 'object'
macros[macro]['object'] = obj
continue | [
"Set macros types\n\n Example::\n\n ARG\\d -> ARGN,\n HOSTBLABLA -> class one and set Host in class)\n _HOSTTOTO -> HOST CUSTOM MACRO TOTO\n SERVICESTATEID:srv-1:Load$ -> MACRO SERVICESTATEID of the service Load of host srv-1\n\n :param macros: macros list in a dictionary\n :type macros: dict\n :param objs: objects list, used to tag object macros\n :type objs: list\n :return: None\n "
] |
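The precedence of these checks matters: ARGN first, then the custom-variable prefixes, then the on-demand colon syntax, and object macros only as a fallback. A condensed sketch of that same ordering (not the Alignak implementation, which also tags the owning object):

```python
import re

def classify(macro):
    """Return a (type, class) pair following the same check order as above."""
    if re.match(r'ARG\d', macro):
        return 'ARGN', None
    for prefix in ('HOST', 'SERVICE', 'CONTACT'):
        if re.match(r'_%s\w' % prefix, macro):
            return 'CUSTOM', prefix
    if ':' in macro:
        return 'ONDEMAND', None
    return 'object', None

for name in ('ARG2', '_HOSTMAC_ADDRESS', 'SERVICESTATEID:srv-1:Load', 'HOSTNAME'):
    print(name, '->', classify(name))
# ARG2 -> ('ARGN', None)
# _HOSTMAC_ADDRESS -> ('CUSTOM', 'HOST')
# SERVICESTATEID:srv-1:Load -> ('ONDEMAND', None)
# HOSTNAME -> ('object', None)
```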
Please provide a description of the function:def _resolve_argn(macro, args):
# First, get the argument index from the macro name
_id = None
matches = re.search(r'ARG(?P<id>\d+)', macro)
if matches is not None:
_id = int(matches.group('id')) - 1
try:
return args[_id]
except IndexError:
# Required argument not found, returns an empty string
return ''
return '' | [
"Get argument from macro name\n ie : $ARG3$ -> args[2]\n\n :param macro: macro to parse\n :type macro:\n :param args: args given to command line\n :type args:\n :return: argument at position N-1 in args table (where N is the int parsed)\n :rtype: None | str\n "
] |
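A quick worked example of the ARGN convention, since the off-by-one is easy to get wrong: `$ARG3$` maps to `args[2]`, and a missing argument silently resolves to an empty string. This is a hypothetical standalone helper mirroring the method above:

```python
import re

def resolve_argn(macro, args):
    match = re.search(r'ARG(?P<id>\d+)', macro)
    if match is None:
        return ''
    index = int(match.group('id')) - 1  # $ARGn$ -> args[n-1]
    try:
        return args[index]
    except IndexError:
        return ''  # required argument not provided

print(resolve_argn('ARG3', ['-w', '80%', '-c']))  # -> -c
print(resolve_argn('ARG9', ['-w', '80%']))        # -> (empty string)
```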
Please provide a description of the function:def _resolve_ondemand(self, macro, data):
# pylint: disable=too-many-locals
elts = macro.split(':')
nb_parts = len(elts)
macro_name = elts[0]
# 3 parts for a service, 2 for all other types...
if nb_parts == 3:
val = ''
(host_name, service_description) = (elts[1], elts[2])
# host_name can be empty, so it's the host found in data
# that matters. We use our self.host_class to
# find the host in the data :)
if host_name == '':
for elt in data:
if elt is not None and elt.__class__ == self.host_class:
host_name = elt.host_name
# Ok now we get service
serv = self.services.find_srv_by_name_and_hostname(host_name, service_description)
if serv is not None:
cls = serv.__class__
prop = cls.macros[macro_name]
val = self._get_value_from_element(serv, prop)
return val
# Ok, service was easy, now hard part
else:
val = ''
elt_name = elts[1]
# Special case: elt_name can be empty,
# in which case it's the host it applies to
if elt_name == '':
for elt in data:
if elt is not None and elt.__class__ == self.host_class:
elt_name = elt.host_name
for od_list in self.lists_on_demand:
cls = od_list.inner_class
# We search our type by looking at the macro
if macro_name in cls.macros:
prop = cls.macros[macro_name]
i = od_list.find_by_name(elt_name)
if i is not None:
val = self._get_value_from_element(i, prop)
# Ok we got our value :)
break
return val
# Return a strange value in this case rather than an empty string
return 'n/a' | [
"Get on demand macro value\n\n If the macro cannot be resolved, this function will return 'n/a' rather than\n an empty string, this to alert the caller of a potential problem.\n\n :param macro: macro to parse\n :type macro:\n :param data: data to get value from\n :type data:\n :return: macro value\n :rtype: str\n "
] |
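The on-demand syntax is positional: a sketch of how the colon-separated parts are interpreted, per the branches above (three parts address a service, two parts any other object, and an empty element name means "the host found in the data"):

```python
def split_ondemand(macro):
    """'NAME:host:service' -> a service lookup; 'NAME:element' -> any other object."""
    parts = macro.split(':')
    if len(parts) == 3:
        name, host_name, service_description = parts
        return 'service', name, host_name, service_description
    name, elt_name = parts[0], parts[1]
    return 'other', name, elt_name

print(split_ondemand('SERVICESTATEID:srv-1:Load'))
# -> ('service', 'SERVICESTATEID', 'srv-1', 'Load')
print(split_ondemand('HOSTSTATE:srv-1'))
# -> ('other', 'HOSTSTATE', 'srv-1')
```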
Please provide a description of the function:def _tot_hosts_by_state(self, state=None, state_type=None):
if state is None and state_type is None:
return len(self.hosts)
if state_type:
return sum(1 for h in self.hosts if h.state == state and h.state_type == state_type)
return sum(1 for h in self.hosts if h.state == state) | [
"Generic function to get the number of host in the specified state\n\n :param state: state to filter on\n :type state: str\n :param state_type: state type to filter on (HARD, SOFT)\n :type state_type: str\n :return: number of host in state *state*\n :rtype: int\n "
] |
Please provide a description of the function:def _tot_unhandled_hosts_by_state(self, state):
return sum(1 for h in self.hosts if h.state == state and h.state_type == u'HARD' and
h.is_problem and not h.problem_has_been_acknowledged) | [
"Generic function to get the number of unhandled problem hosts in the specified state\n\n :param state: state to filter on\n :type state:\n :return: number of host in state *state* and which are not acknowledged problems\n :rtype: int\n "
] |
Please provide a description of the function:def _get_total_hosts_problems_unhandled(self):
return sum(1 for h in self.hosts if h.is_problem and not h.problem_has_been_acknowledged) | [
"\n Get the number of host problems not handled\n\n :return: Number of hosts which are problems and not handled\n :rtype: int\n "
] |
Please provide a description of the function:def _get_total_hosts_problems_handled(self):
return sum(1 for h in self.hosts if h.is_problem and h.problem_has_been_acknowledged) | [
"\n Get the number of host problems not handled\n\n :return: Number of hosts which are problems and not handled\n :rtype: int\n "
] |
Please provide a description of the function:def _get_total_hosts_not_monitored(self):
return sum(1 for h in self.hosts if not h.active_checks_enabled and
not h.passive_checks_enabled) | [
"\n Get the number of host not monitored (active and passive checks disabled)\n\n :return: Number of hosts which are not monitored\n :rtype: int\n "
] |
Please provide a description of the function:def _tot_services_by_state(self, state=None, state_type=None):
if state is None and state_type is None:
return len(self.services)
if state_type:
return sum(1 for s in self.services if s.state == state and s.state_type == state_type)
return sum(1 for s in self.services if s.state == state) | [
"Generic function to get the number of services in the specified state\n\n :param state: state to filter on\n :type state: str\n :param state_type: state type to filter on (HARD, SOFT)\n :type state_type: str\n :return: number of host in state *state*\n :rtype: int\n TODO: Should be moved\n "
] |
Please provide a description of the function:def _tot_unhandled_services_by_state(self, state):
return sum(1 for s in self.services if s.state == state and
s.is_problem and not s.problem_has_been_acknowledged) | [
"Generic function to get the number of unhandled problem services in the specified state\n\n :param state: state to filter on\n :type state:\n :return: number of service in state *state* and which are not acknowledged problems\n :rtype: int\n "
] |
Please provide a description of the function:def _get_total_services_problems_unhandled(self):
return sum(1 for s in self.services if s.is_problem and not s.problem_has_been_acknowledged) | [
"Get the number of services that are a problem and that are not acknowledged\n\n :return: number of problem services which are not acknowledged\n :rtype: int\n "
] |
Please provide a description of the function:def _get_total_services_problems_handled(self):
return sum(1 for s in self.services if s.is_problem and s.problem_has_been_acknowledged) | [
"\n Get the number of service problems not handled\n\n :return: Number of services which are problems and not handled\n :rtype: int\n "
] |
Please provide a description of the function:def _get_total_services_not_monitored(self):
return sum(1 for s in self.services if not s.active_checks_enabled and
not s.passive_checks_enabled) | [
"\n Get the number of service not monitored (active and passive checks disabled)\n\n :return: Number of services which are not monitored\n :rtype: int\n "
] |
Please provide a description of the function:def add_data(self, metric, value, ts=None):
if not ts:
ts = time.time()
if self.__data_lock.acquire():
self.__data.append((metric, (ts, value)))
self.__data_lock.release()
return True
return False | [
"\n Add data to queue\n\n :param metric: the metric name\n :type metric: str\n :param value: the value of data\n :type value: int\n :param ts: the timestamp\n :type ts: int | None\n :return: True if added successfully, otherwise False\n :rtype: bool\n "
] |
Please provide a description of the function:def add_data_dict(self, dd): # pragma: no cover - never used...
if self.__data_lock.acquire():
for k, v in list(dd.items()):
ts = v.get('ts', time.time())
value = v.get('value')
self.__data.append((k, (ts, value)))
self.__data_lock.release()
return True
return False | [
"\n dd must be a dictionary where keys are the metric name,\n each key contains a dictionary which at least must have 'value' key (optionally 'ts')\n\n dd = {'experiment1.subsystem.block.metric1': {'value': 12.3, 'ts': 1379491605.55},\n 'experiment1.subsystem.block.metric2': {'value': 1.35},\n ...}\n "
] |
Please provide a description of the function:def add_data_list(self, dl): # pragma: no cover - never used...
if self.__data_lock.acquire():
self.__data.extend(dl)
self.__data_lock.release()
return True
return False | [
"\n dl must be a list of tuples like:\n dl = [('metricname', (timestamp, value)),\n ('metricname', (timestamp, value)),\n ...]\n "
] |
Please provide a description of the function:def send_data(self, data=None):
save_in_error = False
if not data:
if self.__data_lock.acquire():
data = self.__data
self.__data = []
save_in_error = True
self.__data_lock.release()
else:
return False
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
payload = pickle.dumps(data, protocol=2)
header = struct.pack("!L", len(payload))
message = header + payload
s.settimeout(1)
s.connect((self.host, self.port))
try:
s.send(message)
except Exception: # pylint: disable=broad-except
# log.exception('Error when sending data to carbon')
if save_in_error:
self.__data.extend(data)
return False
else:
# log.debug('Sent data to {host}:{port}: {0} metrics, {1} bytes'.format(len(data),
# len(message), host = self.host, port=self.port))
return True
finally:
s.close() | [
"If data is empty, current buffer is sent. Otherwise data must be like:\n data = [('metricname', (timestamp, value)),\n ('metricname', (timestamp, value)),\n ...]\n "
] |
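The message built in `send_data` follows Graphite/carbon's pickle receiver framing: a 4-byte big-endian length header, then a pickled list of `(metric, (timestamp, value))` tuples. A small round-trip showing the framing without any network I/O:

```python
import pickle
import struct
import time

data = [('example.metric1', (time.time(), 12.3)),
        ('example.metric2', (time.time(), 1.35))]

payload = pickle.dumps(data, protocol=2)        # protocol 2, as above
message = struct.pack("!L", len(payload)) + payload

# What a carbon-like receiver does on the other end
(length,) = struct.unpack("!L", message[:4])
assert pickle.loads(message[4:4 + length]) == data
```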
Please provide a description of the function:def set_daemon_name(self, daemon_name):
self.daemon_name = daemon_name
for instance in self.instances:
instance.set_loaded_into(daemon_name) | [
"Set the daemon name of the daemon which this manager is attached to\n and propagate this daemon name to our managed modules\n\n :param daemon_name:\n :return:\n "
] |
Please provide a description of the function:def load_and_init(self, modules):
self.load(modules)
self.get_instances()
return len(self.configuration_errors) == 0 | [
"Import, instantiate & \"init\" the modules we manage\n\n :param modules: list of the managed modules\n :return: True if no errors\n "
] |
Please provide a description of the function:def load(self, modules):
self.modules_assoc = []
for module in modules:
if not module.enabled:
logger.info("Module %s is declared but not enabled", module.name)
# Store in our modules list but do not try to load
# Probably someone else will load this module later...
self.modules[module.uuid] = module
continue
logger.info("Importing Python module '%s' for %s...", module.python_name, module.name)
try:
python_module = importlib.import_module(module.python_name)
# Check existing module properties
# Todo: check all mandatory properties
if not hasattr(python_module, 'properties'): # pragma: no cover
self.configuration_errors.append("Module %s is missing a 'properties' "
"dictionary" % module.python_name)
raise AttributeError
logger.info("Module properties: %s", getattr(python_module, 'properties'))
# Check existing module get_instance method
if not hasattr(python_module, 'get_instance') or \
not isinstance(getattr(python_module, 'get_instance'),
collections.Callable): # pragma: no cover
self.configuration_errors.append("Module %s is missing a 'get_instance' "
"function" % module.python_name)
raise AttributeError
self.modules_assoc.append((module, python_module))
logger.info("Imported '%s' for %s", module.python_name, module.name)
except ImportError as exp: # pragma: no cover, simple protection
self.configuration_errors.append("Module %s (%s) can't be loaded, Python "
"importation error: %s" % (module.python_name,
module.name,
str(exp)))
except AttributeError: # pragma: no cover, simple protection
self.configuration_errors.append("Module %s (%s) can't be loaded: "
"invalid module configuration" % (module.python_name,
module.name))
else:
logger.info("Loaded Python module '%s' (%s)", module.python_name, module.name) | [
"Load Python modules and check their usability\n\n :param modules: list of the modules that must be loaded\n :return:\n "
] |
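This is a classic importlib plugin pattern: import by dotted name, then verify the module honors the expected contract (a `properties` dict and a callable `get_instance`). A minimal sketch of the contract check follows; note that on modern Python `collections.Callable` lives in `collections.abc` (it was removed from `collections` in 3.10), and the built-in `callable()` does the same job. The helper name is hypothetical:

```python
import importlib

def import_plugin(python_name):
    """Import a module and verify it exposes the expected plugin contract."""
    module = importlib.import_module(python_name)  # may raise ImportError
    if not isinstance(getattr(module, 'properties', None), dict):
        raise AttributeError("%s is missing a 'properties' dictionary" % python_name)
    if not callable(getattr(module, 'get_instance', None)):
        raise AttributeError("%s is missing a 'get_instance' function" % python_name)
    return module
```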
Please provide a description of the function:def try_instance_init(self, instance, late_start=False):
try:
instance.init_try += 1
# Maybe it's a retry
if not late_start and instance.init_try > 1:
# Do not retry too frequently, else we would loop endlessly
if instance.last_init_try > time.time() - MODULE_INIT_PERIOD:
logger.info("Too early to retry initialization, retry period is %d seconds",
MODULE_INIT_PERIOD)
# logger.info("%s / %s", instance.last_init_try, time.time())
return False
instance.last_init_try = time.time()
logger.info("Trying to initialize module: %s", instance.name)
# If it's an external module, create/update Queues()
if instance.is_external:
instance.create_queues(self.daemon.sync_manager)
# The module instance init function says if initialization is ok
if not instance.init():
logger.warning("Module %s initialisation failed.", instance.name)
return False
logger.info("Module %s is initialized.", instance.name)
except Exception as exp: # pylint: disable=broad-except
# pragma: no cover, simple protection
msg = "The module instance %s raised an exception " \
"on initialization: %s, I remove it!" % (instance.name, str(exp))
self.configuration_errors.append(msg)
logger.error(msg)
logger.exception(exp)
return False
return True | [
"Try to \"initialize\" the given module instance.\n\n :param instance: instance to init\n :type instance: object\n :param late_start: If late_start, don't look for last_init_try\n :type late_start: bool\n :return: True on successful init. False if instance init method raised any Exception.\n :rtype: bool\n "
] |
Please provide a description of the function:def clear_instances(self, instances=None):
if instances is None:
instances = self.instances[:] # have to make a copy of the list
for instance in instances:
self.remove_instance(instance) | [
"Request to \"remove\" the given instances list or all if not provided\n\n :param instances: instances to remove (all instances are removed if None)\n :type instances:\n :return: None\n "
] |
Please provide a description of the function:def set_to_restart(self, instance):
self.to_restart.append(instance)
if instance.is_external:
instance.proc = None | [
"Put an instance to the restart queue\n\n :param instance: instance to restart\n :type instance: object\n :return: None\n "
] |
Please provide a description of the function:def get_instances(self):
self.clear_instances()
for (alignak_module, python_module) in self.modules_assoc:
alignak_module.properties = python_module.properties.copy()
alignak_module.my_daemon = self.daemon
logger.info("Alignak starting module '%s'", alignak_module.get_name())
if getattr(alignak_module, 'modules', None):
modules = []
for module_uuid in alignak_module.modules:
if module_uuid in self.modules:
modules.append(self.modules[module_uuid])
alignak_module.modules = modules
logger.debug("Module '%s', parameters: %s",
alignak_module.get_name(), alignak_module.__dict__)
try:
instance = python_module.get_instance(alignak_module)
if not isinstance(instance, BaseModule): # pragma: no cover, simple protection
self.configuration_errors.append("Module %s instance is not a "
"BaseModule instance: %s"
% (alignak_module.get_name(),
type(instance)))
raise AttributeError
# pragma: no cover, simple protection
except Exception as exp: # pylint: disable=broad-except
logger.error("The module %s raised an exception on loading, I remove it!",
alignak_module.get_name())
logger.exception("Exception: %s", exp)
self.configuration_errors.append("The module %s raised an exception on "
"loading: %s, I remove it!"
% (alignak_module.get_name(), str(exp)))
else:
# Give the module the data to which daemon/module it is loaded into
instance.set_loaded_into(self.daemon.name)
self.instances.append(instance)
for instance in self.instances:
# External instances are not initialized now, but only when they are started
if not instance.is_external and not self.try_instance_init(instance):
# If the init failed, we put it in the restart queue
logger.warning("The module '%s' failed to initialize, "
"I will try to restart it later", instance.name)
self.set_to_restart(instance)
return self.instances | [
"Create, init and then returns the list of module instances that the caller needs.\n\n This method is called once the Python modules are loaded to initialize the modules.\n\n If an instance can't be created or initialized then only log is doneand that\n instance is skipped. The previous modules instance(s), if any, are all cleaned.\n\n :return: module instances list\n :rtype: list\n "
] |
Please provide a description of the function:def start_external_instances(self, late_start=False):
for instance in [i for i in self.instances if i.is_external]:
# But maybe the init failed a bit, so bypass these ones for now
if not self.try_instance_init(instance, late_start=late_start):
logger.warning("The module '%s' failed to init, I will try to restart it later",
instance.name)
self.set_to_restart(instance)
continue
# ok, init succeed
logger.info("Starting external module %s", instance.name)
instance.start() | [
"Launch external instances that are load correctly\n\n :param late_start: If late_start, don't look for last_init_try\n :type late_start: bool\n :return: None\n "
] |
Please provide a description of the function:def remove_instance(self, instance):
# External instances need to be close before (process + queues)
if instance.is_external:
logger.info("Request external process to stop for %s", instance.name)
instance.stop_process()
logger.info("External process stopped.")
instance.clear_queues(self.daemon.sync_manager)
# Then do not listen to it anymore
self.instances.remove(instance) | [
"Request to cleanly remove the given instance.\n If instance is external also shutdown it cleanly\n\n :param instance: instance to remove\n :type instance: object\n :return: None\n "
] |
Please provide a description of the function:def check_alive_instances(self):
# Only for external
for instance in self.instances:
if instance in self.to_restart:
continue
if instance.is_external and instance.process and not instance.process.is_alive():
logger.error("The external module %s died unexpectedly!", instance.name)
logger.info("Setting the module %s to restart", instance.name)
# We clean its queues, they are no longer useful
instance.clear_queues(self.daemon.sync_manager)
self.set_to_restart(instance)
# Ok, no need to look at queue size now
continue
# Now look at the maximum queue size. If above the defined value, the module may
# have a huge problem and so we bail out. Not a perfect solution, more a watchdog
# If max_queue_size is 0, don't check this
if self.daemon.max_queue_size == 0:
continue
# Check for module queue size
queue_size = 0
try:
queue_size = instance.to_q.qsize()
except Exception: # pylint: disable=broad-except
pass
if queue_size > self.daemon.max_queue_size:
logger.error("The module %s has a too important queue size (%s > %s max)!",
instance.name, queue_size, self.daemon.max_queue_size)
logger.info("Setting the module %s to restart", instance.name)
# We clean its queues, they are no longer useful
instance.clear_queues(self.daemon.sync_manager)
self.set_to_restart(instance) | [
"Check alive instances.\n If not, log error and try to restart it\n\n :return: None\n "
] |
Please provide a description of the function:def try_to_restart_deads(self):
to_restart = self.to_restart[:]
del self.to_restart[:]
for instance in to_restart:
logger.warning("Trying to restart module: %s", instance.name)
if self.try_instance_init(instance):
logger.warning("Restarting %s...", instance.name)
# Because it is a restart, clean the module inner process reference
instance.process = None
# If it's an external module, it will start the process
instance.start()
# Ok it's good now :)
else:
# Will retry later...
self.to_restart.append(instance) | [
"Try to reinit and restart dead instances\n\n :return: None\n "
] |
Please provide a description of the function:def get_internal_instances(self, phase=None):
if phase is None:
return [instance for instance in self.instances if not instance.is_external]
return [instance for instance in self.instances
if not instance.is_external and phase in instance.phases and
instance not in self.to_restart] | [
"Get a list of internal instances (in a specific phase)\n\n If phase is None, return all internal instances whtever the phase\n\n :param phase: phase to filter (never used)\n :type phase:\n :return: internal instances list\n :rtype: list\n "
] |
Please provide a description of the function:def get_external_instances(self, phase=None):
if phase is None:
return [instance for instance in self.instances if instance.is_external]
return [instance for instance in self.instances
if instance.is_external and phase in instance.phases and
instance not in self.to_restart] | [
"Get a list of external instances (in a specific phase)\n\n If phase is None, return all external instances whtever the phase\n\n :param phase: phase to filter (never used)\n :type phase:\n :return: external instances list\n :rtype: list\n "
] |
Please provide a description of the function:def stop_all(self):
logger.info('Shutting down modules...')
# Ask internal to quit if they can
for instance in self.get_internal_instances():
if hasattr(instance, 'quit') and isinstance(instance.quit, collections.Callable):
instance.quit()
self.clear_instances([instance for instance in self.instances if instance.is_external]) | [
"Stop all module instances\n\n :return: None\n "
] |
Please provide a description of the function:def main():
parsed_configuration = AlignakConfigParser()
try:
parsed_configuration.parse()
except configparser.ParsingError as exp:
print("Environment file parsing error: %s", exp)
if parsed_configuration.export:
# Export Alignak version
print("export ALIGNAK_VERSION=%s" % (parsed_configuration.alignak_version)) | [
"\n Main function\n "
] |
Please provide a description of the function:def parse(self):
# pylint: disable=too-many-branches
# Search if some ini files exist in an alignak.d sub-directory
sub_directory = 'alignak.d'
dir_name = os.path.dirname(self.configuration_file)
dir_name = os.path.join(dir_name, sub_directory)
self.cfg_files = [self.configuration_file]
if os.path.exists(dir_name):
for root, _, walk_files in os.walk(dir_name, followlinks=True):
for found_file in walk_files:
if not re.search(r"\.ini$", found_file):
continue
self.cfg_files.append(os.path.join(root, found_file))
print("Loading configuration files: %s " % self.cfg_files)
# Read and parse the found configuration files
self.config = configparser.ConfigParser()
try:
self.config.read(self.cfg_files)
if self.config._sections == {}:
print("* bad formatted configuration file: %s " % self.configuration_file)
if self.embedded:
raise ValueError
sys.exit(2)
for section in self.config.sections():
if self.verbose:
print("- section: %s" % section)
for (key, value) in self.config.items(section):
inner_property = "%s.%s" % (section, key)
# Set object property
setattr(self, inner_property, value)
# Set environment variable
os.environ[inner_property] = value
if self.verbose:
print(" %s = %s" % (inner_property, value))
if self.export:
# Allowed shell variables may only contain: [a-zA-Z0-9_]
inner_property = re.sub('[^0-9a-zA-Z]+', '_', inner_property)
inner_property = inner_property.upper()
print("export %s=%s" % (inner_property, cmd_quote(value)))
except configparser.ParsingError as exp:
print("* parsing error in config file : %s\n%s"
% (self.configuration_file, exp.message))
if self.embedded:
return False
sys.exit(3)
except configparser.InterpolationMissingOptionError as exp:
print("* incorrect or missing variable: %s" % str(exp))
if self.embedded:
return False
sys.exit(3)
if self.verbose:
print("Configuration file parsed correctly")
return True | [
"\n Check if some extra configuration files are existing in an `alignak.d` sub directory\n near the found configuration file.\n\n Parse the Alignak configuration file(s)\n\n Exit the script if some errors are encountered.\n\n :return: True/False\n "
] |
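A condensed sketch of that read-then-export path with `configparser` (hypothetical file names; `cmd_quote` above is a shell-quoting helper, for which the standard-library `shlex.quote` plays the same role here):

```python
import configparser
import os
import re
from shlex import quote

config = configparser.ConfigParser()
config.read(['alignak.ini', 'alignak.d/extra.ini'])  # missing files are skipped

for section in config.sections():
    for key, value in config.items(section):
        name = "%s.%s" % (section, key)
        os.environ[name] = value  # dotted name, exactly as parsed
        # Shell-exportable variant: restrict to [a-zA-Z0-9_], upper-cased
        print("export %s=%s" % (re.sub('[^0-9a-zA-Z]+', '_', name).upper(),
                                quote(value)))
```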
Please provide a description of the function:def write(self, env_file):
try:
with open(env_file, "w") as out_file:
self.config.write(out_file)
except Exception as exp: # pylint: disable=broad-except
print("Dumping environment file raised an error: %s. " % exp) | [
"\n Write the Alignak configuration to a file\n\n :param env_file: file name to dump the configuration\n :type env_file: str\n :return: True/False\n "
] |
Please provide a description of the function:def _search_sections(self, searched_sections=''):
found_sections = {}
# Get the daemons related properties
for section in self.config.sections():
if not section.startswith(searched_sections):
continue
if section not in found_sections:
found_sections.update({section: {'imported_from': self.configuration_file}})
for (key, value) in self.config.items(section):
found_sections[section].update({key: value})
return found_sections | [
"\n Search sections in the configuration which name starts with the provided search criteria\n :param searched_sections:\n :return: a dict containing the found sections and their parameters\n "
] |
Please provide a description of the function:def get_alignak_macros(self):
macros = self.get_alignak_configuration(macros=True)
sections = self._search_sections('pack.')
for name, _ in list(sections.items()):
section_macros = self.get_alignak_configuration(section=name, macros=True)
macros.update(section_macros)
return macros | [
"\n Get the Alignak macros.\n\n :return: a dict containing the Alignak macros\n "
] |
Please provide a description of the function:def get_alignak_configuration(self, section=SECTION_CONFIGURATION,
legacy_cfg=False, macros=False):
configuration = self._search_sections(section)
if section not in configuration:
return []
for prop, _ in list(configuration[section].items()):
# Only legacy configuration items
if legacy_cfg:
if not prop.startswith('cfg'):
configuration[section].pop(prop)
continue
# Only macro definitions
if macros:
if not prop.startswith('_') and not prop.startswith('$'):
configuration[section].pop(prop)
continue
# All values except legacy configuration and macros
if prop.startswith('cfg') or prop.startswith('_') or prop.startswith('$'):
configuration[section].pop(prop)
return configuration[section] | [
"\n Get the Alignak configuration parameters. All the variables included in\n the SECTION_CONFIGURATION section except the variables starting with 'cfg'\n and the macros.\n\n If `lecagy_cfg` is True, this function only returns the variables included in\n the SECTION_CONFIGURATION section except the variables starting with 'cfg'\n\n If `macros` is True, this function only returns the variables included in\n the SECTION_CONFIGURATION section that are considered as macros\n\n :param section: name of the sectio nto search for\n :type section: str\n :param legacy_cfg: only get the legacy cfg declarations\n :type legacy_cfg: bool\n :param macros: only get the macros declarations\n :type macros: bool\n :return: a dict containing the Alignak configuration parameters\n "
] |
Please provide a description of the function:def get_daemons(self, daemon_name=None, daemon_type=None):
if daemon_name is not None:
sections = self._search_sections('daemon.%s' % daemon_name)
if 'daemon.%s' % daemon_name in sections:
return sections['daemon.' + daemon_name]
return {}
if daemon_type is not None:
sections = self._search_sections('daemon.')
for name, daemon in list(sections.items()):
if 'type' not in daemon or not daemon['type'] == daemon_type:
sections.pop(name)
return sections
return self._search_sections('daemon.') | [
"\n Get the daemons configuration parameters\n\n If name is provided, get the configuration for this daemon, else,\n If type is provided, get the configuration for all the daemons of this type, else\n get the configuration of all the daemons.\n\n :param daemon_name: the searched daemon name\n :param daemon_type: the searched daemon type\n :return: a dict containing the daemon(s) configuration parameters\n "
] |
Please provide a description of the function:def get_modules(self, name=None, daemon_name=None, names_only=True):
if name is not None:
sections = self._search_sections('module.' + name)
if 'module.' + name in sections:
return sections['module.' + name]
return {}
if daemon_name is not None:
section = self.get_daemons(daemon_name)
if 'modules' in section and section['modules']:
modules = []
for module_name in section['modules'].split(','):
if names_only:
modules.append(module_name)
else:
modules.append(self.get_modules(name=module_name))
return modules
return []
return self._search_sections('module.') | [
"\n Get the modules configuration parameters\n\n If name is provided, get the configuration for this module, else,\n If daemon_name is provided, get the configuration for all the modules of this daemon, else\n get the configuration of all the modules.\n\n :param name: the searched module name\n :param daemon_name: the modules of this daemon\n :param names_only: if True only returns the modules names, else all the configuration data\n :return: a dict containing the module(s) configuration parameters\n "
] |
Please provide a description of the function:def copy_shell(self):
cls = self.__class__
new_i = cls() # create a new group
new_i.uuid = self.uuid # with the same id
# Copy all properties
for prop in cls.properties:
if hasattr(self, prop):
if prop in ['members', 'unknown_members']:
setattr(new_i, prop, [])
else:
setattr(new_i, prop, getattr(self, prop))
return new_i | [
"\n Copy the group properties EXCEPT the members.\n Members need to be filled after manually\n\n :return: Itemgroup object\n :rtype: alignak.objects.itemgroup.Itemgroup\n :return: None\n "
] |
Please provide a description of the function:def add_members(self, members):
if not isinstance(members, list):
members = [members]
if not getattr(self, 'members', None):
self.members = members
else:
self.members.extend(members) | [
"Add a new member to the members list\n\n :param members: member name\n :type members: str\n :return: None\n "
] |
Please provide a description of the function:def add_unknown_members(self, members):
if not isinstance(members, list):
members = [members]
if not hasattr(self, 'unknown_members'):
self.unknown_members = members
else:
self.unknown_members.extend(members) | [
"Add a new member to the unknown members list\n\n :param member: member name\n :type member: str\n :return: None\n "
] |
Please provide a description of the function:def is_correct(self):
state = True
# Make members unique, remove duplicates
if self.members:
self.members = list(set(self.members))
if self.unknown_members:
for member in self.unknown_members:
msg = "[%s::%s] as %s, got unknown member '%s'" % (
self.my_type, self.get_name(), self.__class__.my_type, member
)
self.add_error(msg)
state = False
return super(Itemgroup, self).is_correct() and state | [
"\n Check if a group is valid.\n Valid mean all members exists, so list of unknown_members is empty\n\n :return: True if group is correct, otherwise False\n :rtype: bool\n "
] |
Please provide a description of the function:def get_initial_status_brok(self, extra=None):
# Here members is a list of identifiers and we need their names
if extra and isinstance(extra, Items):
members = []
for member_id in self.members:
member = extra[member_id]
members.append((member.uuid, member.get_name()))
extra = {'members': members}
return super(Itemgroup, self).get_initial_status_brok(extra=extra) | [
"\n Get a brok with the group properties\n\n `members` contains a list of uuid which we must provide the names. Thus we will replace\n the default provided uuid with the members short name. The `extra` parameter, if present,\n is containing the Items to search for...\n\n :param extra: monitoring items, used to recover members\n :type extra: alignak.objects.item.Items\n :return:Brok object\n :rtype: object\n "
] |
Please provide a description of the function:def check_dir(self, dirname):
try:
os.makedirs(dirname)
dir_stat = os.stat(dirname)
print("Created the directory: %s, stat: %s" % (dirname, dir_stat))
if not dir_stat.st_uid == self.uid:
os.chown(dirname, self.uid, self.gid)
os.chmod(dirname, 0o775)
dir_stat = os.stat(dirname)
print("Changed directory ownership and permissions: %s, stat: %s"
% (dirname, dir_stat))
self.pre_log.append(("DEBUG",
"Daemon '%s' directory %s checking... "
"User uid: %s, directory stat: %s."
% (self.name, dirname, os.getuid(), dir_stat)))
self.pre_log.append(("INFO",
"Daemon '%s' directory %s did not exist, I created it. "
"I set ownership for this directory to %s:%s."
% (self.name, dirname, self.user, self.group)))
except OSError as exp:
if exp.errno == errno.EEXIST and os.path.isdir(dirname):
# Directory still exists...
pass
else:
self.pre_log.append(("ERROR",
"Daemon directory '%s' did not exist, "
"and I could not create. Exception: %s"
% (dirname, exp)))
self.exit_on_error("Daemon directory '%s' did not exist, "
"and I could not create.'. Exception: %s"
% (dirname, exp), exit_code=3) | [
"Check and create directory\n\n :param dirname: file name\n :type dirname; str\n\n :return: None\n "
] |
Please provide a description of the function:def do_stop(self):
logger.info("Stopping %s...", self.name)
if self.sync_manager:
logger.info("Shutting down synchronization manager...")
self.sync_manager.shutdown()
self.sync_manager = None
# Maybe the modules manager is not even created!
if self.modules_manager:
logger.info("Shutting down modules manager...")
self.modules_manager.stop_all()
# todo: daemonize the process thanks to CherryPy plugin
if self.http_daemon:
logger.info("Shutting down HTTP daemon...")
if self.http_daemon.cherrypy_thread:
self.http_daemon.stop()
self.http_daemon = None
# todo: daemonize the process thanks to CherryPy plugin
if self.http_thread:
logger.info("Checking HTTP thread...")
# Give it a few seconds to exit
self.http_thread.join(timeout=3)
if self.http_thread.is_alive(): # pragma: no cover, should never happen...
logger.warning("HTTP thread did not terminated. Force stopping the thread..")
# try:
# self.http_thread._Thread__stop() # pylint: disable=E1101
# except Exception as exp: # pylint: disable=broad-except
# print("Exception: %s" % exp)
else:
logger.debug("HTTP thread exited")
self.http_thread = None | [
"Execute the stop of this daemon:\n - request the daemon to stop\n - request the http thread to stop, else force stop the thread\n - Close the http socket\n - Shutdown the manager\n - Stop and join all started \"modules\"\n\n :return: None\n "
] |
Please provide a description of the function:def request_stop(self, message='', exit_code=0):
# Log an error message if exit code is not 0
# Force output to stderr
if exit_code:
if message:
logger.error(message)
try:
sys.stderr.write(message)
except Exception: # pylint: disable=broad-except
pass
logger.error("Sorry, I bail out, exit code: %d", exit_code)
try:
sys.stderr.write("Sorry, I bail out, exit code: %d" % exit_code)
except Exception: # pylint: disable=broad-except
pass
else:
if message:
logger.info(message)
self.unlink()
self.do_stop()
logger.info("Stopped %s.", self.name)
sys.exit(exit_code) | [
"Remove pid and stop daemon\n\n :return: None\n "
] |
Please provide a description of the function:def get_links_of_type(self, s_type=''):
satellites = {
'arbiter': getattr(self, 'arbiters', []),
'scheduler': getattr(self, 'schedulers', []),
'broker': getattr(self, 'brokers', []),
'poller': getattr(self, 'pollers', []),
'reactionner': getattr(self, 'reactionners', []),
'receiver': getattr(self, 'receivers', [])
}
if not s_type:
result = {}
for sat_type in satellites:
# if sat_type == self.type:
# continue
for sat_uuid in satellites[sat_type]:
result[sat_uuid] = satellites[sat_type][sat_uuid]
return result
if s_type in satellites:
return satellites[s_type]
return None | [
"Return the `s_type` satellite list (eg. schedulers)\n\n If s_type is None, returns a dictionary of all satellites, else returns the dictionary\n of the s_type satellites\n\n The returned dict is indexed with the satellites uuid.\n\n :param s_type: satellite type\n :type s_type: str\n :return: dictionary of satellites\n :rtype: dict\n "
] |
Please provide a description of the function:def daemon_connection_init(self, s_link, set_wait_new_conf=False):
logger.debug("Daemon connection initialization: %s %s", s_link.type, s_link.name)
# If the link is not active, I do not try to initialize the connection, just useless ;)
if not s_link.active:
logger.warning("%s '%s' is not active, do not initialize its connection!",
s_link.type, s_link.name)
return False
# Create the daemon connection
s_link.create_connection()
# Get the connection running identifier - first client / server communication
logger.debug("[%s] Getting running identifier for '%s'", self.name, s_link.name)
# Assume the daemon should be alive and reachable
# because we are initializing the connection...
s_link.alive = True
s_link.reachable = True
got_a_running_id = None
for _ in range(0, s_link.max_check_attempts):
got_a_running_id = s_link.get_running_id()
if got_a_running_id:
s_link.last_connection = time.time()
if set_wait_new_conf:
s_link.wait_new_conf()
break
time.sleep(0.3)
return got_a_running_id | [
"Initialize a connection with the daemon for the provided satellite link\n\n Initialize the connection (HTTP client) to the daemon and get its running identifier.\n Returns True if it succeeds else if any error occur or the daemon is inactive\n it returns False.\n\n Assume the daemon should be reachable because we are initializing the connection...\n as such, force set the link reachable property\n\n If set_wait_new_conf is set, the daemon is requested to wait a new configuration if\n we get a running identifier. This is used by the arbiter when a new configuration\n must be dispatched\n\n NB: if the daemon is configured as passive, or if it is a daemon link that is\n inactive then it returns False without trying a connection.\n\n :param s_link: link of the daemon to connect to\n :type s_link: SatelliteLink\n :param set_wait_new_conf: if the daemon must got the wait new configuration state\n :type set_wait_new_conf: bool\n :return: True if the connection is established, else False\n "
] |
Please provide a description of the function:def do_main_loop(self):
# pylint: disable=too-many-branches, too-many-statements, too-many-locals
# Increased on each loop turn
if self.loop_count is None:
self.loop_count = 0
# Daemon start timestamp
if self.start_time is None:
self.start_time = time.time()
# For the pause duration
logger.info("pause duration: %.2f", self.pause_duration)
# For the maximum expected loop duration
self.maximum_loop_duration = 1.1 * self.maximum_loop_duration
logger.info("maximum expected loop duration: %.2f", self.maximum_loop_duration)
# Treatments before starting the main loop...
self.do_before_loop()
elapsed_time = 0
logger.info("starting main loop: %.2f", self.start_time)
while not self.interrupted:
loop_start_ts = time.time()
# Maybe someone said we will stop...
if self.will_stop and not self.type == 'arbiter':
logger.debug("death-wait mode... waiting for death")
_, _ = self.make_a_pause(1.0)
continue
# Increment loop count
self.loop_count += 1
if self.log_loop:
logger.debug("--- %d", self.loop_count)
# Maybe the arbiter pushed a new configuration...
if self.watch_for_new_conf(timeout=0.05):
logger.info("I got a new configuration...")
# Manage the new configuration
self.setup_new_conf()
# Trying to restore our related daemons lost connections
for satellite in list(self.get_links_of_type(s_type='').values()):
# Not for configuration disabled satellites
if not satellite.active:
continue
if not satellite.alive and not satellite.passive:
logger.info("Trying to restore connection for %s/%s...",
satellite.type, satellite.name)
if self.daemon_connection_init(satellite):
logger.info("Connection restored")
# Each loop turn, execute the daemon specific treatment...
# only if the daemon has a configuration to manage
if self.have_conf:
_ts = time.time()
self.do_loop_turn()
statsmgr.timer('loop-turn', time.time() - _ts)
else:
logger.info("+++ loop %d, I do not have a configuration", self.loop_count)
if self.daemon_monitoring and (self.loop_count % self.daemon_monitoring_period == 1):
perfdatas = []
my_process = psutil.Process()
with my_process.oneshot():
perfdatas.append("num_threads=%d" % my_process.num_threads())
statsmgr.counter("system.num_threads", my_process.num_threads())
# perfdatas.append("num_ctx_switches=%d" % my_process.num_ctx_switches())
perfdatas.append("num_fds=%d" % my_process.num_fds())
statsmgr.counter("system.num_fds", my_process.num_fds())
# perfdatas.append("num_handles=%d" % my_process.num_handles())
perfdatas.append("create_time=%d" % my_process.create_time())
perfdatas.append("cpu_num=%d" % my_process.cpu_num())
statsmgr.counter("system.cpu_num", my_process.cpu_num())
perfdatas.append("cpu_usable=%d" % len(my_process.cpu_affinity()))
statsmgr.counter("system.cpu_usable", len(my_process.cpu_affinity()))
perfdatas.append("cpu_percent=%.2f%%" % my_process.cpu_percent())
statsmgr.counter("system.cpu_percent", my_process.cpu_percent())
cpu_times_percent = my_process.cpu_times()
for key in cpu_times_percent._fields:
perfdatas.append("cpu_%s_time=%.2fs" % (key,
getattr(cpu_times_percent, key)))
statsmgr.counter("system.cpu_%s_time" % key,
getattr(cpu_times_percent, key))
memory = my_process.memory_full_info()
for key in memory._fields:
perfdatas.append("mem_%s=%db" % (key, getattr(memory, key)))
statsmgr.counter("system.mem_%s" % key, getattr(memory, key))
logger.debug("Daemon %s (%s), pid=%s, ppid=%s, status=%s, cpu/memory|%s",
self.name, my_process.name(), my_process.pid, my_process.ppid(),
my_process.status(), " ".join(perfdatas))
if self.activity_log_period and (self.loop_count % self.activity_log_period == 1):
logger.info("Daemon %s is living: loop #%s ;)", self.name, self.loop_count)
# Maybe the arbiter pushed a new configuration...
if self.watch_for_new_conf(timeout=0.05):
logger.warning("The arbiter pushed a new configuration... ")
# Loop end
loop_end_ts = time.time()
loop_duration = loop_end_ts - loop_start_ts
pause = self.maximum_loop_duration - loop_duration
if loop_duration > self.maximum_loop_duration:
logger.info("The %s %s loop exceeded the maximum expected loop duration (%.2f). "
"The last loop needed %.2f seconds to execute. "
"You should try to reduce the load on this %s.",
self.type, self.name, self.maximum_loop_duration,
loop_duration, self.type)
# Make a very very short pause ...
pause = 0.01
# Pause the daemon execution to avoid too much load on the system
logger.debug("Before pause: timeout: %s", pause)
work, time_changed = self.make_a_pause(pause)
logger.debug("After pause: %.2f / %.2f, sleep time: %.2f",
work, time_changed, self.sleep_time)
if work > self.pause_duration:
logger.warning("Too much work during the pause (%.2f out of %.2f)! "
"The daemon should rest for a while... but one need to change "
"its code for this. Please log an issue in the project repository!",
work, self.pause_duration)
# self.pause_duration += 0.1
statsmgr.timer('sleep-time', self.sleep_time)
self.sleep_time = 0.0
# And now, the whole average time spent
elapsed_time = loop_end_ts - self.start_time
if self.log_loop:
logger.debug("Elapsed time, current loop: %.2f, from start: %.2f (%d loops)",
loop_duration, elapsed_time, self.loop_count)
statsmgr.gauge('loop-count', self.loop_count)
statsmgr.timer('run-duration', elapsed_time)
# Maybe someone said we will stop...
if self.will_stop:
if self.type == 'arbiter':
self.will_stop = False
else:
logger.info("The arbiter said we will stop soon - go to death-wait mode")
# Maybe someone asked us to die, if so, do it :)
if self.interrupted:
logger.info("Someone asked us to stop now")
continue
# If someone asked us a configuration reloading
if self.need_config_reload and self.type == 'arbiter':
logger.warning("Someone requested a configuration reload")
logger.info("Exiting daemon main loop")
return
# If someone asked us to dump memory, do it
if self.need_dump_environment:
logger.debug('Dumping memory')
self.dump_environment()
self.need_dump_environment = False
logger.info("stopped main loop: %.2f", time.time()) | [
"Main loop for an Alignak daemon\n\n :return: None\n "
] |
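The self-monitoring block relies on `psutil.Process.oneshot()`, which caches the underlying process reads so the successive metric calls cost a single snapshot. A trimmed-down sketch (POSIX-only: `num_fds()` is unavailable on Windows, and the first `cpu_percent()` call always reports 0.0):

```python
import psutil

proc = psutil.Process()  # the current process
with proc.oneshot():     # batch the /proc reads behind these calls
    print("pid=%d threads=%d fds=%d cpu=%.1f%%"
          % (proc.pid, proc.num_threads(), proc.num_fds(), proc.cpu_percent()))
    mem = proc.memory_full_info()
    print(" ".join("mem_%s=%db" % (field, getattr(mem, field))
                   for field in mem._fields))
```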
Please provide a description of the function:def do_load_modules(self, modules):
_ts = time.time()
logger.info("Loading modules...")
if self.modules_manager.load_and_init(modules):
if self.modules_manager.instances:
logger.info("I correctly loaded my modules: [%s]",
','.join([inst.name for inst in self.modules_manager.instances]))
else:
logger.info("I do not have any module")
else: # pragma: no cover, not with unit tests...
logger.error("Errors were encountered when checking and loading modules:")
for msg in self.modules_manager.configuration_errors:
logger.error(msg)
if self.modules_manager.configuration_warnings: # pragma: no cover, not tested
for msg in self.modules_manager.configuration_warnings:
logger.warning(msg)
statsmgr.gauge('modules.count', len(modules))
statsmgr.timer('modules.load-time', time.time() - _ts) | [
"Wrapper for calling load_and_init method of modules_manager attribute\n\n :param modules: list of modules that should be loaded by the daemon\n :return: None\n "
] |
Please provide a description of the function:def dump_environment(self):
# Dump the Alignak configuration to a temporary ini file
path = os.path.join(tempfile.gettempdir(),
'dump-env-%s-%s-%d.ini' % (self.type, self.name, int(time.time())))
try:
with open(path, "w") as out_file:
self.alignak_env.write(out_file)
except Exception as exp: # pylint: disable=broad-except
logger.error("Dumping daemon environment raised an error: %s. ", exp) | [
" Try to dump memory\n\n Not currently implemented feature\n\n :return: None\n "
] |
Please provide a description of the function:def change_to_workdir(self):
logger.info("Changing working directory to: %s", self.workdir)
self.check_dir(self.workdir)
try:
os.chdir(self.workdir)
except OSError as exp:
self.exit_on_error("Error changing to working directory: %s. Error: %s. "
"Check the existence of %s and the %s/%s account "
"permissions on this directory."
% (self.workdir, str(exp), self.workdir, self.user, self.group),
exit_code=3)
self.pre_log.append(("INFO", "Using working directory: %s" % os.path.abspath(self.workdir))) | [
"Change working directory to working attribute\n\n :return: None\n "
] |
Please provide a description of the function:def unlink(self):
logger.debug("Unlinking %s", self.pid_filename)
try:
os.unlink(self.pid_filename)
except OSError as exp:
logger.debug("Got an error unlinking our pid file: %s", exp) | [
"Remove the daemon's pid file\n\n :return: None\n "
] |
Please provide a description of the function:def check_shm():
import stat
shm_path = '/dev/shm'
if os.name == 'posix' and os.path.exists(shm_path):
# We get the access rights, and we check them
mode = stat.S_IMODE(os.lstat(shm_path)[stat.ST_MODE])
if not mode & stat.S_IWUSR or not mode & stat.S_IRUSR:
logger.critical("The directory %s is not writable or readable. "
"Please make it readable and writable: %s", shm_path, shm_path)
print("The directory %s is not writable or readable. "
"Please make it readable and writable: %s" % (shm_path, shm_path))
sys.exit(2) | [
" Check /dev/shm right permissions\n\n :return: None\n "
] |
Please provide a description of the function:def __open_pidfile(self, write=False):
# Any problem opening or creating the file makes us exit via exit_on_error below:
try:
self.pre_log.append(("DEBUG",
"Opening %s pid file: %s" % ('existing' if
os.path.exists(self.pid_filename)
else 'missing', self.pid_filename)))
# Windows does not manage the rw+ mode,
# so we must open in read mode first, then reopen it in write mode...
if not write and os.path.exists(self.pid_filename):
self.fpid = open(self.pid_filename, 'r+')
else:
# If it doesn't exist too, we create it as void
self.fpid = open(self.pid_filename, 'w+')
except Exception as exp: # pylint: disable=broad-except
self.exit_on_error("Error opening pid file: %s. Error: %s. "
"Check the %s:%s account permissions to write this file."
% (self.pid_filename, str(exp), self.user, self.group), exit_code=3) | [
"Open pid file in read or write mod\n\n :param write: boolean to open file in write mod (true = write)\n :type write: bool\n :return: None\n "
] |
Please provide a description of the function:def check_parallel_run(self): # pragma: no cover, not with unit tests...
# TODO: other daemon run on nt
if os.name == 'nt': # pragma: no cover, not currently tested with Windows...
logger.warning("The parallel daemon check is not available on Windows")
self.__open_pidfile(write=True)
return
# First open the pid file in open mode
self.__open_pidfile()
try:
pid_var = self.fpid.readline().strip(' \r\n')
if pid_var:
pid = int(pid_var)
logger.info("Found an existing pid (%s): '%s'", self.pid_filename, pid_var)
else:
logger.debug("Not found an existing pid: %s", self.pid_filename)
return
except (IOError, ValueError) as err:
logger.warning("PID file is empty or has an invalid content: %s", self.pid_filename)
return
if pid == os.getpid():
self.pid = pid
return
try:
logger.debug("Testing if the process is running: '%s'", pid)
os.kill(pid, 0)
except OSError:
# consider any exception as a stale pid file.
# this includes :
# * PermissionError when a process with same pid exists but is executed by another user
# * ProcessLookupError: [Errno 3] No such process
self.pre_log.append(("DEBUG", "No former instance to replace"))
logger.info("A stale pid file exists, reusing the same file")
return
if not self.do_replace:
self.exit_on_error("A valid pid file still exists (pid=%s) and "
"I am not allowed to replace. Exiting!" % pid, exit_code=3)
self.pre_log.append(("DEBUG", "Replacing former instance: %d" % pid))
try:
pgid = os.getpgid(pid)
# SIGQUIT to terminate and dump core
os.killpg(pgid, signal.SIGQUIT)
except os.error as err:
if err.errno != errno.ESRCH:
raise
self.fpid.close()
# TODO: give some time to wait that previous instance finishes?
time.sleep(1)
# we must also reopen the pid file in write mode
# because the previous instance should have deleted it!!
self.__open_pidfile(write=True) | [
"Check (in pid file) if there isn't already a daemon running.\n If yes and do_replace: kill it.\n Keep in self.fpid the File object to the pid file. Will be used by writepid.\n\n :return: None\n "
] |
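The `os.kill(pid, 0)` probe sends no signal at all; it only performs the existence/permission check. Note the method above deliberately treats *any* `OSError` as a stale pid file, including a permission error for a same-pid process owned by another user; the more conventional probe distinguishes the two cases, as in this sketch:

```python
import errno
import os

def pid_is_running(pid):
    """Probe a pid with signal 0: nothing is delivered, only checked."""
    try:
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:  # no such process: stale pid file
            return False
        if err.errno == errno.EPERM:  # process exists, owned by another user
            return True
        raise
    return True

print(pid_is_running(os.getpid()))  # -> True
```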
Please provide a description of the function:def write_pid(self, pid):
self.fpid.seek(0)
self.fpid.truncate()
self.fpid.write("%d" % pid)
self.fpid.close()
del self.fpid | [
" Write pid to the pid file\n\n :param pid: pid of the process\n :type pid: None | int\n :return: None\n "
] |
Please provide a description of the function:def close_fds(self, skip_close_fds): # pragma: no cover, not with unit tests...
# First we manage the file descriptor, because debug file can be
# relative to pwd
max_fds = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if max_fds == resource.RLIM_INFINITY:
max_fds = 1024
self.pre_log.append(("DEBUG", "Maximum file descriptors: %d" % max_fds))
# Iterate through and close all file descriptors.
for file_d in range(0, max_fds):
if file_d in skip_close_fds:
self.pre_log.append(("INFO", "Do not close fd: %s" % file_d))
continue
try:
os.close(file_d)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass | [
"Close all the process file descriptors.\n Skip the descriptors present in the skip_close_fds list\n\n :param skip_close_fds: list of file descriptor to preserve from closing\n :type skip_close_fds: list\n :return: None\n "
] |
Please provide a description of the function:def daemonize(self): # pragma: no cover, not for unit tests...
self.pre_log.append(("INFO", "Daemonizing..."))
print("Daemonizing %s..." % self.name)
# Set umask
os.umask(UMASK)
# Close all file descriptors except the one we need
self.pre_log.append(("DEBUG", "Closing file descriptors..."))
preserved_fds = [1, 2, self.fpid.fileno()]
if os.getenv('ALIGNAK_DO_NOT_PRESERVE_STDOUT', None):
preserved_fds = [self.fpid.fileno()]
if self.debug:
# Do not close stdout nor stderr
preserved_fds.extend([1, 2])
self.close_fds(preserved_fds)
# Now the double fork magic (fork/setsid/fork)
def fork_then_exit_parent(level, error_message):
try:
pid = os.fork()
if pid > 0:
if level == 2:
# When forking the grandchild, write our own pid
self.write_pid(pid)
os._exit(0)
except OSError as exc:
raise Exception("Fork error: %s [%d], exception: %s"
% (error_message, exc.errno, str(exc)))
fork_then_exit_parent(level=1, error_message="Failed first fork")
os.setsid()
fork_then_exit_parent(level=2, error_message="Failed second fork")
self.pid = os.getpid()
self.pre_log.append(("INFO", "We are now fully daemonized :) pid=%d" % self.pid))
return True | [
"Go in \"daemon\" mode: close unused fds, redirect stdout/err,\n chdir, umask, fork-setsid-fork-writepid\n Do the double fork to properly go daemon\n\n This is 'almost' as recommended by PEP3143 but it would be better to rewrite this\n daemonization thanks to the python-daemon library!\n\n :return: None\n ",
" Fork a child process, then exit the parent process.\n :param error_message: Message for the exception in case of a\n detach failure.\n :return: ``None``.\n :raise Exception: If the fork fails.\n "
] |
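A minimal sketch of the fork/setsid/fork sequence performed above, stripped of the pid file and file descriptor handling (POSIX only; the function name is illustrative):

import os

def double_fork():
    """fork, detach from the controlling terminal, fork again so the
    daemon can never reacquire one."""
    if os.fork() > 0:
        os._exit(0)    # original parent exits
    os.setsid()        # become session leader with no controlling tty
    if os.fork() > 0:
        os._exit(0)    # session leader exits; the grandchild is the daemon
    return os.getpid()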
Please provide a description of the function:def do_daemon_init_and_start(self, set_proc_title=True):
if set_proc_title:
self.set_proctitle(self.name)
# Change to configured user/group account
self.change_to_user_group()
# Change the working directory
self.change_to_workdir()
# Check if I am still running
self.check_parallel_run()
# If we must daemonize, let's do it!
if self.is_daemon:
if not self.daemonize():
logger.error("I could not daemonize myself :(")
return False
else:
# Else, I set my own pid as the reference one
self.write_pid(os.getpid())
# # TODO: check if really necessary!
# # -------
# # Set ownership on some default log files. It may happen that these default
# # files are owned by a privileged user account
# try:
# for log_file in ['alignak.log', 'alignak-events.log']:
# if os.path.exists('/tmp/%s' % log_file):
# with open('/tmp/%s' % log_file, "w") as file_log_file:
# os.fchown(file_log_file.fileno(), self.uid, self.gid)
# if os.path.exists('/tmp/monitoring-log/%s' % log_file):
# with open('/tmp/monitoring-log/%s' % log_file, "w") as file_log_file:
# os.fchown(file_log_file.fileno(), self.uid, self.gid)
# except Exception as exp: # pylint: disable=broad-except
# # pragma: no cover
# print("Could not set default log files ownership, exception: %s" % str(exp))
# Configure the daemon logger
self.setup_alignak_logger()
# Setup the Web Services daemon
if not self.setup_communication_daemon():
logger.error("I could not setup my communication daemon :(")
return False
# Creating synchronisation manager (inter-daemon queues...)
self.sync_manager = self._create_manager()
# Start the CherryPy server through a detached thread
logger.info("Starting http_daemon thread")
# pylint: disable=bad-thread-instantiation
self.http_thread = threading.Thread(target=self.http_daemon_thread,
name='%s-http_thread' % self.name)
# Setting the thread as a daemon allows Ctrl+C to kill the main daemon
self.http_thread.daemon = True
self.http_thread.start()
# time.sleep(1)
logger.info("HTTP daemon thread started")
return True | [
"Main daemon function.\n Clean, allocates, initializes and starts all necessary resources to go in daemon mode.\n\n The set_proc_title parameter is mainly useful for the Alignak unit tests.\n This to avoid changing the test process name!\n\n :param set_proc_title: if set (default), the process title is changed to the daemon name\n :type set_proc_title: bool\n :return: False if the HTTP daemon can not be initialized, else True\n "
] |
Please provide a description of the function:def setup_communication_daemon(self):
# pylint: disable=no-member
ca_cert = ssl_cert = ssl_key = server_dh = None
# The SSL part
if self.use_ssl:
ssl_cert = os.path.abspath(self.server_cert)
if not os.path.exists(ssl_cert):
self.exit_on_error("The configured SSL server certificate file '%s' "
"does not exist." % ssl_cert, exit_code=2)
logger.info("Using SSL server certificate: %s", ssl_cert)
ssl_key = os.path.abspath(self.server_key)
if not os.path.exists(ssl_key):
self.exit_on_error("The configured SSL server key file '%s' "
"does not exist." % ssl_key, exit_code=2)
logger.info("Using SSL server key: %s", ssl_key)
if self.server_dh:
server_dh = os.path.abspath(self.server_dh)
logger.info("Using ssl dh cert file: %s", server_dh)
self.exit_on_error("Sorry, but using a DH configuration "
"is not currently supported!", exit_code=2)
if self.ca_cert:
ca_cert = os.path.abspath(self.ca_cert)
logger.info("Using ssl ca cert file: %s", ca_cert)
if self.hard_ssl_name_check:
logger.info("Enabling hard SSL server name verification")
# Let's create the HTTPDaemon, it will be started later
# pylint: disable=E1101
try:
logger.info('Setting up HTTP daemon (%s:%d), %d threads',
self.host, self.port, self.thread_pool_size)
self.http_daemon = HTTPDaemon(self.host, self.port, self.http_interface,
self.use_ssl, ca_cert, ssl_key,
ssl_cert, server_dh, self.thread_pool_size,
self.log_cherrypy, self.favicon)
except PortNotFree:
logger.error('The HTTP daemon port (%s:%d) is not free...', self.host, self.port)
return False
except Exception as exp: # pylint: disable=broad-except
print('Setting up HTTP daemon, exception: %s' % str(exp))
logger.exception('Setting up HTTP daemon, exception: %s', str(exp))
return False
return True | [
" Setup HTTP server daemon to listen\n for incoming HTTP requests from other Alignak daemons\n\n :return: True if initialization is ok, else False\n "
] |
Please provide a description of the function:def change_to_user_group(self):
# TODO: change user on nt
if os.name == 'nt': # pragma: no cover, no Windows implementation currently
logger.warning("You can't change user on this system")
return
if (self.user == 'root' or self.group == 'root') and not self.idontcareaboutsecurity:
logger.error("You want the application to run with the root account credentials? "
"It is not a safe configuration!")
logger.error("If you really want it, set: 'idontcareaboutsecurity=1' "
"in the configuration file")
self.exit_on_error("You want the application to run with the root account credentials? "
"It is not a safe configuration! If you really want it, "
"set: 'idontcareaboutsecurity=1' in the configuration file.",
exit_code=3)
uid = None
try:
uid = getpwnam(self.user).pw_uid
except KeyError:
logger.error("The required user %s is unknown", self.user)
gid = None
try:
gid = getgrnam(self.group).gr_gid
except KeyError:
logger.error("The required group %s is unknown", self.group)
if uid is None or gid is None:
self.exit_on_error("Configured user/group (%s/%s) are not valid."
% (self.user, self.group), exit_code=1)
# Maybe the os module got the initgroups function. If so, try to call it.
# Do this when we are still root
logger.info('Trying to initialize additional groups for the daemon')
if hasattr(os, 'initgroups'):
try:
os.initgroups(self.user, gid)
except OSError as err:
logger.warning('Cannot call the additional groups setting with initgroups: %s',
err.strerror)
elif hasattr(os, 'setgroups'): # pragma: no cover, not with unit tests on Travis
# Else try to call the setgroups if it exists...
groups = [gid] + \
[group.gr_gid for group in get_all_groups() if self.user in group.gr_mem]
try:
os.setgroups(groups)
except OSError as err:
logger.warning('Cannot call the additional groups setting with setgroups: %s',
err.strerror)
try:
# First group, then user :)
os.setregid(gid, gid)
os.setreuid(uid, uid)
except OSError as err: # pragma: no cover, not with unit tests...
self.exit_on_error("Cannot change user/group to %s/%s (%s [%d]). Exiting..."
% (self.user, self.group, err.strerror, err.errno), exit_code=3) | [
" Change to configured user/group for the running program.\n If user/group are not valid, we exit with code 1\n If change failed we exit with code 2\n\n :return: None\n "
] |
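A condensed sketch of the privilege-drop order implemented above; the key point is that supplementary groups and the gid must be set while the process is still root, before the uid change (POSIX only, names are illustrative):

import os
from pwd import getpwnam
from grp import getgrnam

def drop_privileges(user, group):
    uid = getpwnam(user).pw_uid
    gid = getgrnam(group).gr_gid
    if hasattr(os, 'initgroups'):
        os.initgroups(user, gid)   # supplementary groups, needs root
    os.setregid(gid, gid)          # group first...
    os.setreuid(uid, uid)          # ...then user, which drops root for good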
Please provide a description of the function:def manage_signal(self, sig, frame): # pylint: disable=unused-argument
logger.info("received a signal: %s", SIGNALS_TO_NAMES_DICT[sig])
if sig == signal.SIGUSR1: # if USR1, ask a memory dump
self.need_dump_environment = True
elif sig == signal.SIGUSR2: # if USR2, ask objects dump
self.need_objects_dump = True
elif sig == signal.SIGHUP: # if HUP, reload the monitoring configuration
self.need_config_reload = True
else: # Ok, really ask us to die :)
logger.info("request to stop the daemon")
self.interrupted = True | [
"Manage signals caught by the daemon\n signal.SIGUSR1 : dump_environment\n signal.SIGUSR2 : dump_object (nothing)\n signal.SIGTERM, signal.SIGINT : terminate process\n\n :param sig: signal caught by daemon\n :type sig: str\n :param frame: current stack frame\n :type frame:\n :return: None\n "
] |
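The handler above only does something once it is installed; a sketch of the registration with a stand-in object exposing the same flags (the real Daemon class wires this up elsewhere in Alignak, so this is illustrative, POSIX-only code):

import signal

class FakeDaemon(object):
    interrupted = False
    need_dump_environment = False
    need_objects_dump = False
    need_config_reload = False

    def manage_signal(self, sig, frame):
        if sig == signal.SIGUSR1:
            self.need_dump_environment = True
        elif sig == signal.SIGUSR2:
            self.need_objects_dump = True
        elif sig == signal.SIGHUP:
            self.need_config_reload = True
        else:
            self.interrupted = True

daemon = FakeDaemon()
for sig in (signal.SIGTERM, signal.SIGINT, signal.SIGHUP,
            signal.SIGUSR1, signal.SIGUSR2):
    signal.signal(sig, daemon.manage_signal)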
Please provide a description of the function:def set_proctitle(self, daemon_name=None):
logger.debug("Setting my process name: %s", daemon_name)
if daemon_name:
setproctitle("alignak-%s %s" % (self.type, daemon_name))
if self.modules_manager:
self.modules_manager.set_daemon_name(daemon_name)
else:
setproctitle("alignak-%s" % self.type) | [
"Set the proctitle of the daemon\n\n :param daemon_name: daemon instance name (eg. arbiter-master). If not provided, only the\n daemon type (eg. arbiter) will be used for the process title\n :type daemon_name: str\n :return: None\n "
] |
Please provide a description of the function:def get_header(self, configuration=False):
header = [u"-----",
u" █████╗ ██╗ ██╗ ██████╗ ███╗ ██╗ █████╗ ██╗ ██╗",
u" ██╔══██╗██║ ██║██╔════╝ ████╗ ██║██╔══██╗██║ ██╔╝",
u" ███████║██║ ██║██║ ███╗██╔██╗ ██║███████║█████╔╝ ",
u" ██╔══██║██║ ██║██║ ██║██║╚██╗██║██╔══██║██╔═██╗ ",
u" ██║ ██║███████╗██║╚██████╔╝██║ ╚████║██║ ██║██║ ██╗",
u" ╚═╝ ╚═╝╚══════╝╚═╝ ╚═════╝ ╚═╝ ╚═══╝╚═╝ ╚═╝╚═╝ ╚═╝",
u"-----",
u"Alignak %s - %s daemon" % (VERSION, self.name),
u"Copyright (c) 2015-2019: Alignak Team",
u"License: AGPL",
u"-----",
u"Python: %s.%s" % (sys.version_info.major, sys.version_info.minor),
u"-----",
u"My pid: %s" % self.pid]
if configuration:
header = ["My configuration: "]
for prop, _ in sorted(self.properties.items()):
header.append(" - %s=%s" % (prop, getattr(self, prop)))
return header | [
"Get the log file header\n\n If configuration is True, this returns the daemon configuration\n\n :return: A string list containing project name, daemon name, version, licence etc.\n :rtype: list\n "
] |
Please provide a description of the function:def http_daemon_thread(self):
logger.debug("HTTP thread running")
try:
# This function is a blocking function serving HTTP protocol
self.http_daemon.run()
except PortNotFree as exp:
logger.exception('The HTTP daemon port is not free: %s', exp)
raise
except Exception as exp: # pylint: disable=broad-except
self.exit_on_exception(exp)
logger.debug("HTTP thread exiting") | [
"Main function of the http daemon thread will loop forever unless we stop the root daemon\n\n The main thing is to have a pool of X concurrent requests for the http_daemon,\n so \"no_lock\" calls can always be directly answer without having a \"locked\" version to\n finish. This is achieved thanks to the CherryPy thread pool.\n\n This function is threaded to be detached from the main process as such it will not block\n the process main loop..\n :return: None\n "
] |
Please provide a description of the function:def make_a_pause(self, timeout=0.0001, check_time_change=True):
if timeout == 0:
timeout = 0.0001
if not check_time_change:
# Time to sleep
time.sleep(timeout)
self.sleep_time += timeout
return 0, 0
# Check is system time changed
before = time.time()
time_changed = self.check_for_system_time_change()
after = time.time()
elapsed = after - before
if elapsed > timeout:
return elapsed, time_changed
# Time to sleep
time.sleep(timeout - elapsed)
# Increase our sleep time for the time we slept
before += time_changed
self.sleep_time += time.time() - before
return elapsed, time_changed | [
" Wait up to timeout and check for system time change.\n\n This function checks if the system time changed since the last call. If so,\n the difference is returned to the caller.\n The duration of this call is removed from the timeout. If this duration is\n greater than the required timeout, no sleep is executed and the extra time\n is returned to the caller\n\n If the required timeout was overlapped, then the first return value will be\n greater than the required timeout.\n\n If the required timeout is null, then the timeout value is set as a very short time\n to keep a nice behavior to the system CPU ;)\n\n :param timeout: timeout to wait for activity\n :type timeout: float\n :param check_time_change: True (default) to check if the system time changed\n :type check_time_change: bool\n :return:Returns a 2-tuple:\n * first value is the time spent for the time change check\n * second value is the time change difference\n :rtype: tuple\n "
] |
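make_a_pause relies on check_for_system_time_change, which is not shown here. A plausible sketch of such a detector compares the wall-clock delta with a monotonic reference; this is an assumption about its logic, not Alignak's exact code:

import time

_last_wall = time.time()
_last_mono = time.monotonic()

def check_for_system_time_change():
    """Return the wall-clock jump in seconds since the last call; 0 when the
    wall clock and the monotonic clock advanced by the same amount."""
    global _last_wall, _last_mono
    wall, mono = time.time(), time.monotonic()
    jump = (wall - _last_wall) - (mono - _last_mono)
    _last_wall, _last_mono = wall, mono
    return jump if abs(jump) > 1.0 else 0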
Please provide a description of the function:def wait_for_initial_conf(self, timeout=1.0):
logger.info("Waiting for initial configuration")
# The arbiter has not yet provided our configuration
_ts = time.time()
while not self.new_conf and not self.interrupted:
# Make a pause and check if the system time changed
_, _ = self.make_a_pause(timeout, check_time_change=True)
if not self.interrupted:
logger.info("Got initial configuration, waited for: %.2f seconds", time.time() - _ts)
statsmgr.timer('configuration.initial', time.time() - _ts)
else:
logger.info("Interrupted before getting the initial configuration") | [
"Wait initial configuration from the arbiter.\n Basically sleep 1.0 and check if new_conf is here\n\n :param timeout: timeout to wait\n :type timeout: int\n :return: None\n "
] |
Please provide a description of the function:def watch_for_new_conf(self, timeout=0):
logger.debug("Watching for a new configuration, timeout: %s", timeout)
self.make_a_pause(timeout=timeout, check_time_change=False)
return any(self.new_conf) | [
"Check if a new configuration was sent to the daemon\n\n This function is called on each daemon loop turn. Basically it is a sleep...\n\n If a new configuration was posted, this function returns True\n\n :param timeout: timeout to wait. Default is no wait time.\n :type timeout: float\n :return: None\n "
] |
Please provide a description of the function:def hook_point(self, hook_name, handle=None):
full_hook_name = 'hook_' + hook_name
for module in self.modules_manager.instances:
_ts = time.time()
if not hasattr(module, full_hook_name):
continue
fun = getattr(module, full_hook_name)
try:
fun(handle if handle is not None else self)
# pylint: disable=broad-except
except Exception as exp: # pragma: no cover, never happen during unit tests...
logger.warning('The instance %s raised an exception %s. I disabled it,'
' and set it to restart later', module.name, str(exp))
logger.exception('Exception %s', exp)
self.modules_manager.set_to_restart(module)
else:
statsmgr.timer('hook.%s.%s' % (hook_name, module.name), time.time() - _ts) | [
"Used to call module function that may define a hook function for hook_name\n\n Available hook points:\n - `tick`, called on each daemon loop turn\n - `save_retention`; called by the scheduler when live state\n saving is to be done\n - `load_retention`; called by the scheduler when live state\n restoring is necessary (on restart)\n - `get_new_actions`; called by the scheduler before adding the actions to be executed\n - `early_configuration`; called by the arbiter when it begins parsing the configuration\n - `read_configuration`; called by the arbiter when it read the configuration\n - `late_configuration`; called by the arbiter when it finishes parsing the configuration\n\n As a default, the `handle` parameter provided to the hooked function is the\n caller Daemon object. The scheduler will provide its own instance when it call this\n function.\n\n :param hook_name: function name we may hook in module\n :type hook_name: str\n :param handle: parameter to provide to the hook function\n :type: handle: alignak.Satellite\n :return: None\n "
] |
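From a module author's point of view, providing a hook is only a naming convention: hook_point('tick') looks for a method named hook_tick on each module instance. A minimal sketch (module name and body are illustrative):

class MyModule(object):
    name = 'my-module'

    def hook_tick(self, handle):
        # 'handle' is the calling daemon (or the scheduler instance)
        print("tick received from %s" % handle)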
Please provide a description of the function:def get_id(self, details=False): # pylint: disable=unused-argument
# Modules information
res = {
"alignak": getattr(self, 'alignak_name', 'unknown'),
"type": getattr(self, 'type', 'unknown'),
"name": getattr(self, 'name', 'unknown'),
"version": VERSION
}
return res | [
"Get daemon identification information\n\n :return: A dict with the following structure\n ::\n {\n \"alignak\": selfAlignak instance name\n \"type\": daemon type\n \"name\": daemon name\n \"version\": Alignak version\n }\n\n :rtype: dict\n "
] |
Please provide a description of the function:def get_daemon_stats(self, details=False): # pylint: disable=unused-argument
res = self.get_id()
res.update({
"program_start": self.program_start,
"spare": self.spare,
'counters': {},
'metrics': [],
'modules': {
'internal': {}, 'external': {}
}
})
# Modules information
modules = res['modules']
counters = res['counters']
counters['modules'] = len(self.modules_manager.instances)
# first get data for all internal modules
for instance in self.modules_manager.get_internal_instances():
state = {True: 'ok', False: 'stopped'}[(instance
not in self.modules_manager.to_restart)]
modules['internal'][instance.name] = {'name': instance.name, 'state': state}
# Same but for external ones
for instance in self.modules_manager.get_external_instances():
state = {True: 'ok', False: 'stopped'}[(instance
not in self.modules_manager.to_restart)]
modules['external'][instance.name] = {'name': instance.name, 'state': state}
return res | [
"Get state of modules and create a scheme for stats data of daemon\n This may be overridden in subclasses (and it is...)\n\n :return: A dict with the following structure\n ::\n {\n 'modules': {\n 'internal': {'name': \"MYMODULE1\", 'state': 'ok'},\n 'external': {'name': \"MYMODULE2\", 'state': 'stopped'},\n },\n And some extra information, see the source code below...\n }\n\n These information are completed with the data provided by the get_id function\n which provides the daemon identification\n\n :rtype: dict\n "
] |
Please provide a description of the function:def exit_ok(self, message, exit_code=None):
logger.info("Exiting...")
if message:
logger.info("-----")
logger.error("Exit message: %s", message)
logger.info("-----")
self.request_stop()
if exit_code is not None:
exit(exit_code) | [
"Log a message and exit\n\n :param exit_code: if not None, exit with the provided value as exit code\n :type exit_code: int\n :param message: message for the exit reason\n :type message: str\n :return: None\n "
] |
Please provide a description of the function:def exit_on_error(self, message, exit_code=1):
# pylint: disable=no-self-use
log = "I got an unrecoverable error. I have to exit."
if message:
log += "\n-----\nError message: %s" % message
print("Error message: %s" % message)
log += "-----\n"
log += "You can get help at https://github.com/Alignak-monitoring/alignak\n"
log += "If you think this is a bug, create a new issue including as much " \
"details as possible (version, configuration,...)"
if exit_code is not None:
exit(exit_code) | [
"Log generic message when getting an error and exit\n\n :param exit_code: if not None, exit with the provided value as exit code\n :type exit_code: int\n :param message: message for the exit reason\n :type message: str\n :return: None\n "
] |
Please provide a description of the function:def exit_on_exception(self, raised_exception, message='', exit_code=99):
self.exit_on_error(message=message, exit_code=None)
logger.critical("-----\nException: %s\nBack trace of the error:\n%s",
str(raised_exception), traceback.format_exc())
exit(exit_code) | [
"Log generic message when getting an unrecoverable error\n\n :param raised_exception: raised Exception\n :type raised_exception: Exception\n :param message: message for the exit reason\n :type message: str\n :param exit_code: exit with the provided value as exit code\n :type exit_code: int\n :return: None\n "
] |
Please provide a description of the function:def get_objects_from_from_queues(self):
_t0 = time.time()
had_some_objects = False
for module in self.modules_manager.get_external_instances():
queue = module.from_q
if not queue:
continue
while True:
queue_size = queue.qsize()
if queue_size:
statsmgr.gauge('queues.from.%s.count' % module.get_name(), queue_size)
try:
obj = queue.get_nowait()
except Full:
logger.warning("Module %s from queue is full", module.get_name())
except Empty:
break
except (IOError, EOFError) as exp:
logger.warning("Module %s from queue is no more available: %s",
module.get_name(), str(exp))
except Exception as exp: # pylint: disable=broad-except
logger.error("An external module queue got a problem '%s'", str(exp))
else:
had_some_objects = True
self.add(obj)
statsmgr.timer('queues.time', time.time() - _t0)
return had_some_objects | [
" Get objects from \"from\" queues and add them.\n\n :return: True if we got something in the queue, False otherwise.\n :rtype: bool\n "
] |
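The inner loop above is the classic non-blocking drain pattern; reduced to its core (Python 3 queue module shown, Python 2 spells it Queue):

from queue import Queue, Empty

def drain(queue):
    """get_nowait() until Empty, so a slow producer never blocks us."""
    objects = []
    while True:
        try:
            objects.append(queue.get_nowait())
        except Empty:
            break
    return objects

q = Queue()
for i in range(3):
    q.put(i)
print(drain(q))   # [0, 1, 2]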
Please provide a description of the function:def setup_alignak_logger(self):
# Configure the daemon logger
try:
# Make sure that the log directory is existing
self.check_dir(self.logdir)
setup_logger(logger_configuration_file=self.logger_configuration,
log_dir=self.logdir, process_name=self.name,
log_file=self.log_filename)
if self.debug:
# Force the global logger at DEBUG level
set_log_level('DEBUG')
logger.info("-----")
logger.info("Daemon log level set to a minimum of DEBUG")
logger.info("-----")
elif self.verbose:
# Force the global logger at INFO level
set_log_level('INFO')
logger.info("-----")
logger.info("Daemon log level set to a minimum of INFO")
logger.info("-----")
elif self.log_level:
# Force the global logger at provided level
set_log_level(self.log_level)
logger.info("-----")
logger.info("Daemon log level set to %s", self.log_level)
logger.info("-----")
except Exception as exp: # pylint: disable=broad-except
print("***** %s - exception when setting-up the logger: %s" % (self.name, exp))
self.exit_on_exception(exp, message="Logger configuration error!")
logger.debug("Alignak daemon logger configured")
for line in self.get_header():
logger.info("- %s", line)
# Log daemon configuration
for line in self.get_header(configuration=True):
logger.debug("- %s", line)
# We can now output some previously silenced debug output
if self.pre_log:
logger.debug("--- Start - Log prior to our configuration:")
for level, message in self.pre_log:
fun_level = level.lower()
getattr(logger, fun_level)("- %s", message)
# if level.lower() == "debug":
# logger.debug(message)
# elif level.lower() == "info":
# logger.info(message)
# elif level.lower() == "warning":
# logger.warning(message)
logger.debug("--- Stop - Log prior to our configuration") | [
" Setup alignak logger:\n - with the daemon log configuration properties\n - configure the global daemon handler (root logger)\n - log the daemon Alignak header\n\n - configure the global Alignak monitoring log\n\n This function is called very early on daemon start. The daemon is not yet forked and\n may still run with a high privileged user account. This is why, the log file ownership\n may be set accordingly to the running user account.\n\n :return: None\n "
] |
Please provide a description of the function:def enter(self, timeperiods, hosts, services):
if self.ref in hosts:
item = hosts[self.ref]
else:
item = services[self.ref]
broks = []
self.is_in_effect = True
if self.fixed is False:
now = time.time()
self.real_end_time = now + self.duration
item.scheduled_downtime_depth += 1
item.in_scheduled_downtime = True
if item.scheduled_downtime_depth == 1:
item.raise_enter_downtime_log_entry()
notification_period = None
if getattr(item, 'notification_period', None) is not None:
notification_period = timeperiods[item.notification_period]
# Notification author data
# todo: note that alias and name are not implemented yet
author_data = {
'author': self.author, 'author_name': u'Not available',
'author_alias': u'Not available', 'author_comment': self.comment
}
item.create_notifications('DOWNTIMESTART', notification_period, hosts, services,
author_data=author_data)
if self.ref in hosts:
broks.append(self.get_raise_brok(item.get_name()))
# For an host, acknowledge the host problem (and its services problems)
# Acknowledge the host with a sticky ack and notifications
# The acknowledge will expire at the same time as the downtime end
item.acknowledge_problem(notification_period, hosts, services, 2, 1, "Alignak",
"Acknowledged because of an host downtime")
else:
broks.append(self.get_raise_brok(item.host_name, item.get_name()))
for downtime_id in self.activate_me:
for host in hosts:
if downtime_id in host.downtimes:
downtime = host.downtimes[downtime_id]
broks.extend(downtime.enter(timeperiods, hosts, services))
for service in services:
if downtime_id in service.downtimes:
downtime = service.downtimes[downtime_id]
broks.extend(downtime.enter(timeperiods, hosts, services))
return broks | [
"Set ref in scheduled downtime and raise downtime log entry (start)\n\n :param hosts: hosts objects to get item ref\n :type hosts: alignak.objects.host.Hosts\n :param services: services objects to get item ref\n :type services: alignak.objects.service.Services\n :return: broks\n :rtype: list of broks\n "
] |
Please provide a description of the function:def exit(self, timeperiods, hosts, services):
if self.ref in hosts:
item = hosts[self.ref]
else:
item = services[self.ref]
broks = []
# If not is_in_effect, it was probably a flexible downtime which was
# never triggered. In this case, nothing special to do...
if self.is_in_effect is True:
# This was a fixed or a flexible+triggered downtime
self.is_in_effect = False
item.scheduled_downtime_depth -= 1
if item.scheduled_downtime_depth == 0:
item.raise_exit_downtime_log_entry()
notification_period = timeperiods[item.notification_period]
# Notification author data
# todo: note that alias and name are not implemented yet
author_data = {
'author': self.author, 'author_name': u'Not available',
'author_alias': u'Not available', 'author_comment': self.comment
}
item.create_notifications(u'DOWNTIMEEND', notification_period, hosts, services,
author_data=author_data)
item.in_scheduled_downtime = False
if self.ref in hosts:
broks.append(self.get_expire_brok(item.get_name()))
else:
broks.append(self.get_expire_brok(item.host_name, item.get_name()))
item.del_comment(self.comment_id)
self.can_be_deleted = True
# when a downtime ends and the concerned item was a problem
# a notification should be sent with the next critical check
# So we should set a flag here which informs the consume_result function
# to send a notification
item.in_scheduled_downtime_during_last_check = True
return broks | [
"Remove ref in scheduled downtime and raise downtime log entry (exit)\n\n :param hosts: hosts objects to get item ref\n :type hosts: alignak.objects.host.Hosts\n :param services: services objects to get item ref\n :type services: alignak.objects.service.Services\n :return: [], always | None\n :rtype: list\n "
] |
Please provide a description of the function:def cancel(self, timeperiods, hosts, services):
if self.ref in hosts:
item = hosts[self.ref]
else:
item = services[self.ref]
broks = []
self.is_in_effect = False
item.scheduled_downtime_depth -= 1
if item.scheduled_downtime_depth == 0:
item.raise_cancel_downtime_log_entry()
item.in_scheduled_downtime = False
if self.ref in hosts:
broks.append(self.get_expire_brok(item.get_name()))
else:
broks.append(self.get_expire_brok(item.host_name, item.get_name()))
self.del_automatic_comment(item)
self.can_be_deleted = True
item.in_scheduled_downtime_during_last_check = True
# Nagios does not notify on canceled downtimes
# res.extend(self.ref.create_notifications('DOWNTIMECANCELLED'))
# Also cancel other downtimes triggered by me
for downtime in self.activate_me:
broks.extend(downtime.cancel(timeperiods, hosts, services))
return broks | [
"Remove ref in scheduled downtime and raise downtime log entry (cancel)\n\n :param hosts: hosts objects to get item ref\n :type hosts: alignak.objects.host.Hosts\n :param services: services objects to get item ref\n :type services: alignak.objects.service.Services\n :return: [], always\n :rtype: list\n "
] |
Please provide a description of the function:def add_automatic_comment(self, ref):
if self.fixed is True:
text = (DOWNTIME_FIXED_MESSAGE % (ref.my_type,
time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(self.start_time)),
time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(self.end_time)),
ref.my_type))
else:
hours, remainder = divmod(self.duration, 3600)
minutes, _ = divmod(remainder, 60)
text = (DOWNTIME_FLEXIBLE_MESSAGE % (ref.my_type,
time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(self.start_time)),
time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(self.end_time)),
hours, minutes, ref.my_type))
data = {
'comment': text,
'comment_type': 1 if ref.my_type == 'host' else 2,
'entry_type': 2,
'source': 0,
'expires': False,
'ref': ref.uuid
}
comment = Comment(data)
self.comment_id = comment.uuid
ref.comments[comment.uuid] = comment
return comment | [
"Add comment on ref for downtime\n\n :param ref: the host/service we want to link a comment to\n :type ref: alignak.objects.schedulingitem.SchedulingItem\n\n :return: None\n "
] |
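The flexible-downtime branch above formats the duration with two divmod calls; a worked example:

duration = 5430                            # seconds
hours, remainder = divmod(duration, 3600)  # 1, 1830
minutes, _ = divmod(remainder, 60)         # 30
print("%dh%02dm" % (hours, minutes))       # 1h30m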
Please provide a description of the function:def get_raise_brok(self, host_name, service_name=''):
data = self.serialize()
data['host'] = host_name
if service_name != '':
data['service'] = service_name
return Brok({'type': 'downtime_raise', 'data': data}) | [
"Get a start downtime brok\n\n :param host_name: host concerned by the downtime\n :type host_name\n :param service_name: service concerned by the downtime\n :type service_name\n :return: brok with wanted data\n :rtype: alignak.brok.Brok\n "
] |
Please provide a description of the function:def get_expire_brok(self, host_name, service_name=''):
data = self.serialize()
data['host'] = host_name
if service_name != '':
data['service'] = service_name
return Brok({'type': 'downtime_expire', 'data': data}) | [
"Get an expire downtime brok\n\n :param host_name: host concerned by the downtime\n :type host_name\n :param service_name: service concerned by the downtime\n :type service_name\n :return: brok with wanted data\n :rtype: alignak.brok.Brok\n "
] |
Please provide a description of the function:def fill_data_brok_from(self, data, brok_type):
cls = self.__class__
# Now config properties
for prop, entry in list(cls.properties.items()):
# Is this property intended for broking?
# if 'fill_brok' in entry[prop]:
if brok_type in entry.fill_brok:
if hasattr(self, prop):
data[prop] = getattr(self, prop) | [
"\n Add properties to data if fill_brok of these class properties\n is same as brok_type\n\n :param data: dictionnary of this command\n :type data: dict\n :param brok_type: type of brok\n :type brok_type: str\n :return: None\n "
] |
Please provide a description of the function:def is_correct(self):
state = True
# _internal_host_check is for having an host check result
# without running a check plugin
if self.command_name.startswith('_internal_host_check'):
# Command line may contain: [state_id][;output]
parameters = self.command_line.split(';')
if len(parameters) < 2:
former_name = self.command_name
self.command_name = "_internal_host_check;0;Host assumed to be UP"
self.add_warning("[%s::%s] has no defined state nor output. Changed to %s"
% (self.my_type, former_name, self.command_name))
elif len(parameters) < 3:
host_state = 3
try:
host_state = int(parameters[1])
except ValueError:
self.add_warning("[%s::%s] requires an integer state, got: %s. Using 3."
% (self.my_type, self.command_name, parameters[1]))
if host_state not in (0, 1, 2, 3, 4):
self.add_warning("[%s::%s] requires a state in 0..4, got: %d. Using 3."
% (self.my_type, self.command_name, host_state))
host_state = 3
output = {0: "UP", 1: "DOWN", 2: "DOWN", 3: "UNKNOWN", 4: "UNREACHABLE", }[host_state]
former_name = self.command_name
self.command_name = "%s;%d;Host assumed to be %s" % (parameters[0], host_state, output)
self.add_warning("[%s::%s] has no defined output. Changed to %s"
% (self.my_type, former_name, self.command_name))
elif len(parameters) > 3:
former_name = self.command_name
self.command_name = "%s;%s;%s" % (parameters[0], parameters[1], parameters[2])
self.add_warning("[%s::%s] has too many parameters. Changed to %s"
% (self.my_type, former_name, self.command_name))
return super(Command, self).is_correct() and state | [
"Check if this object configuration is correct ::\n\n * Check our own specific properties\n * Call our parent class is_correct checker\n\n :return: True if the configuration is correct, otherwise False\n :rtype: bool\n "
] |
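The '_internal_host_check' convention validated above is a plain semicolon-separated line, command_name[;state_id[;output]]. A quick illustration of the parsing (the fallback values mirror the defaults used above):

line = "_internal_host_check;0;Host assumed to be UP"
parts = line.split(';')
state = int(parts[1]) if len(parts) > 1 and parts[1].isdigit() else 3
output = parts[2] if len(parts) > 2 else "Host assumed to be UP"
print(state, output)   # 0 Host assumed to be UP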
Please provide a description of the function:def get_name(self):
return "%s/%s..%s/%s" % (getattr(self, 'dependent_host_name', ''),
getattr(self, 'dependent_service_description', ''),
getattr(self, 'host_name', ''),
getattr(self, 'service_description', ''))
"Get name based on 4 class attributes\n Each attribute is substituted by '' if attribute does not exist\n\n :return: dependent_host_name/dependent_service_description..host_name/service_description\n :rtype: str\n TODO: Clean this function (use format for string)\n "
] |
Please provide a description of the function:def add_service_dependency(self, dep_host_name, dep_service_description,
par_host_name, par_service_description):
# We create a "standard" service_dep
prop = {
'dependent_host_name': dep_host_name,
'dependent_service_description': dep_service_description,
'host_name': par_host_name,
'service_description': par_service_description,
'notification_failure_criteria': 'u,c,w',
'inherits_parent': '1',
}
servicedep = Servicedependency(prop)
self.add_item(servicedep) | [
"Instantiate and add a Servicedependency object to the items dict::\n\n * notification criteria is \"u,c,w\"\n * inherits_parent is True\n\n :param dep_host_name: dependent host name\n :type dep_host_name: str\n :param dep_service_description: dependent service description\n :type dep_service_description: str\n :param par_host_name: host name\n :type par_host_name: str\n :param par_service_description: service description\n :type par_service_description: str\n :return: None\n "
] |
Please provide a description of the function:def explode_hostgroup(self, svc_dep, hostgroups):
# pylint: disable=too-many-locals
# We will create a service dependency for each host part of the host group
# First get services
snames = [d.strip() for d in svc_dep.service_description.split(',')]
# And dep services
dep_snames = [d.strip() for d in svc_dep.dependent_service_description.split(',')]
# Now for each host into hostgroup we will create a service dependency object
hg_names = [n.strip() for n in svc_dep.hostgroup_name.split(',')]
for hg_name in hg_names:
hostgroup = hostgroups.find_by_name(hg_name)
if hostgroup is None:
err = "ERROR: the servicedependecy got an unknown hostgroup_name '%s'" % hg_name
self.add_error(err)
continue
hnames = []
hnames.extend([m.strip() for m in hostgroup.get_hosts()])
for hname in hnames:
for dep_sname in dep_snames:
for sname in snames:
new_sd = svc_dep.copy()
new_sd.host_name = hname
new_sd.service_description = sname
new_sd.dependent_host_name = hname
new_sd.dependent_service_description = dep_sname
self.add_item(new_sd) | [
"Explode a service dependency for each member of hostgroup\n\n :param svc_dep: service dependency to explode\n :type svc_dep: alignak.objects.servicedependency.Servicedependency\n :param hostgroups: used to find hostgroup objects\n :type hostgroups: alignak.objects.hostgroup.Hostgroups\n :return:None\n "
] |
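The triple nested loop above is a cartesian product over hosts, dependent services and parent services; itertools.product expresses the same expansion (the sample names are illustrative):

from itertools import product

hnames = ['web-1', 'web-2']
dep_snames = ['HTTP']
snames = ['MySQL']
for hname, dep_sname, sname in product(hnames, dep_snames, snames):
    print("%s/%s depends on %s/%s" % (hname, dep_sname, hname, sname))
# web-1/HTTP depends on web-1/MySQL
# web-2/HTTP depends on web-2/MySQL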
Please provide a description of the function:def explode(self, hostgroups):
# pylint: disable=too-many-locals, too-many-branches
# The "old" services will be removed. All services with
# more than one host or a host group will be in it
srvdep_to_remove = []
# Then, for every host, create a copy of the service dependency with just that host;
# because we are adding items, we cannot loop over the dict while modifying it
servicedeps = list(self.items.keys())
for s_id in servicedeps:
servicedep = self.items[s_id]
# First case: we only have to propagate the services dependencies to all the hosts
# of some hostgroups
# Either a specific property is defined (Shinken) or no dependent hosts groups
# is defined
if bool(getattr(servicedep, 'explode_hostgroup', 0)) or \
(hasattr(servicedep, 'hostgroup_name') and
not hasattr(servicedep, 'dependent_hostgroup_name')):
self.explode_hostgroup(servicedep, hostgroups)
srvdep_to_remove.append(s_id)
continue
# Get the list of all FATHER hosts and service dependencies
hnames = []
if hasattr(servicedep, 'hostgroup_name'):
hg_names = [n.strip() for n in servicedep.hostgroup_name.split(',')]
for hg_name in hg_names:
hostgroup = hostgroups.find_by_name(hg_name)
if hostgroup is None:
err = "ERROR: the servicedependecy got an" \
" unknown hostgroup_name '%s'" % hg_name
hostgroup.add_error(err)
continue
hnames.extend([m.strip() for m in hostgroup.get_hosts()])
if not hasattr(servicedep, 'host_name'):
servicedep.host_name = ''
if servicedep.host_name != '':
hnames.extend([n.strip() for n in servicedep.host_name.split(',')])
snames = [d.strip() for d in servicedep.service_description.split(',')]
couples = []
for hname in hnames:
for sname in snames:
couples.append((hname.strip(), sname.strip()))
if not hasattr(servicedep, 'dependent_hostgroup_name') \
and hasattr(servicedep, 'hostgroup_name'):
servicedep.dependent_hostgroup_name = servicedep.hostgroup_name
# Now the dependent part (the sons)
dep_hnames = []
if hasattr(servicedep, 'dependent_hostgroup_name'):
hg_names = [n.strip() for n in servicedep.dependent_hostgroup_name.split(',')]
for hg_name in hg_names:
hostgroup = hostgroups.find_by_name(hg_name)
if hostgroup is None:
err = "ERROR: the servicedependecy got an " \
"unknown dependent_hostgroup_name '%s'" % hg_name
hostgroup.add_error(err)
continue
dep_hnames.extend([m.strip() for m in hostgroup.get_hosts()])
if not hasattr(servicedep, 'dependent_host_name'):
servicedep.dependent_host_name = getattr(servicedep, 'host_name', '')
if servicedep.dependent_host_name != '':
dep_hnames.extend([n.strip() for n in servicedep.dependent_host_name.split(',')])
dep_snames = [d.strip() for d in servicedep.dependent_service_description.split(',')]
dep_couples = []
for dep_hname in dep_hnames:
for dep_sname in dep_snames:
dep_couples.append((dep_hname.strip(), dep_sname.strip()))
# Create the new service deps from all of this.
for (dep_hname, dep_sname) in dep_couples: # the sons, like HTTP
for (hname, sname) in couples: # the fathers, like MySQL
new_sd = servicedep.copy()
new_sd.host_name = hname
new_sd.service_description = sname
new_sd.dependent_host_name = dep_hname
new_sd.dependent_service_description = dep_sname
self.add_item(new_sd)
# Ok so we can remove the old one
srvdep_to_remove.append(s_id)
self.delete_servicesdep_by_id(srvdep_to_remove) | [
"Explode all service dependency for each member of hostgroups\n Each member of dependent hostgroup or hostgroup in dependency have to get a copy of\n service dependencies (quite complex to parse)\n\n :param hostgroups: used to look for hostgroup\n :type hostgroups: alignak.objects.hostgroup.Hostgroups\n :return: None\n "
] |
Please provide a description of the function:def linkify(self, hosts, services, timeperiods):
self.linkify_sd_by_s(hosts, services)
self.linkify_sd_by_tp(timeperiods)
self.linkify_s_by_sd(services) | [
"Create link between objects::\n\n * servicedependency -> host\n * servicedependency -> service\n * servicedependency -> timeperiods\n\n :param hosts: hosts to link\n :type hosts: alignak.objects.host.Hosts\n :param services: services to link\n :type services: alignak.objects.service.Services\n :param timeperiods: timeperiods to link\n :type timeperiods: alignak.objects.timeperiod.Timeperiods\n :return: None\n "
] |
Please provide a description of the function:def linkify_sd_by_s(self, hosts, services):
to_del = []
errors = self.configuration_errors
warns = self.configuration_warnings
for servicedep in self:
try:
s_name = servicedep.dependent_service_description
hst_name = servicedep.dependent_host_name
# The new member list, in id
serv = services.find_srv_by_name_and_hostname(hst_name, s_name)
if serv is None:
host = hosts.find_by_name(hst_name)
if not (host and host.is_excluded_for_sdesc(s_name)):
errors.append("Service %s not found for host %s" % (s_name, hst_name))
elif host:
warns.append("Service %s is excluded from host %s ; "
"removing this servicedependency as it's unusuable."
% (s_name, hst_name))
to_del.append(servicedep)
continue
servicedep.dependent_service_description = serv.uuid
s_name = servicedep.service_description
hst_name = servicedep.host_name
# The new member list, in id
serv = services.find_srv_by_name_and_hostname(hst_name, s_name)
if serv is None:
host = hosts.find_by_name(hst_name)
if not (host and host.is_excluded_for_sdesc(s_name)):
errors.append("Service %s not found for host %s" % (s_name, hst_name))
elif host:
warns.append("Service %s is excluded from host %s ; "
"removing this servicedependency as it's unusuable."
% (s_name, hst_name))
to_del.append(servicedep)
continue
servicedep.service_description = serv.uuid
except AttributeError as err:
logger.error("[servicedependency] fail to linkify by service %s: %s",
servicedep, err)
to_del.append(servicedep)
for servicedep in to_del:
self.remove_item(servicedep) | [
"Replace dependent_service_description and service_description\n in service dependency by the real object\n\n :param hosts: host list, used to look for a specific one\n :type hosts: alignak.objects.host.Hosts\n :param services: service list to look for a specific one\n :type services: alignak.objects.service.Services\n :return: None\n "
] |
Please provide a description of the function:def linkify_sd_by_tp(self, timeperiods):
for servicedep in self:
try:
tp_name = servicedep.dependency_period
timeperiod = timeperiods.find_by_name(tp_name)
if timeperiod:
servicedep.dependency_period = timeperiod.uuid
else:
servicedep.dependency_period = ''
except AttributeError as exp:
logger.error("[servicedependency] fail to linkify by timeperiods: %s", exp) | [
"Replace dependency_period by a real object in service dependency\n\n :param timeperiods: list of timeperiod, used to look for a specific one\n :type timeperiods: alignak.objects.timeperiod.Timeperiods\n :return: None\n "
] |
Please provide a description of the function:def linkify_s_by_sd(self, services):
for servicedep in self:
# Only used for debugging purpose when loops are detected
setattr(servicedep, "service_description_string", "undefined")
setattr(servicedep, "dependent_service_description_string", "undefined")
if getattr(servicedep, 'service_description', None) is None or\
getattr(servicedep, 'dependent_service_description', None) is None:
continue
services.add_act_dependency(servicedep.dependent_service_description,
servicedep.service_description,
servicedep.notification_failure_criteria,
getattr(servicedep, 'dependency_period', ''),
servicedep.inherits_parent)
services.add_chk_dependency(servicedep.dependent_service_description,
servicedep.service_description,
servicedep.execution_failure_criteria,
getattr(servicedep, 'dependency_period', ''),
servicedep.inherits_parent)
# Only used for debugging purpose when loops are detected
setattr(servicedep, "service_description_string",
services[servicedep.service_description].get_name())
setattr(servicedep, "dependent_service_description_string",
services[servicedep.dependent_service_description].get_name()) | [
"Add dependency in service objects\n\n :return: None\n "
] |
Please provide a description of the function:def is_correct(self):
state = True
# Internal checks before executing inherited function...
loop = self.no_loop_in_parents("service_description", "dependent_service_description")
if loop:
msg = "Loop detected while checking service dependencies"
self.add_error(msg)
state = False
for item in self:
for elem in loop:
if elem == item.service_description:
msg = "Service %s is parent service_description in dependency "\
"defined in %s" % (
item.service_description_string, item.imported_from
)
self.add_error(msg)
elif elem == item.dependent_service_description:
msg = "Service %s is child service_description in dependency"\
" defined in %s" % (
item.dependent_service_description_string, item.imported_from
)
self.add_error(msg)
return super(Servicedependencies, self).is_correct() and state | [
"Check if this servicedependency configuration is correct ::\n\n * Check our own specific properties\n * Call our parent class is_correct checker\n\n :return: True if the configuration is correct, otherwise False\n :rtype: bool\n "
] |
Please provide a description of the function:def get_instance(mod_conf):
logger.info("Giving an instance of %s for alias: %s",
mod_conf.python_name, mod_conf.module_alias)
return InnerMetrics(mod_conf) | [
"\n Return a module instance for the modules manager\n\n :param mod_conf: the module properties as defined globally in this file\n :return:\n "
] |
Please provide a description of the function:def init(self): # pylint: disable=too-many-branches
if not self.enabled:
logger.info(" the module is disabled.")
return True
connections = False
try:
connections = self.test_connection()
except Exception as exp: # pylint: disable=broad-except
logger.error("initialization, test connection failed. Error: %s", str(exp))
if self.influxdb_enabled:
try:
# Check that the configured database exists, else create it...
dbs = self.influx.get_list_database()
for db in dbs:
if db.get('name') == self.influxdb_database:
logger.info("the database %s is existing.", self.influxdb_database)
break
else:
# Create the database
logger.info("creating database %s...", self.influxdb_database)
self.influx.create_database(self.influxdb_database)
# Check that the configured retention policy exists, else create it...
if self.influxdb_retention_name:
rps = self.influx.get_list_retention_policies()
for rp in rps:
if rp.get('name') == self.influxdb_retention_name:
logger.info("the retention policy %s is existing.",
self.influxdb_retention_name)
break
else:
# Create a retention policy for this database
logger.info("creating database retention policy: %s - %s - %s...",
self.influxdb_retention_name, self.influxdb_retention_duration,
self.influxdb_retention_replication)
self.influx.create_retention_policy(
self.influxdb_retention_name, self.influxdb_retention_duration,
self.influxdb_retention_replication, database=self.influxdb_database)
# Check that the configured user exists, else create it...
if self.influxdb_username:
users = self.influx.get_list_users()
for user in users:
if user.get('user') == self.influxdb_username:
logger.info("the user %s is existing.",
self.influxdb_username)
break
else:
# Create the user
logger.info("creating user: %s...", self.influxdb_username)
self.influx.create_user(self.influxdb_username, self.influxdb_password,
admin=False)
connections = connections or True
except Exception as exp: # pylint: disable=broad-except
logger.error("InfluxDB, DB initialization failed. Error: %s", str(exp))
return connections | [
"Called by the daemon broker to initialize the module"
] |
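The create-if-missing pattern used above, reduced to its essence with the influxdb-python client (connection parameters are placeholders):

from influxdb import InfluxDBClient

def ensure_database(client, name):
    """Create the database only if it is not already listed."""
    if not any(db.get('name') == name for db in client.get_list_database()):
        client.create_database(name)

# client = InfluxDBClient('localhost', 8086, 'root', 'secret')
# ensure_database(client, 'alignak')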
Please provide a description of the function:def get_metrics_from_perfdata(self, service, perf_data):
result = []
metrics = PerfDatas(perf_data)
for metric in metrics:
logger.debug("service: %s, metric: %s (%s)", service, metric, metric.__dict__)
if metric.name in ['time']:
metric.name = "duration"
name = sanitize_name(metric.name)
name = self.multiple_values.sub(r'.\1', name)
if not name:
continue
# get metric value and its thresholds values if they exist
name_value = {
name: metric.value,
'uom_' + name: metric.uom
}
# Get or ignore extra values depending upon module configuration
if metric.warning is not None and self.send_warning:
name_value[name + '_warn'] = metric.warning
if metric.critical is not None and self.send_critical:
name_value[name + '_crit'] = metric.critical
if metric.min is not None and self.send_min:
name_value[name + '_min'] = metric.min
if metric.max is not None and self.send_max:
name_value[name + '_max'] = metric.max
for key, value in name_value.items():
result.append((key, value, metric.uom))
logger.debug("Metrics: %s - %s", service, result)
return result | [
"Decode the performance data to build a metrics list"
] |
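PerfDatas (from alignak.misc.perfdata) does the heavy lifting above; a rough sketch of what parsing Nagios plugin performance data involves, 'label'=value[UOM];[warn];[crit];[min];[max]. The regex is a simplified assumption for illustration, not the module's own:

import re

PERF_RE = re.compile(r"([^\s=']+|'[^']+')=([-\d.]+)([a-zA-Z%]*)"
                     r"(?:;([-\d.]*))?(?:;([-\d.]*))?(?:;([-\d.]*))?(?:;([-\d.]*))?")

for match in PERF_RE.finditer("time=0.12s;1;2;0; size=512B"):
    name, value, uom, warn, crit, vmin, vmax = match.groups()
    # prints each metric with its unit and threshold fields (None/'' if absent)
    print(name, float(value), uom, warn, crit, vmin, vmax)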