desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def _merge_tops_merge(self, tops):
    '''
    The default merging strategy. The base env is authoritative, so it
    is checked first, followed by the remaining environments. In top
    files from environments other than "base", only the section matching
    the environment from the top file will be considered, and it too
    will be ignored if that environment was defined in the "base" top
    file.
    '''
    top = DefaultOrderedDict(OrderedDict)

    # Process the authoritative base env first.
    base_tops = tops.pop(u'base', DefaultOrderedDict(OrderedDict))
    for ctop in base_tops:
        for saltenv, targets in six.iteritems(ctop):
            if saltenv == u'include':
                continue
            try:
                for tgt in targets:
                    top[saltenv][tgt] = ctop[saltenv][tgt]
            except TypeError:
                raise SaltRenderError(u'Unable to render top file. No targets found.')

    # Now the remaining envs; only matching sections not already covered
    # by base are merged in.
    for cenv, ctops in six.iteritems(tops):
        for ctop in ctops:
            for saltenv, targets in six.iteritems(ctop):
                if saltenv == u'include':
                    continue
                elif saltenv != cenv:
                    log.debug(u"Section for saltenv '%s' in the '%s' saltenv's top file will be ignored, as the top_file_merging_strategy is set to 'merge' and the saltenvs do not match", saltenv, cenv)
                    continue
                elif saltenv in top:
                    log.debug(u"Section for saltenv '%s' in the '%s' saltenv's top file will be ignored, as this saltenv was already defined in the 'base' top file", saltenv, cenv)
                    continue
                try:
                    for tgt in targets:
                        top[saltenv][tgt] = ctop[saltenv][tgt]
                except TypeError:
                    raise SaltRenderError(u'Unable to render top file. No targets found.')
    return top
def _merge_tops_same(self, tops):
    '''
    For each saltenv, only consider the top file from that saltenv. All
    sections matching a given saltenv, which appear in a different
    saltenv's top file, will be ignored.
    '''
    top = DefaultOrderedDict(OrderedDict)
    for cenv, ctops in six.iteritems(tops):
        if all([x == {} for x in ctops]):
            # No top file found in this env; fall back to the default_top env.
            default_top = self.opts[u'default_top']
            fallback_tops = tops.get(default_top, [])
            if all([x == {} for x in fallback_tops]):
                log.error(u"The '%s' saltenv has no top file, and the fallback saltenv specified by default_top (%s) also has no top file", cenv, default_top)
                continue
            for ctop in fallback_tops:
                for saltenv, targets in six.iteritems(ctop):
                    if saltenv != cenv:
                        continue
                    log.debug(u"The '%s' saltenv has no top file, using the default_top saltenv (%s)", cenv, default_top)
                    for tgt in targets:
                        top[saltenv][tgt] = ctop[saltenv][tgt]
                    break
                else:
                    # for/else: no section for cenv was found in this top file.
                    log.error(u"The '%s' saltenv has no top file, and no matches were found in the top file for the default_top saltenv (%s)", cenv, default_top)
                    continue
        else:
            for ctop in ctops:
                for saltenv, targets in six.iteritems(ctop):
                    if saltenv == u'include':
                        continue
                    elif saltenv != cenv:
                        log.debug(u"Section for saltenv '%s' in the '%s' saltenv's top file will be ignored, as the top_file_merging_strategy is set to 'same' and the saltenvs do not match", saltenv, cenv)
                        continue
                    try:
                        for tgt in targets:
                            top[saltenv][tgt] = ctop[saltenv][tgt]
                    except TypeError:
                        raise SaltRenderError(u'Unable to render top file. No targets found.')
    return top
def _merge_tops_merge_all(self, tops):
    '''
    Merge the top files into a single dictionary
    '''
    def _read_tgt(tgt):
        # Split a target's entries into the (optional) match-type dict
        # and the list of state names.
        match_type = None
        states = []
        for item in tgt:
            if isinstance(item, dict):
                match_type = item
            if isinstance(item, six.string_types):
                states.append(item)
        return (match_type, states)

    top = DefaultOrderedDict(OrderedDict)
    for ctops in six.itervalues(tops):
        for ctop in ctops:
            for saltenv, targets in six.iteritems(ctop):
                if saltenv == u'include':
                    continue
                try:
                    for tgt in targets:
                        if tgt not in top[saltenv]:
                            top[saltenv][tgt] = ctop[saltenv][tgt]
                            continue
                        # Target already present: merge state lists,
                        # letting the newer match type win.
                        m_type1, m_states1 = _read_tgt(top[saltenv][tgt])
                        m_type2, m_states2 = _read_tgt(ctop[saltenv][tgt])
                        merged = []
                        match_type = m_type2 or m_type1
                        if match_type is not None:
                            merged.append(match_type)
                        merged.extend(m_states1)
                        merged.extend([x for x in m_states2 if x not in merged])
                        top[saltenv][tgt] = merged
                except TypeError:
                    raise SaltRenderError(u'Unable to render top file. No targets found.')
    return top
def verify_tops(self, tops):
    '''
    Verify the contents of the top file data
    '''
    errors = []
    if not isinstance(tops, dict):
        errors.append(u'Top data was not formed as a dict')
        # No further checks are possible on non-dict data.
        return errors
    for saltenv, matches in six.iteritems(tops):
        if saltenv == u'include':
            continue
        if not isinstance(saltenv, six.string_types):
            errors.append(u'Environment {0} in top file is not formed as a string'.format(saltenv))
        if saltenv == u'':
            errors.append(u'Empty saltenv statement in top file')
        if not isinstance(matches, dict):
            errors.append(u'The top file matches for saltenv {0} are not formatted as a dict'.format(saltenv))
        for slsmods in six.itervalues(matches):
            if not isinstance(slsmods, list):
                errors.append(u'Malformed topfile (state declarations not formed as a list)')
                continue
            for slsmod in slsmods:
                if isinstance(slsmod, dict):
                    # Matcher declaration (e.g. {'match': 'grain'}).
                    for val in six.itervalues(slsmod):
                        if not val:
                            errors.append(u'Improperly formatted top file matcher in saltenv {0}: {1} file'.format(slsmod, val))
                elif isinstance(slsmod, six.string_types):
                    if not slsmod:
                        errors.append(u'Environment {0} contains an empty sls index'.format(saltenv))
    return errors
def get_top(self):
    '''
    Returns the high data derived from the top file
    '''
    try:
        tops = self.get_tops()
    except SaltRenderError as err:
        log.error(u'Unable to render top file: ' + str(err.error))
        return {}
    return self.merge_tops(tops)
def top_matches(self, top):
    '''
    Search through the top high data for matches and return the states
    that this minion needs to execute.

    Returns:
    {'saltenv': ['state1', 'state2', ...]}
    '''
    matches = {}
    for saltenv, body in six.iteritems(top):
        # Pinned environment: skip every other saltenv's section.
        if self.opts[u'environment']:
            if saltenv != self.opts[u'environment']:
                continue
        for match, data in six.iteritems(body):
            def _filter_matches(_match, _data, _opts):
                if isinstance(_data, six.string_types):
                    _data = [_data]
                if self.matcher.confirm_top(_match, _data, _opts):
                    if saltenv not in matches:
                        matches[saltenv] = []
                    for item in _data:
                        if u'subfilter' in item:
                            # Recurse into nested subfilter matchers.
                            _tmpdata = item.pop(u'subfilter')
                            for match, data in six.iteritems(_tmpdata):
                                _filter_matches(match, data, _opts)
                        if isinstance(item, six.string_types):
                            matches[saltenv].append(item)
                        elif isinstance(item, dict):
                            # Cross-saltenv include: {env_key: inc_sls}
                            env_key, inc_sls = item.popitem()
                            if env_key not in self.avail:
                                continue
                            if env_key not in matches:
                                matches[env_key] = []
                            matches[env_key].append(inc_sls)
            _filter_matches(match, data, self.opts[u'nodegroups'])
    # Fold in master_tops results, honoring master_tops_first ordering
    # and de-duplicating against top-file matches.
    ext_matches = self._master_tops()
    for saltenv in ext_matches:
        top_file_matches = matches.get(saltenv, [])
        if self.opts[u'master_tops_first']:
            first = ext_matches[saltenv]
            second = top_file_matches
        else:
            first = top_file_matches
            second = ext_matches[saltenv]
        matches[saltenv] = first + [x for x in second if x not in first]
    return matches
'Get results from the master_tops system. Override this function if the execution of the master_tops needs customization.'
def _master_tops(self):
return self.client.master_tops()
def load_dynamic(self, matches):
    '''
    If autoload_dynamic_modules is True then automatically load the
    dynamic modules
    '''
    if not self.opts[u'autoload_dynamic_modules']:
        return
    syncd = self.state.functions[u'saltutil.sync_all'](list(matches), refresh=False)
    if syncd[u'grains']:
        # New grains were synced; regenerate grains and pillar.
        self.opts[u'grains'] = salt.loader.grains(self.opts)
        self.state.opts[u'pillar'] = self.state._gather_pillar()
    self.state.module_refresh()
def render_state(self, sls, saltenv, mods, matches, local=False):
    '''
    Render a state file and retrieve all of the include states.

    Returns a ``(state, errors)`` tuple, where ``state`` is the rendered
    high data (a dict, possibly empty) and ``errors`` is a list of error
    message strings.
    '''
    errors = []
    if not local:
        state_data = self.client.get_state(sls, saltenv)
        fn_ = state_data.get(u'dest', False)
    else:
        fn_ = sls
        if not os.path.isfile(fn_):
            errors.append(u'Specified SLS {0} on local filesystem cannot be found.'.format(sls))
    if not fn_:
        errors.append(u'Specified SLS {0} in saltenv {1} is not available on the salt master or through a configured fileserver'.format(sls, saltenv))
    state = None
    try:
        state = compile_template(fn_, self.state.rend, self.state.opts[u'renderer'], self.state.opts[u'renderer_blacklist'], self.state.opts[u'renderer_whitelist'], saltenv, sls, rendered_sls=mods)
    except SaltRenderError as exc:
        msg = u"Rendering SLS '{0}:{1}' failed: {2}".format(saltenv, sls, exc)
        log.critical(msg)
        errors.append(msg)
    except Exception as exc:
        msg = u'Rendering SLS {0} failed, render error: {1}'.format(sls, exc)
        log.critical(msg, exc_info_on_loglevel=logging.DEBUG)
        errors.append(u'{0}\n{1}'.format(msg, traceback.format_exc()))
    try:
        mods.add(u'{0}:{1}'.format(saltenv, sls))
    except AttributeError:
        pass
    if state:
        if not isinstance(state, dict):
            errors.append(u'SLS {0} does not render to a dictionary'.format(sls))
        else:
            include = []
            if u'include' in state:
                if not isinstance(state[u'include'], list):
                    err = u'Include Declaration in SLS {0} is not formed as a list'.format(sls)
                    errors.append(err)
                else:
                    include = state.pop(u'include')
            self._handle_extend(state, sls, saltenv, errors)
            self._handle_exclude(state, sls, saltenv, errors)
            self._handle_state_decls(state, sls, saltenv, errors)
            for inc_sls in include:
                # inc_sls may take one of the following forms:
                #   'sls.to.include' <- same environment
                #   {'env': 'sls.to.include'} <- specified environment
                xenv_key = u'_xenv'
                if isinstance(inc_sls, dict):
                    env_key, inc_sls = inc_sls.popitem()
                else:
                    env_key = saltenv
                if env_key not in self.avail:
                    msg = u"Nonexistent saltenv '{0}' found in include of '{1}' within SLS '{2}:{3}'".format(env_key, inc_sls, saltenv, sls)
                    log.error(msg)
                    errors.append(msg)
                    continue
                if inc_sls.startswith(u'.'):
                    # Relative include: resolve leading dots against the
                    # current sls's package path.
                    match = re.match(r'^(\.+)(.*)$', inc_sls)
                    if match:
                        levels, include = match.groups()
                    else:
                        # BUG FIX: the message previously used positional
                        # fields {2}:{3} with only three format args, which
                        # raised IndexError instead of reporting the error.
                        msg = u"Badly formatted include {0} found in include in SLS '{1}:{2}'".format(inc_sls, saltenv, sls)
                        log.error(msg)
                        errors.append(msg)
                        continue
                    level_count = len(levels)
                    p_comps = sls.split(u'.')
                    if state_data.get(u'source', u'').endswith(u'/init.sls'):
                        p_comps.append(u'init')
                    if level_count > len(p_comps):
                        msg = u"Attempted relative include of '{0}' within SLS '{1}:{2}' goes beyond top level package ".format(inc_sls, saltenv, sls)
                        log.error(msg)
                        errors.append(msg)
                        continue
                    inc_sls = u'.'.join(p_comps[:-level_count] + [include])
                if env_key != xenv_key:
                    if matches is None:
                        matches = []
                    # Resolve inc_sls in the specified environment.
                    if env_key in matches or fnmatch.filter(self.avail[env_key], inc_sls):
                        resolved_envs = [env_key]
                    else:
                        resolved_envs = []
                else:
                    # '_xenv' wildcard: look in all matched environments.
                    resolved_envs = [aenv for aenv in matches if fnmatch.filter(self.avail[aenv], inc_sls)]
                # Only render when resolution is unambiguous.
                if len(resolved_envs) == 1 or saltenv in resolved_envs:
                    # Match the include against the glob-capable fileserver list.
                    sls_targets = fnmatch.filter(self.avail[saltenv], inc_sls) or [inc_sls]
                    for sls_target in sls_targets:
                        r_env = resolved_envs[0] if len(resolved_envs) == 1 else saltenv
                        mod_tgt = u'{0}:{1}'.format(r_env, sls_target)
                        if mod_tgt not in mods:
                            nstate, err = self.render_state(sls_target, r_env, mods, matches)
                            if nstate:
                                self.merge_included_states(state, nstate, errors)
                                state.update(nstate)
                            if err:
                                errors.extend(err)
                else:
                    msg = u''
                    if not resolved_envs:
                        msg = u'Unknown include: Specified SLS {0}: {1} is not available on the salt master in saltenv(s): {2} '.format(env_key, inc_sls, u', '.join(matches) if env_key == xenv_key else env_key)
                    elif len(resolved_envs) > 1:
                        msg = u'Ambiguous include: Specified SLS {0}: {1} is available on the salt master in multiple available saltenvs: {2}'.format(env_key, inc_sls, u', '.join(resolved_envs))
                    log.critical(msg)
                    errors.append(msg)
            try:
                self._handle_iorder(state)
            except TypeError:
                log.critical(u'Could not render SLS %s. Syntax error detected.', sls)
    else:
        state = {}
    return (state, errors)
def _handle_iorder(self, state):
    '''
    Take a state and apply the iorder system
    '''
    if self.opts[u'state_auto_order']:
        for name in state:
            for s_dec in state[name]:
                if not isinstance(s_dec, six.string_types):
                    # PyDSL OrderedDict?
                    continue
                if not isinstance(state[name], dict):
                    # Include's or excludes as lists?
                    continue
                if not isinstance(state[name][s_dec], list):
                    # Bad syntax, let the verify seq pick it up later on
                    continue
                found = False
                if s_dec.startswith(u'_'):
                    continue
                for arg in state[name][s_dec]:
                    if isinstance(arg, dict):
                        if len(arg) > 0:
                            if next(six.iterkeys(arg)) == u'order':
                                found = True
                if not found:
                    if not isinstance(state[name][s_dec], list):
                        # Can't append to a non-list; skip.
                        continue
                    state[name][s_dec].append({u'order': self.iorder})
                    self.iorder += 1
    return state
'Add sls and saltenv components to the state'
def _handle_state_decls(self, state, sls, saltenv, errors):
for name in state: if (not isinstance(state[name], dict)): if (name == u'__extend__'): continue if (name == u'__exclude__'): continue if isinstance(state[name], six.string_types): if (u'.' in state[name]): comps = state[name].split(u'.') state[name] = {u'__sls__': sls, u'__env__': saltenv, comps[0]: [comps[1]]} continue errors.append(u'ID {0} in SLS {1} is not a dictionary'.format(name, sls)) continue skeys = set() for key in list(state[name]): if key.startswith(u'_'): continue if (not isinstance(state[name][key], list)): continue if (u'.' in key): comps = key.split(u'.') if (comps[0] in skeys): errors.append(u"ID '{0}' in SLS '{1}' contains multiple state declarations of the same type".format(name, sls)) continue state[name][comps[0]] = state[name].pop(key) state[name][comps[0]].append(comps[1]) skeys.add(comps[0]) continue skeys.add(key) if (u'__sls__' not in state[name]): state[name][u'__sls__'] = sls if (u'__env__' not in state[name]): state[name][u'__env__'] = saltenv
'Take the extend dec out of state and apply to the highstate global dec'
def _handle_extend(self, state, sls, saltenv, errors):
if (u'extend' in state): ext = state.pop(u'extend') if (not isinstance(ext, dict)): errors.append(u"Extension value in SLS '{0}' is not a dictionary".format(sls)) return for name in ext: if (not isinstance(ext[name], dict)): errors.append(u"Extension name '{0}' in SLS '{1}' is not a dictionary".format(name, sls)) continue if (u'__sls__' not in ext[name]): ext[name][u'__sls__'] = sls if (u'__env__' not in ext[name]): ext[name][u'__env__'] = saltenv for key in list(ext[name]): if key.startswith(u'_'): continue if (not isinstance(ext[name][key], list)): continue if (u'.' in key): comps = key.split(u'.') ext[name][comps[0]] = ext[name].pop(key) ext[name][comps[0]].append(comps[1]) state.setdefault(u'__extend__', []).append(ext)
'Take the exclude dec out of the state and apply it to the highstate global dec'
def _handle_exclude(self, state, sls, saltenv, errors):
if (u'exclude' in state): exc = state.pop(u'exclude') if (not isinstance(exc, list)): err = u'Exclude Declaration in SLS {0} is not formed as a list'.format(sls) errors.append(err) state.setdefault(u'__exclude__', []).extend(exc)
def render_highstate(self, matches):
    '''
    Gather the state files and render them into a single unified salt
    high data structure.
    '''
    highstate = self.building_highstate
    all_errors = []
    mods = set()
    statefiles = []
    for saltenv, states in six.iteritems(matches):
        for sls_match in states:
            try:
                # Expand globs against the fileserver's sls listing.
                statefiles = fnmatch.filter(self.avail[saltenv], sls_match)
            except KeyError:
                all_errors.extend([u"No matching salt environment for environment '{0}' found".format(saltenv)])
            # If the glob matched nothing, try the literal name so the
            # render path reports a useful error.
            if not statefiles:
                statefiles = [sls_match]
            for sls in statefiles:
                r_env = u'{0}:{1}'.format(saltenv, sls)
                if r_env in mods:
                    continue
                state, errors = self.render_state(sls, saltenv, mods, matches)
                if state:
                    self.merge_included_states(highstate, state, errors)
                for i, error in enumerate(errors[:]):
                    if u'is not available' in error:
                        # Match SLS foobar in environment
                        this_sls = u'SLS {0} in saltenv'.format(sls_match)
                        if this_sls in error:
                            errors[i] = u"No matching sls found for '{0}' in env '{1}'".format(sls_match, saltenv)
                all_errors.extend(errors)
    self.clean_duplicate_extends(highstate)
    return (highstate, all_errors)
'Check the pillar for errors, refuse to run the state if there are errors in the pillar and return the pillar errors'
def _check_pillar(self, force=False):
if force: return True if (u'_errors' in self.state.opts[u'pillar']): return False return True
def matches_whitelist(self, matches, whitelist):
    '''
    Reads over the matches and returns a matches dict with just the ones
    that are in the whitelist
    '''
    if not whitelist:
        return matches
    if not isinstance(whitelist, list):
        # Accept a comma-delimited string as well.
        whitelist = whitelist.split(u',')
    ret_matches = {}
    for env in matches:
        for sls in matches[env]:
            if sls in whitelist:
                ret_matches.setdefault(env, []).append(sls)
    return ret_matches
def call_highstate(self, exclude=None, cache=None, cache_name=u'highstate', force=False, whitelist=None, orchestration_jid=None):
    '''
    Run the sequence to execute the salt highstate for this minion
    '''
    # Check that top file exists
    tag_name = u'no_|-states_|-states_|-None'
    ret = {tag_name: {u'result': False, u'comment': u'No states found for this minion', u'name': u'No States', u'changes': {}, u'__run_num__': 0}}
    cfn = os.path.join(self.opts[u'cachedir'], u'{0}.cache.p'.format(cache_name))
    if cache:
        if os.path.isfile(cfn):
            with salt.utils.files.fopen(cfn, u'rb') as fp_:
                high = self.serial.load(fp_)
                return self.state.call_high(high, orchestration_jid)
    # File exists so continue
    err = []
    try:
        top = self.get_top()
    except SaltRenderError as err:
        # NOTE: 'err' is rebound to the exception here, shadowing the
        # list above — faithful to the original code.
        ret[tag_name][u'comment'] = u'Unable to render top file: '
        ret[tag_name][u'comment'] += str(err.error)
        return ret
    except Exception:
        trb = traceback.format_exc()
        err.append(trb)
        return err
    err += self.verify_tops(top)
    matches = self.top_matches(top)
    if not matches:
        msg = u'No Top file or master_tops data matches found.'
        ret[tag_name][u'comment'] = msg
        return ret
    matches = self.matches_whitelist(matches, whitelist)
    self.load_dynamic(matches)
    if not self._check_pillar(force):
        err += [u'Pillar failed to render with the following messages:']
        err += self.state.opts[u'pillar'][u'_errors']
    else:
        high, errors = self.render_highstate(matches)
        if exclude:
            if isinstance(exclude, six.string_types):
                exclude = exclude.split(u',')
            if u'__exclude__' in high:
                high[u'__exclude__'].extend(exclude)
            else:
                high[u'__exclude__'] = exclude
        err += errors
    if err:
        return err
    if not high:
        return ret
    # Cache the rendered high data for later reuse (cache=True path).
    cumask = os.umask(63)
    try:
        if salt.utils.platform.is_windows():
            # Make sure cache file isn't read-only
            self.state.functions[u'cmd.run']([u'attrib', u'-R', cfn], python_shell=False, output_loglevel=u'quiet')
        with salt.utils.files.fopen(cfn, u'w+b') as fp_:
            try:
                self.serial.dump(high, fp_)
            except TypeError:
                # Can't serialize pydsl
                pass
    except (IOError, OSError):
        log.error(u'Unable to write to "state.highstate" cache file %s', cfn)
    os.umask(cumask)
    return self.state.call_high(high, orchestration_jid)
def compile_highstate(self):
    '''
    Return just the highstate or the errors
    '''
    err = []
    top = self.get_top()
    err += self.verify_tops(top)
    matches = self.top_matches(top)
    high, errors = self.render_highstate(matches)
    err += errors
    # Any accumulated error aborts and is returned instead of the data.
    if err:
        return err
    return high
def compile_low_chunks(self):
    '''
    Compile the highstate but don't run it, return the low chunks to
    see exactly what the highstate will execute
    '''
    top = self.get_top()
    matches = self.top_matches(top)
    high, errors = self.render_highstate(matches)

    # Check for errors
    high, ext_errors = self.state.reconcile_extend(high)
    errors += ext_errors
    errors += self.state.verify_high(high)
    high, req_in_errors = self.state.requisite_in(high)
    errors += req_in_errors
    high = self.state.apply_exclude(high)
    if errors:
        return errors
    # Compile and verify the raw chunks
    chunks = self.state.compile_high_data(high)
    return chunks
def compile_state_usage(self):
    '''
    Return all used and unused states for the minion based on the top
    match data.

    Returns a dict keyed by saltenv, each value holding 'used'/'unused'
    state lists and the corresponding counts, or a list of errors if the
    top data failed verification.
    '''
    err = []
    top = self.get_top()
    err += self.verify_tops(top)
    if err:
        return err
    matches = self.top_matches(top)
    state_usage = {}
    for saltenv, states in self.avail.items():
        env_usage = {u'used': [], u'unused': [], u'count_all': 0, u'count_used': 0, u'count_unused': 0}
        # BUG FIX: default to an empty list — a saltenv with no top-file
        # matches previously yielded None here, making the membership
        # test below raise TypeError.
        env_matches = matches.get(saltenv, [])
        for state in states:
            env_usage[u'count_all'] += 1
            if state in env_matches:
                env_usage[u'count_used'] += 1
                env_usage[u'used'].append(state)
            else:
                env_usage[u'count_unused'] += 1
                env_usage[u'unused'].append(state)
        state_usage[saltenv] = env_usage
    return state_usage
def load_modules(self, data=None, proxy=None):
    '''
    Load the modules into the state
    '''
    log.info(u'Loading fresh modules for state activity')
    self.functions = salt.client.FunctionWrapper(self.opts, self.opts[u'id'])
    # Load the states, but they should not be used in this class apart
    # from inspection
    self.utils = salt.loader.utils(self.opts)
    self.serializers = salt.loader.serializers(self.opts)
    self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers)
    self.rend = salt.loader.render(self.opts, self.functions, states=self.states)
def compile_master(self):
    '''
    Return the state data from the master
    '''
    load = {u'grains': self.grains, u'opts': self.opts, u'cmd': u'_master_state'}
    try:
        return self.channel.send(load, tries=3, timeout=72000)
    except SaltReqTimeoutError:
        return {}
def run(self, args):
    '''
    Run the SPM command
    '''
    command = args[0]
    try:
        if command == 'install':
            self._install(args)
        elif command == 'local':
            self._local(args)
        elif command == 'repo':
            self._repo(args)
        elif command == 'remove':
            self._remove(args)
        elif command == 'build':
            self._build(args)
        elif command == 'update_repo':
            self._download_repo_metadata(args)
        elif command == 'create_repo':
            self._create_repo(args)
        elif command == 'files':
            self._list_files(args)
        elif command == 'info':
            self._info(args)
        elif command == 'list':
            self._list(args)
        else:
            raise SPMInvocationError("Invalid command '{0}'".format(command))
    except SPMException as exc:
        # All SPM errors surface through the UI rather than tracebacks.
        self.ui.error(str(exc))
'Process local commands'
def _list(self, args):
args.pop(0) command = args[0] if (command == 'packages'): self._list_packages(args) elif (command == 'files'): self._list_files(args) elif (command == 'repos'): self._repo_list(args) else: raise SPMInvocationError("Invalid list command '{0}'".format(command))
'Process local commands'
def _local(self, args):
args.pop(0) command = args[0] if (command == 'install'): self._local_install(args) elif (command == 'files'): self._local_list_files(args) elif (command == 'info'): self._local_info(args) else: raise SPMInvocationError("Invalid local command '{0}'".format(command))
'Process repo commands'
def _repo(self, args):
args.pop(0) command = args[0] if (command == 'list'): self._repo_list(args) elif (command == 'packages'): self._repo_packages(args) elif (command == 'search'): self._repo_packages(args, search=True) elif (command == 'update'): self._download_repo_metadata(args) elif (command == 'create'): self._create_repo(args) else: raise SPMInvocationError("Invalid repo command '{0}'".format(command))
'List packages for one or more configured repos'
def _repo_packages(self, args, search=False):
packages = [] repo_metadata = self._get_repo_metadata() for repo in repo_metadata: for pkg in repo_metadata[repo]['packages']: if (args[1] in pkg): version = repo_metadata[repo]['packages'][pkg]['info']['version'] release = repo_metadata[repo]['packages'][pkg]['info']['release'] packages.append((pkg, version, release, repo)) for pkg in sorted(packages): self.ui.status('{0} DCTB {1}-{2} DCTB {3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])) return packages
'List configured repos This can be called either as a ``repo`` command or a ``list`` command'
def _repo_list(self, args):
repo_metadata = self._get_repo_metadata() for repo in repo_metadata: self.ui.status(repo)
def _install(self, args):
    '''
    Install a package from a repo.

    Accepts either local ``.spm`` package files or package names to be
    resolved against configured repo metadata; resolves dependencies,
    downloads what is needed, and installs each package.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')

    caller_opts = self.opts.copy()
    caller_opts['file_client'] = 'local'
    self.caller = salt.client.Caller(mopts=caller_opts)
    self.client = salt.client.get_local_client(self.opts['conf_file'])
    cache = salt.cache.Cache(self.opts)

    packages = args[1:]
    file_map = {}      # pkg_name -> local .spm file path
    optional = []
    recommended = []
    to_install = []
    for pkg in packages:
        if pkg.endswith('.spm'):
            if self._pkgfiles_fun('path_exists', pkg):
                # Strip '-<version>-<release>' and any directory prefix
                # to recover the package name.
                comps = pkg.split('-')
                comps = '-'.join(comps[:-2]).split('/')
                pkg_name = comps[-1]
                formula_tar = tarfile.open(pkg, 'r:bz2')
                formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                formula_def = yaml.safe_load(formula_ref)
                file_map[pkg_name] = pkg
                to_, op_, re_ = self._check_all_deps(pkg_name=pkg_name, pkg_file=pkg, formula_def=formula_def)
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
            else:
                raise SPMInvocationError('Package file {0} not found'.format(pkg))
        else:
            to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
            to_install.extend(to_)
            optional.extend(op_)
            recommended.extend(re_)

    optional = set(filter(len, optional))
    if optional:
        self.ui.status('The following dependencies are optional:\n DCTB {0}\n'.format('\n DCTB '.join(optional)))
    recommended = set(filter(len, recommended))
    if recommended:
        self.ui.status('The following dependencies are recommended:\n DCTB {0}\n'.format('\n DCTB '.join(recommended)))

    to_install = set(filter(len, to_install))
    msg = 'Installing packages:\n DCTB {0}\n'.format('\n DCTB '.join(to_install))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)

    repo_metadata = self._get_repo_metadata()

    # Decide, per package, which repo copy to download (prefer newer
    # version/release; on a tie prefer a non-file:// source).
    dl_list = {}
    for package in to_install:
        if package in file_map:
            self._install_indv_pkg(package, file_map[package])
        else:
            for repo in repo_metadata:
                repo_info = repo_metadata[repo]
                if package in repo_info['packages']:
                    dl_package = False
                    repo_ver = repo_info['packages'][package]['info']['version']
                    repo_rel = repo_info['packages'][package]['info']['release']
                    repo_url = repo_info['info']['url']
                    if package in dl_list:
                        if repo_ver == dl_list[package]['version']:
                            if repo_rel > dl_list[package]['release']:
                                dl_package = True
                            elif repo_rel == dl_list[package]['release']:
                                if dl_list[package]['source'].startswith('file://'):
                                    if not repo_url.startswith('file://'):
                                        dl_package = True
                        elif repo_ver > dl_list[package]['version']:
                            dl_package = True
                    else:
                        dl_package = True
                    if dl_package is True:
                        cache_path = os.path.join(self.opts['spm_cache_dir'], repo)
                        dl_url = '{0}/{1}'.format(repo_info['info']['url'], repo_info['packages'][package]['filename'])
                        out_file = os.path.join(cache_path, repo_info['packages'][package]['filename'])
                        dl_list[package] = {'version': repo_ver, 'release': repo_rel, 'source': dl_url, 'dest_dir': cache_path, 'dest_file': out_file}

    # Download all selected packages into the cache.
    for package in dl_list:
        dl_url = dl_list[package]['source']
        cache_path = dl_list[package]['dest_dir']
        out_file = dl_list[package]['dest_file']
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)
        if dl_url.startswith('file://'):
            dl_url = dl_url.replace('file://', '')
            shutil.copyfile(dl_url, out_file)
        else:
            # NOTE(review): repo_info leaks in from the selection loop
            # above; presumably all repos share compatible auth info —
            # TODO confirm.
            with salt.utils.files.fopen(out_file, 'w') as outf:
                outf.write(self._query_http(dl_url, repo_info['info']))

    # Install each downloaded package.
    for package in dl_list:
        # BUG FIX: install each package from its own downloaded file.
        # Previously the stale `out_file` from the last iteration of the
        # download loop was passed for every package.
        self._install_indv_pkg(package, dl_list[package]['dest_file'])
    return
'Install a package from a file'
def _local_install(self, args, pkg_name=None):
if (len(args) < 2): raise SPMInvocationError('A package file must be specified') self._install(args)
def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
    '''
    Starting with one package, check all packages for dependencies
    '''
    if pkg_file and not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

    self.repo_metadata = self._get_repo_metadata()
    if not formula_def:
        # Look the formula up in the configured repos.
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            if pkg_name in self.repo_metadata[repo]['packages']:
                formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']
    if not formula_def:
        raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))

    # Check to see if the package is already installed
    pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
    pkgs_to_install = []
    if pkg_info is None or self.opts['force']:
        pkgs_to_install.append(pkg_name)
    elif pkg_info is not None and not self.opts['force']:
        raise SPMPackageError('Package {0} already installed, not installing again'.format(formula_def['name']))

    optional_install = []
    recommended_install = []
    if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
        self.avail_pkgs = {}
        for repo in self.repo_metadata:
            if not isinstance(self.repo_metadata[repo]['packages'], dict):
                continue
            for pkg in self.repo_metadata[repo]['packages']:
                self.avail_pkgs[pkg] = repo
        needs, unavail, optional, recommended = self._resolve_deps(formula_def)
        if len(unavail) > 0:
            raise SPMPackageError('Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(formula_def['name'], '\n'.join(unavail)))
        if optional:
            optional_install.extend(optional)
            for dep_pkg in optional:
                # NOTE(review): this queries the *parent* formula's name,
                # not dep_pkg — looks suspicious but kept as-is; confirm
                # against upstream before changing.
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                optional_install.append(msg)
        if recommended:
            recommended_install.extend(recommended)
            for dep_pkg in recommended:
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
                recommended_install.append(msg)
        if needs:
            pkgs_to_install.extend(needs)
            for dep_pkg in needs:
                # NOTE(review): msg computed but never used here (dead
                # code in the original); kept to preserve side effects of
                # the _pkgdb_fun call.
                pkg_info = self._pkgdb_fun('info', formula_def['name'])
                msg = dep_pkg
                if isinstance(pkg_info, dict):
                    msg = '{0} [Installed]'.format(dep_pkg)
    return (pkgs_to_install, optional_install, recommended_install)
def _install_indv_pkg(self, pkg_name, pkg_file):
    '''
    Install one individual package from a local .spm tarball: validate
    the FORMULA, register the package, run any pre/post states, and
    extract the files with the configured ownership.
    '''
    self.ui.status('... installing {0}'.format(pkg_name))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
    formula_def = yaml.safe_load(formula_ref)

    for field in ('version', 'release', 'summary', 'description'):
        if field not in formula_def:
            raise SPMPackageError('Invalid package: the {0} was not found'.format(field))

    pkg_files = formula_tar.getmembers()

    # Refuse to clobber files from other packages unless forced.
    existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
    if existing_files and not self.opts['force']:
        raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(pkg_name, '\n'.join(existing_files)))

    # We've decided to install
    self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)

    # Run the pre_local_state, if present
    if 'pre_local_state' in formula_def:
        high_data = self._render(formula_def['pre_local_state'], formula_def)
        ret = self.caller.cmd('state.high', data=high_data)
    if 'pre_tgt_state' in formula_def:
        log.debug('Executing pre_tgt_state script')
        high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
        tgt = formula_def['pre_tgt_state']['tgt']
        # BUG FIX: keyword was misspelled 'timout', so the configured
        # timeout was silently ignored by run_job.
        ret = self.client.run_job(tgt=formula_def['pre_tgt_state']['tgt'], fun='state.high', tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'), timeout=self.opts['timeout'], data=high_data)

    # No defaults for this in config.py; default to the current user/group.
    uid = self.opts.get('spm_uid', os.getuid())
    gid = self.opts.get('spm_gid', os.getgid())
    uname = pwd.getpwuid(uid)[0]
    gname = grp.getgrgid(gid)[0]

    for member in pkg_files:
        member.uid = uid
        member.gid = gid
        member.uname = uname
        member.gname = gname
        out_path = self._pkgfiles_fun('install_file', pkg_name, formula_tar, member, formula_def, self.files_conn)
        if out_path is not False:
            if member.isdir():
                digest = ''
            else:
                self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file', os.path.join(out_path, member.name), file_hash, self.files_conn)
            self._pkgdb_fun('register_file', pkg_name, member, out_path, digest, self.db_conn)

    # Run the post_local_state, if present
    if 'post_local_state' in formula_def:
        log.debug('Executing post_local_state script')
        high_data = self._render(formula_def['post_local_state'], formula_def)
        self.caller.cmd('state.high', data=high_data)
    if 'post_tgt_state' in formula_def:
        log.debug('Executing post_tgt_state script')
        high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
        tgt = formula_def['post_tgt_state']['tgt']
        # BUG FIX: same 'timout' -> 'timeout' typo as above.
        ret = self.client.run_job(tgt=formula_def['post_tgt_state']['tgt'], fun='state.high', tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'), timeout=self.opts['timeout'], data=high_data)

    formula_tar.close()
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies
    '''
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}

    can_has = {}
    cant_has = []
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Already installed deps are satisfied.
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue
        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)

    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')

    # Walk transitive dependencies of everything resolvable.
    inspected = []
    to_inspect = can_has.copy()
    while len(to_inspect) > 0:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]
        if dep in inspected:
            continue
        inspected.append(dep)
        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})
        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))

    return (can_has, cant_has, optional, recommended)
def _traverse_repos(self, callback, repo_name=None):
    '''
    Traverse through all repo files and apply the functionality provided
    in the callback to them
    '''
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        # NOTE(review): the base config file is later joined under the
        # '<config>.d/' directory like the drop-ins are — looks dubious
        # but preserved as-is; confirm against upstream.
        repo_files.append(self.opts['spm_repos_config'])

    for dirpath, dirnames, filenames in os.walk('{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            repo_files.append(repo_file)

    for repo_file in repo_files:
        repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = yaml.safe_load(rph)
            for repo in repo_data:
                if repo_data[repo].get('enabled', True) is False:
                    continue
                if repo_name is not None and repo != repo_name:
                    continue
                callback(repo, repo_data[repo])
def _query_http(self, dl_path, repo_info):
    '''
    Download ``dl_path`` over HTTP, honoring optional repo credentials.

    Returns parsed YAML for SPM-METADATA downloads, raw text otherwise,
    or None when the request failed (errors are reported via the UI).
    '''
    query = None
    response = None
    try:
        if 'username' not in repo_info:
            query = http.query(dl_path, text=True)
        else:
            try:
                if 'password' not in repo_info:
                    raise SPMException("Auth defined, but password is not set for username: '{0}'".format(repo_info['username']))
                query = http.query(dl_path, text=True, username=repo_info['username'], password=repo_info['password'])
            except SPMException as exc:
                self.ui.error(str(exc))
    except SPMException as exc:
        self.ui.error(str(exc))
    try:
        if not query:
            raise SPMException('Response is empty, please check for Errors above.')
        if 'SPM-METADATA' in dl_path:
            response = yaml.safe_load(query.get('text', '{}'))
        else:
            response = query.get('text')
    except SPMException as exc:
        self.ui.error(str(exc))
    return response
def _download_repo_metadata(self, args):
    '''
    Connect to each configured repo and cache its SPM-METADATA document.
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])

    def _update_metadata(repo, repo_info):
        # file:// URLs are read from disk; anything else goes over HTTP
        dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
        if dl_path.startswith('file://'):
            dl_path = dl_path.replace('file://', '')
            with salt.utils.files.fopen(dl_path, 'r') as rpm:
                metadata = yaml.safe_load(rpm)
        else:
            metadata = self._query_http(dl_path, repo_info)
        cache.store('.', repo, metadata)

    repo_name = args[1] if len(args) > 1 else None
    self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self):
    '''
    Return cached repo metadata, populating the cache first when it has
    never been downloaded.

    :return: dict mapping repo name to {'info': ..., 'packages': ...}
    '''
    cache = salt.cache.Cache(self.opts, self.opts['spm_cache_dir'])
    metadata = {}

    def _read_metadata(repo, repo_info):
        if cache.updated('.', repo) is None:
            # Fix: log.warn is a deprecated alias for log.warning
            log.warning('Updating repo metadata')
            self._download_repo_metadata({})
        metadata[repo] = {
            'info': repo_info,
            'packages': cache.fetch('.', repo),
        }

    self._traverse_repos(_read_metadata)
    return metadata
def _create_repo(self, args):
    '''
    Scan a directory and create an SPM-METADATA file which describes all
    of the SPM files in that directory.

    When two builds of the same package are found, the newest
    version/release wins and the older file is handled per the
    ``spm_repo_dups`` option (ignore/archive/delete).

    :raises SPMInvocationError: if no directory path was given
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a directory must be specified')
    if args[1] == '.':
        repo_path = os.environ['PWD']
    else:
        repo_path = args[1]
    old_files = []
    repo_metadata = {}
    for dirpath, dirnames, filenames in os.walk(repo_path):
        for spm_file in filenames:
            if not spm_file.endswith('.spm'):
                continue
            spm_path = '{0}/{1}'.format(repo_path, spm_file)
            if not tarfile.is_tarfile(spm_path):
                continue
            # Filenames look like <name>-<version>-<release>.spm
            comps = spm_file.split('-')
            spm_name = '-'.join(comps[:-2])
            spm_fh = tarfile.open(spm_path, 'r:bz2')
            try:
                formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
                formula_conf = yaml.safe_load(formula_handle.read())
            finally:
                # Bug fix: the tar handle was previously never closed
                spm_fh.close()
            use_formula = True
            if spm_name in repo_metadata:
                # Compare against the build already recorded for this name
                cur_info = repo_metadata[spm_name]['info']
                new_info = formula_conf
                if int(new_info['version']) == int(cur_info['version']):
                    if int(new_info['release']) < int(cur_info['release']):
                        use_formula = False
                elif int(new_info['version']) < int(cur_info['version']):
                    use_formula = False
                if use_formula is True:
                    log.debug('{0} {1}-{2} had been added, but {3}-{4} will replace it'.format(spm_name, cur_info['version'], cur_info['release'], new_info['version'], new_info['release']))
                    old_files.append(repo_metadata[spm_name]['filename'])
                else:
                    log.debug('{0} {1}-{2} has been found, but is older than {3}-{4}'.format(spm_name, new_info['version'], new_info['release'], cur_info['version'], cur_info['release']))
                    old_files.append(spm_file)
            if use_formula is True:
                log.debug('adding {0}-{1}-{2} to the repo'.format(formula_conf['name'], formula_conf['version'], formula_conf['release']))
                repo_metadata[spm_name] = {'info': formula_conf.copy()}
                repo_metadata[spm_name]['filename'] = spm_file
    metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
    with salt.utils.files.fopen(metadata_filename, 'w') as mfh:
        yaml.dump(repo_metadata, mfh, indent=4, canonical=False, default_flow_style=False, Dumper=SafeOrderedDumper)
    log.debug('Wrote {0}'.format(metadata_filename))
    for file_ in old_files:
        if self.opts['spm_repo_dups'] == 'ignore':
            log.debug('{0} will be left in the directory'.format(file_))
        elif self.opts['spm_repo_dups'] == 'archive':
            if not os.path.exists('./archive'):
                try:
                    os.makedirs('./archive')
                    log.debug('{0} has been archived'.format(file_))
                except IOError:
                    log.error('Unable to create archive directory')
            try:
                shutil.move(file_, './archive')
            except (IOError, OSError):
                log.error('Unable to archive {0}'.format(file_))
        elif self.opts['spm_repo_dups'] == 'delete':
            try:
                os.remove(file_)
                log.debug('{0} has been deleted'.format(file_))
            except IOError:
                log.error('Unable to delete {0}'.format(file_))
            except OSError:
                pass
def _remove(self, args):
    '''
    Remove one or more installed packages, deleting files that are
    unmodified since install and unregistering everything from the
    package database.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    packages = args[1:]
    msg = 'Removing packages:\n DCTB {0}'.format('\n DCTB '.join(packages))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)
    for package in packages:
        self.ui.status('... removing {0}'.format(package))
        if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
            raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))
        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMInvocationError('Package {0} not installed'.format(package))
        files = self._pkgdb_fun('list_files', package, self.db_conn)
        dirs = []
        for filerow in files:
            if self._pkgfiles_fun('path_isdir', filerow[0]):
                dirs.append(filerow[0])
                continue
            file_hash = hashlib.sha1()
            digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
            if filerow[1] == digest:
                # Hash matches the install-time record: safe to delete
                self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
            else:
                self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
            self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)
        # Deepest directories first so empty parents can be removed too
        for dir_ in sorted(dirs, reverse=True):
            self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
            try:
                self._verbose('Removing directory {0}'.format(dir_), log.trace)
                os.rmdir(dir_)
            except OSError:
                self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)
        self._pkgdb_fun('unregister_pkg', package, self.db_conn)
def _verbose(self, msg, level=log.debug):
    '''
    Show ``msg`` on the UI when verbose mode is on, and always log it at
    the given ``level``.
    '''
    if self.opts.get('verbose', False) is True:
        self.ui.status(msg)
    level(msg)
def _local_info(self, args):
    '''
    Display info for a local (not-yet-installed) package file.

    :raises SPMInvocationError: if no filename is given or it is missing
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')
    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMInvocationError('Package file {0} not found'.format(pkg_file))
    # Derive the package name from <name>-<version>-<release>.spm
    comps = pkg_file.split('-')
    comps = '-'.join(comps[:-2]).split('/')
    name = comps[-1]
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    try:
        formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
        formula_def = yaml.safe_load(formula_ref)
    finally:
        # Bug fix: the tar handle was previously leaked
        formula_tar.close()
    self.ui.status(self._get_info(formula_def))
def _info(self, args):
    '''
    Display info for an installed package.

    :raises SPMInvocationError: if no package name was given
    :raises SPMPackageError: if the package is not installed
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    package = args[1]
    pkg_info = self._pkgdb_fun('info', package, self.db_conn)
    if pkg_info is None:
        raise SPMPackageError('package {0} not installed'.format(package))
    self.ui.status(self._get_info(pkg_info))
'Get package info'
def _get_info(self, formula_def):
fields = ('name', 'os', 'os_family', 'release', 'version', 'dependencies', 'os_dependencies', 'os_family_dependencies', 'summary', 'description') for item in fields: if (item not in formula_def): formula_def[item] = 'None' if ('installed' not in formula_def): formula_def['installed'] = 'Not installed' return 'Name: {name}\nVersion: {version}\nRelease: {release}\nInstall Date: {installed}\nSupported OSes: {os}\nSupported OS families: {os_family}\nDependencies: {dependencies}\nOS Dependencies: {os_dependencies}\nOS Family Dependencies: {os_family_dependencies}\nSummary: {summary}\nDescription:\n{description}'.format(**formula_def)
def _local_list_files(self, args):
    '''
    List the members of a local package file.

    :raises SPMInvocationError: if no filename was given
    :raises SPMPackageError: if the file does not exist
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package filename must be specified')
    pkg_file = args[1]
    if not os.path.exists(pkg_file):
        raise SPMPackageError('Package file {0} not found'.format(pkg_file))
    formula_tar = tarfile.open(pkg_file, 'r:bz2')
    try:
        for member in formula_tar.getmembers():
            self.ui.status(member.name)
    finally:
        # Bug fix: the tar handle was previously leaked
        formula_tar.close()
'List files for an installed package'
def _list_packages(self, args):
packages = self._pkgdb_fun('list_packages', self.db_conn) for package in packages: if self.opts['verbose']: status_msg = ','.join(package) else: status_msg = package[0] self.ui.status(status_msg)
'List files for an installed package'
def _list_files(self, args):
if (len(args) < 2): raise SPMInvocationError('A package name must be specified') package = args[(-1)] files = self._pkgdb_fun('list_files', package, self.db_conn) if (files is None): raise SPMPackageError('package {0} not installed'.format(package)) else: for file_ in files: if self.opts['verbose']: status_msg = ','.join(file_) else: status_msg = file_[0] self.ui.status(status_msg)
def _build(self, args):
    '''
    Build an SPM package from a formula directory.

    Reads <formula_dir>/FORMULA, validates required fields, and writes
    <spm_build_dir>/<name>-<version>-<release>.spm.

    :raises SPMInvocationError: if no formula path was given
    :raises SPMPackageError: if FORMULA is missing or incomplete
    '''
    if len(args) < 2:
        raise SPMInvocationError('A path to a formula must be specified')
    self.abspath = args[1].rstrip('/')
    comps = self.abspath.split('/')
    self.relpath = comps[-1]
    formula_path = '{0}/FORMULA'.format(self.abspath)
    if not os.path.exists(formula_path):
        raise SPMPackageError('Formula file {0} not found'.format(formula_path))
    with salt.utils.files.fopen(formula_path) as fp_:
        formula_conf = yaml.safe_load(fp_)
    for field in ('name', 'version', 'release', 'summary', 'description'):
        if field not in formula_conf:
            raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))
    out_path = '{0}/{1}-{2}-{3}.spm'.format(self.opts['spm_build_dir'], formula_conf['name'], formula_conf['version'], formula_conf['release'])
    if not os.path.exists(self.opts['spm_build_dir']):
        os.mkdir(self.opts['spm_build_dir'])
    self.formula_conf = formula_conf
    formula_tar = tarfile.open(out_path, 'w:bz2')
    if 'files' in formula_conf:
        # An explicit file list allows files to be added in a set order
        if isinstance(formula_conf['files'], list):
            formula_dir = tarfile.TarInfo(formula_conf['name'])
            formula_dir.type = tarfile.DIRTYPE
            formula_tar.addfile(formula_dir)
            for file_ in formula_conf['files']:
                for ftype in FILE_TYPES:
                    prefix = '{0}|'.format(ftype)
                    if file_.startswith(prefix):
                        # BUG FIX: str.lstrip() strips a *character set*,
                        # not a literal prefix, which mangled any filename
                        # starting with a marker character. Slice instead.
                        file_ = file_[len(prefix):]
                formula_tar.add(os.path.join(os.getcwd(), file_), os.path.join(formula_conf['name'], file_))
    else:
        # tarfile's filter= keyword replaced exclude= in newer Pythons
        try:
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
    formula_tar.close()
    self.ui.status('Built package {0}'.format(out_path))
def _exclude(self, member):
    '''
    Tar filter callback: return None to drop members matching any
    spm_build_exclude entry, otherwise return the member unchanged.
    '''
    if isinstance(member, string_types):
        return None
    for item in self.opts['spm_build_exclude']:
        excluded = (
            '{0}/{1}'.format(self.formula_conf['name'], item),
            '{0}/{1}'.format(self.abspath, item),
        )
        if member.name.startswith(excluded):
            return None
    return member
def _render(self, data, formula_def):
    '''
    Render a [pre|post]_local_state or [pre|post]_tgt_state script using
    the formula's renderer (falling back to the configured default).
    '''
    renderer = formula_def.get('renderer', self.opts.get('renderer', 'yaml_jinja'))
    rend = salt.loader.render(self.opts, {})
    blacklist = self.opts.get('renderer_blacklist')
    whitelist = self.opts.get('renderer_whitelist')
    # Expose the formula fields (plus opts) as template variables
    template_vars = formula_def.copy()
    template_vars['opts'] = self.opts.copy()
    return compile_template(':string:', rend, renderer, blacklist, whitelist, input_data=data, **template_vars)
def status(self, msg):
    '''
    Report an SPMClient status message. Subclasses must implement this.
    '''
    raise NotImplementedError()
def error(self, msg):
    '''
    Report an SPM error message. Subclasses must implement this.
    '''
    raise NotImplementedError()
def confirm(self, action):
    '''
    Ask the user to confirm an SPMClient action before it runs. Return
    when confirmed; raise SPMOperationCanceled(<msg>) when canceled.
    Subclasses must implement this.
    '''
    raise NotImplementedError()
def process(self, config, grains):
    '''
    Evaluate each configured beacon and collect the events it yields.

    The config must be a list and looks like this in yaml

    .. code_block:: yaml
        beacons:
            inotify:
                - /etc/fstab: {}
                - /var/cache/foo: {}

    :param config: beacon config, keyed by beacon module name
    :param grains: grains dict exposed to each beacon function
    :return: list of {'tag': ..., 'data': ...} events (None when beacons
        are globally disabled)
    '''
    ret = []
    b_config = copy.deepcopy(config)
    if 'enabled' in b_config and not b_config['enabled']:
        return
    for mod in config:
        if mod == 'enabled':
            continue
        # Normalize list-of-dicts config into a single dict for inspection
        current_beacon_config = None
        if isinstance(config[mod], list):
            current_beacon_config = {}
            list(map(current_beacon_config.update, config[mod]))
        elif isinstance(config[mod], dict):
            current_beacon_config = config[mod]
        if 'enabled' in current_beacon_config:
            if not current_beacon_config['enabled']:
                log.trace('Beacon {0} disabled'.format(mod))
                continue
            elif isinstance(config[mod], dict):
                # Remove the flag so it is not passed to the beacon itself
                del config[mod]['enabled']
            else:
                self._remove_list_item(config[mod], 'enabled')
        log.trace('Beacon processing: {0}'.format(mod))
        fun_str = '{0}.beacon'.format(mod)
        if fun_str not in self.beacons:
            log.warning('Unable to process beacon {0}'.format(mod))
            continue
        runonce = self._determine_beacon_config(current_beacon_config, 'run_once')
        interval = self._determine_beacon_config(current_beacon_config, 'interval')
        if interval:
            b_config = self._trim_config(b_config, mod, 'interval')
            if not self._process_interval(mod, interval):
                log.trace('Skipping beacon {0}. Interval not reached.'.format(mod))
                continue
        if self._determine_beacon_config(current_beacon_config, 'disable_during_state_run'):
            # Typo fix: message previously read 'Evaluting'
            log.trace('Evaluating if beacon {0} should be skipped due to a state run.'.format(mod))
            b_config = self._trim_config(b_config, mod, 'disable_during_state_run')
            is_running = False
            running_jobs = salt.utils.minion.running(self.opts)
            for job in running_jobs:
                if re.match('state.*', job['fun']):
                    is_running = True
            if is_running:
                close_str = '{0}.close'.format(mod)
                if close_str in self.beacons:
                    log.info('Closing beacon {0}. State run in progress.'.format(mod))
                    self.beacons[close_str](b_config[mod])
                else:
                    log.info('Skipping beacon {0}. State run in progress.'.format(mod))
                continue
        # Update __grains__ on the beacon function so it sees fresh grains
        self.beacons[fun_str].__globals__['__grains__'] = grains
        raw = self.beacons[fun_str](b_config[mod])
        for data in raw:
            tag = 'salt/beacon/{0}/{1}/'.format(self.opts['id'], mod)
            if 'tag' in data:
                tag += data.pop('tag')
            if 'id' not in data:
                data['id'] = self.opts['id']
            ret.append({'tag': tag, 'data': data})
        if runonce:
            self.disable_beacon(mod)
    return ret
'Take a beacon configuration and strip out the interval bits'
def _trim_config(self, b_config, mod, key):
if isinstance(b_config[mod], list): self._remove_list_item(b_config[mod], key) elif isinstance(b_config[mod], dict): b_config[mod].pop(key) return b_config
'Process a beacon configuration to determine its interval'
def _determine_beacon_config(self, current_beacon_config, key):
interval = False if isinstance(current_beacon_config, dict): interval = current_beacon_config.get(key, False) return interval
def _process_interval(self, mod, interval):
    '''
    Track the per-beacon interval counter; return True when the beacon
    is due to run on this loop iteration.
    '''
    log.trace('Processing interval {0} for beacon mod {1}'.format(interval, mod))
    loop_interval = self.opts['loop_interval']
    if mod not in self.interval_map:
        log.trace('Interval process inserting mod: {0}'.format(mod))
        self.interval_map[mod] = 1
        return False
    log.trace('Processing interval in map')
    counter = self.interval_map[mod]
    log.trace('Interval counter: {0}'.format(counter))
    if (counter * loop_interval) >= interval:
        # Due: reset the counter for the next cycle
        self.interval_map[mod] = 1
        return True
    self.interval_map[mod] += 1
    return False
'Return the index of a labeled config item in the beacon config, -1 if the index is not found'
def _get_index(self, beacon_config, label):
indexes = [index for (index, item) in enumerate(beacon_config) if (label in item)] if (len(indexes) < 1): return (-1) else: return indexes[0]
'Remove an item from a beacon config list'
def _remove_list_item(self, beacon_config, label):
index = self._get_index(beacon_config, label) del beacon_config[index]
'Update whether an individual beacon is enabled'
def _update_enabled(self, name, enabled_value):
if isinstance(self.opts['beacons'][name], dict): self.opts['beacons'][name]['enabled'] = enabled_value else: enabled_index = self._get_index(self.opts['beacons'][name], 'enabled') if (enabled_index >= 0): self.opts['beacons'][name][enabled_index]['enabled'] = enabled_value else: self.opts['beacons'][name].append({'enabled': enabled_value})
def list_beacons(self):
    '''
    Merge pillar/config beacons into opts and fire the resulting list on
    the minion event bus.
    '''
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    merged = self.functions['config.merge']('beacons')
    self.opts['beacons'].update(merged)
    evt.fire_event(
        {'complete': True, 'beacons': self.opts['beacons']},
        tag='/salt/minion/minion_beacons_list_complete')
    return True
def add_beacon(self, name, beacon_data):
    '''
    Add (or update) a beacon item and announce completion on the bus.
    '''
    if name in self.opts['beacons']:
        log.info('Updating settings for beacon item: {0}'.format(name))
    else:
        log.info('Added new beacon item {0}'.format(name))
    self.opts['beacons'].update({name: beacon_data})
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    evt.fire_event(
        {'complete': True, 'beacons': self.opts['beacons']},
        tag='/salt/minion/minion_beacon_add_complete')
    return True
def modify_beacon(self, name, beacon_data):
    '''
    Replace a beacon item's settings and announce completion on the bus.
    '''
    log.info('Updating settings for beacon item: {0}'.format(name))
    self.opts['beacons'].update({name: beacon_data})
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    evt.fire_event(
        {'complete': True, 'beacons': self.opts['beacons']},
        tag='/salt/minion/minion_beacon_modify_complete')
    return True
def delete_beacon(self, name):
    '''
    Delete a beacon item (if present) and announce completion on the bus.
    '''
    if name in self.opts['beacons']:
        log.info('Deleting beacon item {0}'.format(name))
        del self.opts['beacons'][name]
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    evt.fire_event(
        {'complete': True, 'beacons': self.opts['beacons']},
        tag='/salt/minion/minion_beacon_delete_complete')
    return True
def enable_beacons(self):
    '''
    Globally enable beacons and announce completion on the bus.
    '''
    self.opts['beacons']['enabled'] = True
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    evt.fire_event(
        {'complete': True, 'beacons': self.opts['beacons']},
        tag='/salt/minion/minion_beacons_enabled_complete')
    return True
def disable_beacons(self):
    '''
    Globally disable beacons and announce completion on the bus.
    (Original docstring incorrectly said 'Enable beacons'.)
    '''
    self.opts['beacons']['enabled'] = False
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    evt.fire_event(
        {'complete': True, 'beacons': self.opts['beacons']},
        tag='/salt/minion/minion_beacons_disabled_complete')
    return True
def enable_beacon(self, name):
    '''
    Enable a single beacon and announce completion on the bus.
    '''
    self._update_enabled(name, True)
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    evt.fire_event(
        {'complete': True, 'beacons': self.opts['beacons']},
        tag='/salt/minion/minion_beacon_enabled_complete')
    return True
def disable_beacon(self, name):
    '''
    Disable a single beacon and announce completion on the bus.
    '''
    self._update_enabled(name, False)
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    evt.fire_event(
        {'complete': True, 'beacons': self.opts['beacons']},
        tag='/salt/minion/minion_beacon_disabled_complete')
    return True
def _connect(self):
    '''
    Connect to the F5 load balancer via the iControl API.
    '''
    try:
        self.bigIP = f5.BIGIP(
            hostname=self.lb,
            username=self.username,
            password=self.password,
            fromurl=True,
            wsdls=['LocalLB.VirtualServer', 'LocalLB.Pool'])
    except Exception:
        # NOTE(review): the original cause is swallowed here — confirm
        # whether chaining it would be preferable before changing
        raise Exception('Unable to connect to {0}'.format(self.lb))
    return True
def create_vs(self, name, ip, port, protocol, profile, pool_name):
    '''
    Create a virtual server on the F5, bound to ``pool_name`` with the
    given protocol and HTTP profile.
    '''
    vs = self.bigIP.LocalLB.VirtualServer
    vs_def = vs.typefactory.create('Common.VirtualServerDefinition')
    vs_def.name = name
    vs_def.address = ip
    vs_def.port = port
    common_protocols = vs.typefactory.create('Common.ProtocolType')
    matched = [i[0] for i in common_protocols if i[0].split('_')[1] == protocol.upper()]
    if not matched:
        raise CommandExecutionError('Unknown protocol')
    # NOTE(review): this assigns the one-element *list*, not the value —
    # matches the original behavior; confirm whether matched[0] was meant
    vs_def.protocol = matched
    vs_def_seq = vs.typefactory.create('Common.VirtualServerSequence')
    vs_def_seq.item = [vs_def]
    vs_type = vs.typefactory.create('LocalLB.VirtualServer.VirtualServerType')
    vs_resource = vs.typefactory.create('LocalLB.VirtualServer.VirtualServerResource')
    vs_resource.type = vs_type.RESOURCE_TYPE_POOL
    vs_resource.default_pool_name = pool_name
    resource_seq = vs.typefactory.create('LocalLB.VirtualServer.VirtualServerResourceSequence')
    resource_seq.item = [vs_resource]
    vs_context = vs.typefactory.create('LocalLB.ProfileContextType')
    vs_profile = vs.typefactory.create('LocalLB.VirtualServer.VirtualServerProfile')
    vs_profile.profile_context = vs_context.PROFILE_CONTEXT_TYPE_ALL
    vs_profile.profile_name = protocol
    vs_profile_http = vs.typefactory.create('LocalLB.VirtualServer.VirtualServerProfile')
    vs_profile_http.profile_name = profile
    vs_profile_conn = vs.typefactory.create('LocalLB.VirtualServer.VirtualServerProfile')
    vs_profile_conn.profile_name = 'oneconnect'
    vs_profile_seq = vs.typefactory.create('LocalLB.VirtualServer.VirtualServerProfileSequence')
    vs_profile_seq.item = [vs_profile, vs_profile_http, vs_profile_conn]
    try:
        vs.create(definitions=vs_def_seq, wildmasks=['255.255.255.255'], resources=resource_seq, profiles=[vs_profile_seq])
    except Exception as e:
        raise Exception('Unable to create `{0}` virtual server\n\n{1}'.format(name, e))
    return True
def create_pool(self, name, method='ROUND_ROBIN'):
    '''
    Create a pool on the F5 load balancer using the given LB method.
    '''
    lbmethods = self.bigIP.LocalLB.Pool.typefactory.create('LocalLB.LBMethod')
    supported_method = [i[0] for i in lbmethods if i[0].split('_', 2)[-1] == method.upper()]
    if supported_method and not self.check_pool(name):
        try:
            self.bigIP.LocalLB.Pool.create(pool_names=[name], lb_methods=[supported_method], members=[[]])
        except Exception as e:
            raise Exception('Unable to create `{0}` pool\n\n{1}'.format(name, e))
    else:
        # NOTE(review): this branch also fires when the pool already
        # exists, making the message misleading — confirm intent
        raise Exception('Unsupported method')
    return True
def add_pool_member(self, name, port, pool_name):
    '''
    Add the node ``name``:``port`` to ``pool_name``.
    '''
    if not self.check_pool(pool_name):
        raise CommandExecutionError('{0} pool does not exists'.format(pool_name))
    pool_api = self.bigIP.LocalLB.Pool
    members_seq = pool_api.typefactory.create('Common.IPPortDefinitionSequence')
    members_seq.items = []
    member = pool_api.typefactory.create('Common.IPPortDefinition')
    member.address = name
    member.port = port
    members_seq.items.append(member)
    try:
        pool_api.add_member(pool_names=[pool_name], members=[members_seq])
    except Exception as e:
        raise Exception('Unable to add `{0}` to `{1}`\n\n{2}'.format(name, pool_name, e))
    return True
def check_pool(self, name):
    '''
    Return True when a pool named ``name`` exists on the load balancer.
    Pool names come back as partition paths, e.g. '/Common/<name>'.
    '''
    for pool in self.bigIP.LocalLB.Pool.get_list():
        if pool.split('/')[-1] == name:
            return True
    return False
def check_virtualserver(self, name):
    '''
    Return True when a virtual server named ``name`` exists.
    Names come back as partition paths, e.g. '/Common/<name>'.
    '''
    for virtual in self.bigIP.LocalLB.VirtualServer.get_list():
        if virtual.split('/')[-1] == name:
            return True
    return False
def check_member_pool(self, member, pool_name):
    '''
    Return True when ``member`` (by address) is already in ``pool_name``.
    '''
    members = self.bigIP.LocalLB.Pool.get_member(pool_names=[pool_name])[0]
    return any(member == mem.address for mem in members)
def lbmethods(self):
    '''
    Return all load-balancer method names, with the 'LB_METHOD_' prefix
    stripped (e.g. 'ROUND_ROBIN').
    '''
    methods = self.bigIP.LocalLB.Pool.typefactory.create('LocalLB.LBMethod')
    return [entry[0].split('_', 2)[-1] for entry in methods]
def format(self, record):
    '''
    Format the record, appending the exc_info_on_loglevel traceback text
    when this handler's level is low enough to include it.
    '''
    formatted_record = super(ExcInfoOnLogLevelFormatMixIn, self).format(record)
    exc_info_on_loglevel = getattr(record, 'exc_info_on_loglevel', None)
    exc_info_on_loglevel_formatted = getattr(record, 'exc_info_on_loglevel_formatted', None)
    if exc_info_on_loglevel is None and exc_info_on_loglevel_formatted is None:
        # Nothing traceback-related attached to this record
        return formatted_record
    if self.level > exc_info_on_loglevel:
        # Handler not enabled for the requested traceback level
        return formatted_record
    if not record.exc_info_on_loglevel_instance and not exc_info_on_loglevel_formatted:
        # No exception instance and no pre-formatted text to append
        return formatted_record
    if record.exc_info_on_loglevel_formatted is None:
        # Lazily format (and cache on the record) the exception text
        if self.formatter is None:
            self.formatter = logging._defaultFormatter
        record.exc_info_on_loglevel_formatted = self.formatter.formatException(record.exc_info_on_loglevel_instance)
    if formatted_record[-1:] != '\n':
        formatted_record += '\n'
    try:
        formatted_record += record.exc_info_on_loglevel_formatted
    except UnicodeError:
        formatted_record += record.exc_info_on_loglevel_formatted.decode(sys.getfilesystemencoding(), 'replace')
    # Drop the exc instance so the record can be pickled/GC'd cleanly
    record.exc_info_on_loglevel_instance = None
    return formatted_record
def sync_with_handlers(self, handlers=()):
    '''
    Flush the stored log records to every handler that is enabled for
    each record's level. Records are consumed (popped) as they are sent.
    '''
    if not handlers:
        return
    while self.__messages:
        record = self.__messages.pop(0)
        for handler in handlers:
            if handler.level <= record.levelno:
                handler.handle(record)
def handleError(self, record):
    '''
    Override the default error handling: on Windows, a log-rotation
    failure caused by the file being open elsewhere is reported as a
    single stderr warning instead of the default noisy traceback.
    '''
    handled = False
    if sys.platform.startswith('win') and logging.raiseExceptions and sys.stderr:
        exc_type, exc, exc_traceback = sys.exc_info()
        try:
            # winerror 32 == ERROR_SHARING_VIOLATION (file in use)
            if exc_type.__name__ in ('PermissionError', 'OSError') and exc.winerror == 32:
                if self.level <= logging.WARNING:
                    sys.stderr.write('[WARNING ] Unable to rotate the log file "{0}" because it is in use\n'.format(self.baseFilename))
                handled = True
        finally:
            # Break the traceback reference cycle
            del exc_type, exc, exc_traceback
    if not handled:
        super(RotatingFileHandler, self).handleError(record)
def __new__(cls, *args):
    '''
    We override `__new__` in our logging logger class in order to
    provide some additional features like expanding the module name
    padding if length is being used, and also some Unicode fixes.

    This overhead runs only when the class is instantiated, i.e.:
    logging.getLogger(__name__)
    '''
    instance = super(SaltLoggingClass, cls).__new__(cls)
    try:
        # Widest logger name currently registered, used to re-pad %(name)
        max_logger_length = len(max(list(logging.Logger.manager.loggerDict), key=len))
        for handler in logging.root.handlers:
            if handler in (LOGGING_NULL_HANDLER, LOGGING_STORE_HANDLER, LOGGING_TEMP_HANDLER):
                continue
            formatter = handler.formatter
            if not formatter:
                continue
            if not handler.lock:
                handler.createLock()
            handler.acquire()
            # Double the %'s so the re-formatted string survives % below
            fmt = formatter._fmt.replace('%', '%%')
            match = MODNAME_PATTERN.search(fmt)
            if not match:
                # Not matched; callers won't be using the module name
                handler.release()
                return instance
            if 'digits' not in match.groupdict():
                # No digits group; no need to re-pad
                handler.release()
                return instance
            digits = match.group('digits')
            if not digits or not (digits and digits.isdigit()):
                # No valid fixed-width value; no need to re-pad
                handler.release()
                return instance
            if int(digits) < max_logger_length:
                fmt = fmt.replace(match.group('name'), '%%(name)-%ds')
                formatter = logging.Formatter(fmt % max_logger_length, datefmt=formatter.datefmt)
                handler.setFormatter(formatter)
            handler.release()
    except ValueError:
        # max() raises ValueError when loggerDict is empty
        pass
    return instance
def __init__(self, opts, fun, config, funcs, runners, proxy, log_queue=None):
    '''
    Set up the engine process executor.

    :param opts: salt opts dict
    :param fun: name of the engine function to run
    :param config: kwargs to pass to the engine function
    :param funcs: minion execution modules
    :param runners: master runner modules
    :param proxy: proxy module, when running under a proxy minion
    '''
    super(Engine, self).__init__(log_queue=log_queue)
    self.opts = opts
    self.config = config
    self.fun = fun
    self.funcs = funcs
    self.runners = runners
    self.proxy = proxy
def run(self):
    '''
    Run the master service!
    '''
    self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
    if salt.utils.platform.is_windows():
        # On Windows the loaders are rebuilt inside the child process
        # (presumably because they cannot cross the spawn boundary —
        # TODO confirm)
        if self.opts['__role'] == 'master':
            self.runners = salt.loader.runner(self.opts, utils=self.utils)
        else:
            self.runners = []
        self.funcs = salt.loader.minion_mods(self.opts, utils=self.utils, proxy=self.proxy)
    self.engine = salt.loader.engines(self.opts, self.funcs, self.runners, self.utils, proxy=self.proxy)
    kwargs = self.config or {}
    try:
        self.engine[self.fun](**kwargs)
    except Exception as exc:
        log.critical('Engine {0} could not be started! Error: {1}'.format(self.engine, exc))
def parseData(self, data, host, port, options):
    '''
    Parse raw syslog data from a junos device, filter it against the
    user-supplied constraints, and build the event topic.

    :param data: raw syslog event data to be parsed
    :param host: IP of the host from where syslog is forwarded
    :param port: port of the junos device from which the data is sent
    :param options: kwargs provided by the user in the configuration file
    :return: {'send': True, 'data': ..., 'topic': ...} when the event
        should go on the master bus, {'send': False} otherwise
    '''
    data = self.obj.parse(data)
    data['hostip'] = host
    log.debug('Junos Syslog - received {0} from {1}, sent from port {2}'.format(data, host, port))
    send_this_event = True
    for key in options:
        if key not in data:
            raise Exception('Please check the arguments given to junos engine in the configuration file')
        if isinstance(options[key], (six.string_types, int)):
            if str(options[key]) != str(data[key]):
                send_this_event = False
                break
        elif isinstance(options[key], list):
            # A list option matches when any entry equals the data value
            for opt in options[key]:
                if str(opt) == str(data[key]):
                    break
            else:
                send_this_event = False
                break
        else:
            raise Exception('Arguments in config not specified properly')
    if not send_this_event:
        return {'send': False}
    if 'event' not in data:
        raise Exception('The incoming event data could not be parsed properly.')
    # Build the topic from the configured title fields (skipping the
    # leading 'jnpr/syslog' components)
    topic = 'jnpr/syslog'
    for i in range(2, len(self.title)):
        topic += '/' + str(data[self.title[i]])
    log.debug('Junos Syslog - sending this event on the bus: {0} from {1}'.format(data, host))
    return {'send': True, 'data': data, 'topic': topic}
def send_event_to_salt(self, result):
    '''
    Fire the parsed event on the master event bus, using the mechanism
    appropriate to whether this engine runs on the master or a minion.

    :param result: dict with the final 'data' and 'topic' (and 'send')
    '''
    if not result['send']:
        return
    data = result['data']
    topic = result['topic']
    if __opts__['__role'] == 'master':
        event.get_master_event(__opts__, __opts__['sock_dir']).fire_event(data, topic)
    else:
        __salt__['event.fire_master'](data=data, tag=topic)
def handle_error(self, err_msg):
    '''
    Log a twisted Failure's error message.
    '''
    # BUG FIX: getErrorMessage is a method; the bound-method object
    # itself was previously logged instead of the message text.
    log.error(err_msg.getErrorMessage())
def construct_yaml_omap(self, node):
    '''
    Build an SLSMap from a YAML mapping node, honoring the !reset and
    !aggregate tags and merging duplicate keys recursively.
    '''
    sls_map = SLSMap()
    if not isinstance(node, MappingNode):
        raise ConstructorError(None, None, 'expected a mapping node, but found {0}'.format(node.id), node.start_mark)
    self.flatten_mapping(node)
    for key_node, value_node in node.value:
        # !reset on a key suppresses the recursive merge below
        reset = key_node.tag == u'!reset'
        if key_node.tag == u'!aggregate':
            log.warning('!aggregate applies on values only, not on keys')
            value_node.tag = key_node.tag
        key_node.tag = self.resolve_sls_tag(key_node)[0]
        key = self.construct_object(key_node, deep=False)
        try:
            hash(key)
        except TypeError:
            err = 'While constructing a mapping {0} found unacceptable key {1}'.format(node.start_mark, key_node.start_mark)
            raise ConstructorError(err)
        value = self.construct_object(value_node, deep=False)
        if key in sls_map and not reset:
            value = merge_recursive(sls_map[key], value)
        sls_map[key] = value
    return sls_map
def construct_sls_str(self, node):
    '''
    Build an SLSString from a YAML scalar (utf-8 encoded bytes under
    Python 2).
    '''
    scalar = self.construct_scalar(node)
    if six.PY2:
        scalar = scalar.encode('utf-8')
    return SLSString(scalar)
def construct_sls_int(self, node):
    '''
    Construct an int, treating zero-padded decimal scalars declared as
    octal (e.g. '0755') as base-10 by stripping the leading zeros first.
    The node's value is normalized in place.
    '''
    value = node.value
    if value != '0' and value.startswith('0') and not value.startswith(('0b', '0x')):
        node.value = value.lstrip('0') or '0'
    return int(node.value)
def cache(self, bank, key, fun, loop_fun=None, **kwargs):
    '''
    Check cache for the data. If it is there, check to see if it needs
    to be refreshed ('expire' kwarg, default 86400 seconds). If the data
    is not there, or it needs to be refreshed, call ``fun(**kwargs)``
    and store the result. When ``loop_fun`` is given, it is applied to
    each item of ``fun``'s return value and the mapped list is cached.
    '''
    expire_seconds = kwargs.get('expire', 86400)
    updated = self.updated(bank, key)
    stale = updated is None or (int(time.time()) - updated) > expire_seconds
    data = self.fetch(bank, key)
    if not data or stale:
        if loop_fun is None:
            data = fun(**kwargs)
        else:
            data = [loop_fun(item) for item in fun(**kwargs)]
        self.store(bank, key, data)
    return data
def store(self, bank, key, data):
    '''
    Store ``data`` under ``bank``/``key`` using the configured driver.

    :raises SaltCacheError: propagated from the driver on backend
        errors (auth, permissions, etc).
    '''
    store_fun = self.modules['{0}.store'.format(self.driver)]
    return store_fun(bank, key, data, **self._kwargs)
def fetch(self, bank, key):
    '''
    Fetch the object stored under ``bank``/``key`` via the configured
    driver; the driver returns an empty dict when the path or key is
    not found.

    :raises SaltCacheError: propagated from the driver on backend
        errors (auth, permissions, etc).
    '''
    fetch_fun = self.modules['{0}.fetch'.format(self.driver)]
    return fetch_fun(bank, key, **self._kwargs)
def updated(self, bank, key):
    '''
    Return the last-updated epoch (seconds) for ``bank``/``key`` via
    the configured driver, or None when the object is not cached.

    :raises SaltCacheError: propagated from the driver on backend
        errors (auth, permissions, etc).
    '''
    updated_fun = self.modules['{0}.updated'.format(self.driver)]
    return updated_fun(bank, key, **self._kwargs)
def flush(self, bank, key=None):
    '''
    Remove ``key`` and its contents from ``bank`` via the configured
    driver; with no key, drop the entire bank including sub-banks.

    :raises SaltCacheError: propagated from the driver on backend
        errors (auth, permissions, etc).
    '''
    flush_fun = self.modules['{0}.flush'.format(self.driver)]
    # key is passed by keyword so drivers can distinguish "whole bank"
    return flush_fun(bank, key=key, **self._kwargs)
def ls(self, bank):
    '''
    List the entries stored in ``bank`` via the configured driver; the
    driver yields an empty iterable when the bank does not exist.

    :raises SaltCacheError: propagated from the driver on backend
        errors (auth, permissions, etc).
    '''
    ls_fun = self.modules['{0}.ls'.format(self.driver)]
    return ls_fun(bank, **self._kwargs)
def contains(self, bank, key=None):
    '''
    Return True when ``bank`` contains ``key`` (or, with ``key=None``,
    when the bank itself exists), as reported by the configured driver.

    :raises SaltCacheError: propagated from the driver on backend
        errors (auth, permissions, etc).
    '''
    contains_fun = self.modules['{0}.contains'.format(self.driver)]
    return contains_fun(bank, key, **self._kwargs)
def _check_proto(self, path):
    '''
    Make sure that this path is intended for the salt master (uses the
    ``salt://`` scheme) and return it with the scheme trimmed.

    :raises MinionError: when the path does not start with ``salt://``.
    '''
    if path.startswith(u'salt://'):
        file_path, _ = salt.utils.url.parse(path)
        return file_path
    raise MinionError(u'Unsupported path: {0}'.format(path))
'Helper util to return a list of files in a directory'
def _file_local_list(self, dest):
if os.path.isdir(dest): destdir = dest else: destdir = os.path.dirname(dest) filelist = set() for (root, dirs, files) in os.walk(destdir, followlinks=True): for name in files: path = os.path.join(root, name) filelist.add(path) return filelist
@contextlib.contextmanager
def _cache_loc(self, path, saltenv=u'base', cachedir=None):
    '''
    Return the local location to cache the file, cache dirs will be
    made.

    Yields the destination path with a restrictive (0o077) umask in
    effect; the previous umask is restored on exit even when directory
    creation or the caller's ``with`` body raises — the original only
    restored it on the success path, leaking the tightened umask to the
    rest of the process on any exception.
    '''
    cachedir = self.get_cachedir(cachedir)
    dest = salt.utils.path.join(cachedir, u'files', saltenv, path)
    destdir = os.path.dirname(dest)
    # tighten the umask so cached files are not group/world accessible
    cumask = os.umask(63)  # 63 == 0o077
    try:
        # a stale regular file sitting where the directory belongs must
        # be removed before makedirs can succeed
        if os.path.isfile(destdir):
            os.remove(destdir)
        try:
            os.makedirs(destdir)
        except OSError as exc:
            # an already-existing directory is fine; re-raise the rest
            if exc.errno != errno.EEXIST:
                raise
        yield dest
    finally:
        os.umask(cumask)
def get_file(self, path, dest=u'', makedirs=False, saltenv=u'base', gzip=None, cachedir=None):
    '''
    Copies a file from the local files or master depending on
    implementation.

    Abstract hook: concrete subclasses must override this; the base
    implementation always raises.

    :raises NotImplementedError: always, in this base class.
    '''
    raise NotImplementedError
def file_list_emptydirs(self, saltenv=u'base', prefix=u''):
    '''
    List the empty dirs.

    Abstract hook: concrete subclasses must override this; the base
    implementation always raises.

    :raises NotImplementedError: always, in this base class.
    '''
    raise NotImplementedError
def cache_file(self, path, saltenv=u'base', cachedir=None):
    '''
    Pull a file down from the file server and store it in the minion
    file cache.

    Delegates to ``get_url`` with an empty destination, letting it pick
    the cache location, and with caching forced on.
    '''
    no_dest = u''
    return self.get_url(path, no_dest, True, saltenv, cachedir=cachedir)