Columns: desc (string, 3 to 26.7k chars), decl (string, 11 to 7.89k chars), bodies (string, 8 to 553k chars)
'Take a load and send it across the network to connected minions'
def _send_pub(self, load):
    for transport, opts in iter_transport_opts(self.opts):
        chan = salt.transport.server.PubServerChannel.factory(opts)
        chan.publish(load)
'Take a given load and perform the necessary steps to prepare a publication. TODO: This is really only bound by temporal cohesion and thus should be refactored even further.'
def _prep_pub(self, minions, jid, clear_load, extra):
    clear_load[u'jid'] = jid
    delimiter = clear_load.get(u'kwargs', {}).get(u'delimiter', DEFAULT_TARGET_DELIM)
    self.event.fire_event({u'minions': minions}, clear_load[u'jid'])
    new_job_load = {u'jid': clear_load[u'jid'],
                    u'tgt_type': clear_load[u'tgt_type'],
                    u'tgt': clear_load[u'tgt'],
                    u'user': clear_load[u'user'],
                    u'fun': clear_load[u'fun'],
                    u'arg': clear_load[u'arg'],
                    u'minions': minions}
    self.event.fire_event(new_job_load, tagify([clear_load[u'jid'], u'new'], u'job'))
    if self.opts[u'ext_job_cache']:
        fstr = u'{0}.save_load'.format(self.opts[u'ext_job_cache'])
        save_load_func = True
        try:
            arg_spec = salt.utils.args.get_function_argspec(self.mminion.returners[fstr])
            if u'minions' not in arg_spec.args:
                log.critical(u"The specified returner used for the external job cache '%s' does not have a 'minions' kwarg in the returner's save_load function.", self.opts[u'ext_job_cache'])
        except (AttributeError, KeyError):
            save_load_func = False
            log.critical(u'The specified returner used for the external job cache "%s" does not have a save_load function!', self.opts[u'ext_job_cache'])
        if save_load_func:
            try:
                self.mminion.returners[fstr](clear_load[u'jid'], clear_load, minions=minions)
            except Exception:
                log.critical(u'The specified returner threw a stack trace:\n', exc_info=True)
    try:
        fstr = u'{0}.save_load'.format(self.opts[u'master_job_cache'])
        self.mminion.returners[fstr](clear_load[u'jid'], clear_load, minions)
    except KeyError:
        log.critical(u'The specified returner used for the master job cache "%s" does not have a save_load function!', self.opts[u'master_job_cache'])
    except Exception:
        log.critical(u'The specified returner threw a stack trace:\n', exc_info=True)
    payload = {u'enc': u'aes'}
    load = {u'fun': clear_load[u'fun'],
            u'arg': clear_load[u'arg'],
            u'tgt': clear_load[u'tgt'],
            u'jid': clear_load[u'jid'],
            u'ret': clear_load[u'ret']}
    if u'master_id' in self.opts:
        load[u'master_id'] = self.opts[u'master_id']
    if u'master_id' in extra:
        load[u'master_id'] = extra[u'master_id']
    if delimiter != DEFAULT_TARGET_DELIM:
        load[u'delimiter'] = delimiter
    if u'id' in extra:
        load[u'id'] = extra[u'id']
    if u'tgt_type' in clear_load:
        load[u'tgt_type'] = clear_load[u'tgt_type']
    if u'to' in clear_load:
        load[u'to'] = clear_load[u'to']
    if u'kwargs' in clear_load:
        if u'ret_config' in clear_load[u'kwargs']:
            load[u'ret_config'] = clear_load[u'kwargs'].get(u'ret_config')
        if u'metadata' in clear_load[u'kwargs']:
            load[u'metadata'] = clear_load[u'kwargs'].get(u'metadata')
        if u'module_executors' in clear_load[u'kwargs']:
            load[u'module_executors'] = clear_load[u'kwargs'].get(u'module_executors')
        if u'executor_opts' in clear_load[u'kwargs']:
            load[u'executor_opts'] = clear_load[u'kwargs'].get(u'executor_opts')
        if u'ret_kwargs' in clear_load[u'kwargs']:
            load[u'ret_kwargs'] = clear_load[u'kwargs'].get(u'ret_kwargs')
    if u'user' in clear_load:
        log.info(u'User %s Published command %s with jid %s', clear_load[u'user'], clear_load[u'fun'], clear_load[u'jid'])
        load[u'user'] = clear_load[u'user']
    else:
        log.info(u'Published command %s with jid %s', clear_load[u'fun'], clear_load[u'jid'])
    log.debug(u'Published command details %s', load)
    return load
'Send the load back to the sender.'
def ping(self, clear_load):
return clear_load
'Prepare the needed objects and socket for iteration within ioflo'
def setup(self):
    salt.utils.appendproctitle(self.__class__.__name__)
    self.clear_funcs = salt.master.ClearFuncs(self.opts, self.key)
    self.aes_funcs = salt.master.AESFuncs(self.opts)
    self.context = zmq.Context(1)
    self.socket = self.context.socket(zmq.REP)
    if self.opts.get(u'ipc_mode', u'') == u'tcp':
        self.w_uri = u'tcp://127.0.0.1:{0}'.format(self.opts.get(u'tcp_master_workers', 4515))
    else:
        self.w_uri = u'ipc://{0}'.format(os.path.join(self.opts[u'sock_dir'], u'workers.ipc'))
    log.info(u'ZMQ Worker binding to socket %s', self.w_uri)
    self.poller = zmq.Poller()
    self.poller.register(self.socket, zmq.POLLIN)
    self.socket.connect(self.w_uri)
'Handle a single request'
def handle_request(self):
    try:
        polled = self.poller.poll(1)
        if polled:
            package = self.socket.recv()
            self._update_aes()
            payload = self.serial.loads(package)
            ret = self.serial.dumps(self._handle_payload(payload))
            self.socket.send(ret)
    except KeyboardInterrupt:
        raise
    except Exception as exc:
        if isinstance(exc, zmq.ZMQError) and exc.errno == errno.EINTR:
            return
'string value, only works for full repr'
def __str__(self):
return self.dict_to_line(self.criteria)
'always works'
def __repr__(self):
return str(self.criteria)
'returns an instance with just those keys'
def pick(self, keys):
    subset = dict([(key, self.criteria[key]) for key in keys])
    return self.__class__(**subset)
'Store non-empty, non-null values to use as filter'
def __init__(self, **criteria):
    items = [(key, str(value)) for key, value in six.iteritems(criteria) if value is not None]
    self.criteria = dict(items)
'Resolve equivalent paths equivalently'
@staticmethod
def norm_path(path):
return os.path.normcase(os.path.normpath(path))
'compare potentially partial criteria against line'
def match(self, line):
    entry = self.dict_from_line(line)
    for key, value in six.iteritems(self.criteria):
        if entry[key] != value:
            return False
    return True
'string value, only works for full repr'
def __str__(self):
return self.dict_to_line(self.criteria)
'always works'
def __repr__(self):
return str(self.criteria)
'returns an instance with just those keys'
def pick(self, keys):
    subset = dict([(key, self.criteria[key]) for key in keys])
    return self.__class__(**subset)
'Store non-empty, non-null values to use as filter'
def __init__(self, **criteria):
    items = [(key, str(value)) for key, value in six.iteritems(criteria) if value is not None]
    self.criteria = dict(items)
'Resolve equivalent paths equivalently'
@staticmethod
def norm_path(path):
return os.path.normcase(os.path.normpath(path))
'compare potentially partial criteria against line'
def match(self, line):
    entry = self.dict_from_line(line)
    for key, value in six.iteritems(self.criteria):
        if entry[key] != value:
            return False
    return True
'Generate a search string, simplifying the search function while still providing as many features as possible.'
def AutoSearch(self):
    search_string = ''
    searchParams = []
    if self.skipInstalled:
        searchParams.append('IsInstalled=0')
    else:
        searchParams.append('IsInstalled=1')
    if self.skipHidden:
        searchParams.append('IsHidden=0')
    else:
        searchParams.append('IsHidden=1')
    if self.skipReboot:
        searchParams.append('RebootRequired=0')
    else:
        searchParams.append('RebootRequired=1')
    if self.skipPresent:
        searchParams.append('IsPresent=0')
    else:
        searchParams.append('IsPresent=1')
    for i in searchParams:
        search_string += '{0} and '.format(i)
    if not self.skipSoftwareUpdates and not self.skipDriverUpdates:
        search_string += "Type='Software' or Type='Driver'"
    elif not self.skipSoftwareUpdates:
        search_string += "Type='Software'"
    elif not self.skipDriverUpdates:
        search_string += "Type='Driver'"
    else:
        return False
    log.debug('generated search string: {0}'.format(search_string))
    return self.Search(search_string)
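For illustration, a minimal standalone sketch of the string this method assembles, using hypothetical flag values in place of the updater object's skip* attributes: with installed, hidden, reboot-required and present updates all skipped and neither software nor driver updates excluded, the loop plus the trailing Type clause produce the search criteria shown in the comment.

# Hypothetical flag values standing in for skipInstalled/skipHidden/skipReboot/skipPresent.
flags = {'IsInstalled': 0, 'IsHidden': 0, 'RebootRequired': 0, 'IsPresent': 0}

search_string = ''
for name, value in flags.items():
    search_string += '{0}={1} and '.format(name, value)
search_string += "Type='Software' or Type='Driver'"

# "IsInstalled=0 and IsHidden=0 and RebootRequired=0 and IsPresent=0 and Type='Software' or Type='Driver'"
print(search_string)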
'Get the results of the installation process.'
def GetInstallationResults(self):
    log.debug('blugger has {0} updates in it'.format(self.install_collection.Count))
    if self.install_collection.Count == 0:
        return {}
    updates = []
    log.debug('repairing update list')
    for i in range(self.install_collection.Count):
        updates.append('{0}: {1}'.format(self.install_results.GetUpdateResult(i).ResultCode, self.install_collection.Item(i).Title))
    log.debug('Update results enumerated, now making a library to pass back')
    results = {}
    for i, update in enumerate(updates):
        results['update {0}'.format(i)] = update
    log.debug('Update information complied. returning')
    return results
'Convert the installation results into a pretty-printed summary.'
def GetInstallationResultsPretty(self):
    updates = self.GetInstallationResults()
    ret = 'The following are the updates and their return codes.\n'
    for i in updates:
        ret += '\t{0}\n'.format(updates[i])
    return ret
'Reduce full updates information to the most important information.'
def GetSearchResults(self, fields=None):
    updates_verbose = self.GetSearchResultsVerbose()
    if fields is not None:
        updates = [dict((k, v) for k, v in update.items() if k in fields) for update in updates_verbose]
        return updates
    return [update['Title'] for update in updates_verbose]
'get the value for user in gsettings'
def _get(self):
    user = self.USER
    try:
        uid = pwd.getpwnam(user).pw_uid
    except KeyError:
        log.info('User does not exist')
        return False
    cmd = 'dbus-launch --exit-with-session gsettings get {0} {1}'.format(self.SCHEMA, self.KEY)
    environ = {}
    environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid)
    result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False)
    if 'stdout' in result:
        if 'uint32' in result['stdout']:
            return re.sub('uint32 ', '', result['stdout'])
        else:
            return result['stdout']
    else:
        return False
'set the value for user in gsettings'
def _set(self, value):
    user = self.USER
    try:
        uid = pwd.getpwnam(user).pw_uid
    except KeyError:
        log.info('User does not exist')
        result = {}
        result['retcode'] = 1
        result['stdout'] = 'User {0} does not exist'.format(user)
        return result
    cmd = 'dbus-launch --exit-with-session gsettings set {0} {1} "{2}"'.format(self.SCHEMA, self.KEY, str(value))
    environ = {}
    environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid)
    result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False)
    return result
'String value, only works for full repr'
def __str__(self):
return self.dict_to_line(self.criteria)
'Always works'
def __repr__(self):
return str(self.criteria)
'Returns an instance with just those keys'
def pick(self, keys):
    subset = dict([(key, self.criteria[key]) for key in keys])
    return self.__class__(**subset)
'Store non-empty, non-null values to use as filter'
def __init__(self, **criteria):
self.criteria = {key: str(value) for (key, value) in six.iteritems(criteria) if (value is not None)}
'Resolve equivalent paths equivalently'
@staticmethod
def norm_path(path):
return os.path.normcase(os.path.normpath(path))
'Compare potentially partial criteria against a complete line'
def match(self, line):
    entry = self.dict_from_line(line)
    for key, value in six.iteritems(self.criteria):
        if entry[key] != value:
            return False
    return True
'Builds the header of a syslog-ng configuration object.'
def build_header(self):
return ''
'Builds the tail of a syslog-ng configuration object.'
def build_tail(self):
return ''
'Builds the body of a syslog-ng configuration object.'
def build_body(self):
    _increase_indent()
    body_array = [x.build() for x in self.iterable]
    nl = '\n' if self.append_extra_newline else ''
    if len(self.iterable) >= 1:
        body = self.join_body_on.join(body_array) + nl
    else:
        body = ''
    _decrease_indent()
    return body
'Builds the textual representation of the whole configuration object with its children.'
def build(self):
    header = self.build_header()
    body = self.build_body()
    tail = self.build_tail()
    return header + body + tail
'Alternate constructor that accepts multiple recipients and rooms'
@classmethod
def create_multi(cls, jid, password, msg, recipients=None, rooms=None, nick='SaltStack Bot'):
    obj = SendMsgBot(jid, password, None, msg)
    obj.recipients = [] if recipients is None else recipients
    obj.rooms = [] if rooms is None else rooms
    obj.nick = nick
    return obj
'Constructor'
def __init__(self):
    self.__called = False
    self._reset()
'Resets values of the call setup. :return:'
def _reset(self):
    self.__cmd = ['zypper', '--non-interactive']
    self.__exit_code = 0
    self.__call_result = dict()
    self.__error_msg = ''
    self.__env = {'SALT_RUNNING': '1'}
    self.__xml = False
    self.__no_lock = False
    self.__no_raise = False
    self.__refresh = False
    self.__ignore_repo_failure = False
    self.__systemd_scope = False
':param args: :param kwargs: :return:'
def __call__(self, *args, **kwargs):
    if 'no_repo_failure' in kwargs:
        self.__ignore_repo_failure = kwargs['no_repo_failure']
    if 'systemd_scope' in kwargs:
        self.__systemd_scope = kwargs['systemd_scope']
    return self
'Call configurator. :param item: :return:'
def __getattr__(self, item):
    if self.__called:
        self._reset()
        self.__called = False
    if item == 'xml':
        self.__xml = True
    elif item == 'nolock':
        self.__no_lock = True
    elif item == 'noraise':
        self.__no_raise = True
    elif item == 'refreshable':
        self.__refresh = True
    elif item == 'call':
        return self.__call
    else:
        return self.__dict__[item]
    if self.__no_lock:
        self.__no_lock = not self.__refresh
    return self
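This attribute hook is what makes the wrapper usable as a fluent call chain, as seen further down in self.zypper.nolock.xml.call('se', '-xv', self.name). A hedged sketch of how such a chain reads, assuming zypper is an instance of this wrapper class (the handle name is illustrative, not defined here):

# Each attribute access flips a flag on the wrapper and returns the same object;
# accessing 'call' returns the bound __call method that finally runs the command.

# Search in XML mode without taking the zypper lock; XML mode returns a parsed DOM.
doc = zypper.nolock.xml.call('se', '-xv', 'libzypp')

# Allow metadata refresh and suppress raising on errors; plain mode returns stdout.
out = zypper.noraise.refreshable.call('ref')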
'Is this an error code? :return:'
def _is_error(self):
return (self.exit_code not in self.SUCCESS_EXIT_CODES)
'Is this a lock error code? :return:'
def _is_lock(self):
return (self.exit_code == self.LOCK_EXIT_CODE)
'Is Zypper\'s output in XML format? :return:'
def _is_xml_mode(self):
return (([itm for itm in self.XML_DIRECTIVES if (itm in self.__cmd)] and True) or False)
'Check and set the result of a zypper command. In case of an error,
either raise a CommandExecutionError or extract the error.

result
    The result of a zypper command called with cmd.run_all'
def _check_result(self):
    if not self.__call_result:
        raise CommandExecutionError('No output result from Zypper?')
    self.exit_code = self.__call_result['retcode']
    if self._is_lock():
        return False
    if self._is_error():
        _error_msg = list()
        if not self._is_xml_mode():
            msg = (self.__call_result['stderr'] and self.__call_result['stderr'].strip()) or ''
            if msg:
                _error_msg.append(msg)
        else:
            try:
                doc = dom.parseString(self.__call_result['stdout'])
            except ExpatError as err:
                log.error(err)
                doc = None
            if doc:
                msg_nodes = doc.getElementsByTagName('message')
                for node in msg_nodes:
                    if node.getAttribute('type') == 'error':
                        _error_msg.append(node.childNodes[0].nodeValue)
            elif self.__call_result['stderr'].strip():
                _error_msg.append(self.__call_result['stderr'].strip())
        self.error_msg = _error_msg
    return True
'Call Zypper. :param state: :return:'
def __call(self, *args, **kwargs):
    self.__called = True
    if self.__xml:
        self.__cmd.append('--xmlout')
    if not self.__refresh:
        self.__cmd.append('--no-refresh')
    self.__cmd.extend(args)
    kwargs['output_loglevel'] = 'trace'
    kwargs['python_shell'] = False
    kwargs['env'] = self.__env.copy()
    if self.__no_lock:
        kwargs['env']['ZYPP_READONLY_HACK'] = '1'
    was_blocked = False
    while True:
        cmd = []
        if self.__systemd_scope:
            cmd.extend(['systemd-run', '--scope'])
        cmd.extend(self.__cmd)
        log.debug('Calling Zypper: ' + ' '.join(cmd))
        self.__call_result = __salt__['cmd.run_all'](cmd, **kwargs)
        if self._check_result():
            break
        if os.path.exists(self.ZYPPER_LOCK):
            try:
                with salt.utils.files.fopen(self.ZYPPER_LOCK) as rfh:
                    data = __salt__['ps.proc_info'](int(rfh.readline()), attrs=['pid', 'name', 'cmdline', 'create_time'])
                    data['cmdline'] = ' '.join(data['cmdline'])
                    data['info'] = 'Blocking process created at {0}.'.format(datetime.datetime.utcfromtimestamp(data['create_time']).isoformat())
                    data['success'] = True
            except Exception as err:
                data = {'info': 'Unable to retrieve information about blocking process: {0}'.format(err.message), 'success': False}
        else:
            data = {'info': 'Zypper is locked, but no Zypper lock has been found.', 'success': False}
        if not data['success']:
            log.debug('Unable to collect data about blocking process.')
        else:
            log.debug('Collected data about blocking process.')
        __salt__['event.fire_master'](data, self.TAG_BLOCKED)
        log.debug('Fired a Zypper blocked event to the master with the data: {0}'.format(str(data)))
        log.debug('Waiting 5 seconds for Zypper gets released...')
        time.sleep(5)
        if not was_blocked:
            was_blocked = True
    if was_blocked:
        __salt__['event.fire_master']({'success': not len(self.error_msg), 'info': self.error_msg or 'Zypper has been released'}, self.TAG_RELEASED)
    if self.error_msg and not self.__no_raise and not self.__ignore_repo_failure:
        raise CommandExecutionError('Zypper command failure: {0}'.format(self.error_msg))
    return (self._is_xml_mode() and dom.parseString(self.__call_result['stdout'])) or self.__call_result['stdout']
':type zypper: a reference to an instance of a _Zypper class.'
def __init__(self, zypper):
    self.name = None
    self.version = None
    self.zypper = zypper
    self._attr_solvable_version = 'edition'
    self._op = None
'Convert a string wildcard to a zypper query. :param pkg_name: :param pkg_version: :return:'
def __call__(self, pkg_name, pkg_version):
    if pkg_version:
        self.name = pkg_name
        self._set_version(pkg_version)
        versions = sorted([LooseVersion(vrs) for vrs in self._get_scope_versions(self._get_available_versions())])
        return (versions and '{0}{1}'.format(self._op or '', versions[-1])) or None
'Get available versions of the package. :return:'
def _get_available_versions(self):
    solvables = self.zypper.nolock.xml.call('se', '-xv', self.name).getElementsByTagName('solvable')
    if not solvables:
        raise CommandExecutionError("No packages found matching '{0}'".format(self.name))
    return sorted(set([slv.getAttribute(self._attr_solvable_version)
                       for slv in solvables if slv.getAttribute(self._attr_solvable_version)]))
'Get the available versions that fall within the requested wildcard scope. :return:'
def _get_scope_versions(self, pkg_versions):
    get_in_versions = []
    for p_version in pkg_versions:
        if fnmatch.fnmatch(p_version, self.version):
            get_in_versions.append(p_version)
    return get_in_versions
'Stash operator from the version, if any. :return:'
def _set_version(self, version):
    if not version:
        return
    exact_version = re.sub('[<>=+]*', '', version)
    self._op = version.replace(exact_version, '') or None
    if self._op and self._op not in self.Z_OP:
        raise CommandExecutionError('Zypper do not supports operator "{0}".'.format(self._op))
    self.version = exact_version
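A hedged usage sketch of the wildcard helper defined by the methods above, assuming wildcard is an instance built around the zypper wrapper (the handle names are illustrative): _set_version stashes the operator, the remaining pattern is matched against the available versions with fnmatch, and the newest match comes back with the operator re-attached.

# Illustrative only: 'wildcard' is an instance of the version-wildcard helper above.
# '<=16.2.*' splits into the operator '<=' and the fnmatch pattern '16.2.*'.
query = wildcard('libzypp', '<=16.2.*')
# e.g. '<=16.2.6-27.9.1' -- the newest available 16.2.* version, operator re-attached.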
'Constructor. :param options: :param db_path: :param pid_file:'
def __init__(self, cachedir=None, piddir=None, pidfilename=None):
    if not cachedir and '__salt__' in globals():
        cachedir = globals().get('__salt__')['config.get']('inspector.db', '')
    self.dbfile = os.path.join(cachedir or self.DEFAULT_CACHE_PATH, self.DB_FILE)
    self.db = DBHandle(self.dbfile)
    if not piddir and '__salt__' in globals():
        piddir = globals().get('__salt__')['config.get']('inspector.pid', '')
    self.pidfile = os.path.join(piddir or self.DEFAULT_PID_PATH, pidfilename or self.PID_FILE)
'Load data by keys. :param data: :return:'
def load(self, **descr):
    for obj, data in descr.items():
        setattr(self._data, obj, data)
    return self
'Export to the Kiwi config.xml as text. :return:'
def export(self, name):
    self.name = name
    root = self._create_doc()
    self._set_description(root)
    self._set_preferences(root)
    self._set_repositories(root)
    self._set_users(root)
    self._set_packages(root)
    return '\n'.join([line for line in minidom.parseString(
        etree.tostring(root, encoding='UTF-8', pretty_print=True)).toprettyxml(indent=' ').split('\n')
        if line.strip()])
'Get package manager. :return:'
def _get_package_manager(self):
    ret = None
    if self.__grains__.get('os_family') in ('Kali', 'Debian'):
        ret = 'apt-get'
    elif self.__grains__.get('os_family', '') == 'Suse':
        ret = 'zypper'
    elif self.__grains__.get('os_family', '') == 'redhat':
        ret = 'yum'
    if ret is None:
        raise InspectorKiwiProcessorException('Unsupported platform: {0}'.format(self.__grains__.get('os_family')))
    return ret
'Set preferences. :return:'
def _set_preferences(self, node):
    pref = etree.SubElement(node, 'preferences')
    pacman = etree.SubElement(pref, 'packagemanager')
    pacman.text = self._get_package_manager()
    p_version = etree.SubElement(pref, 'version')
    p_version.text = '0.0.1'
    p_type = etree.SubElement(pref, 'type')
    p_type.set('image', 'vmx')
    for disk_id, disk_data in self._data.system.get('disks', {}).items():
        if disk_id.startswith('/dev'):
            p_type.set('filesystem', disk_data.get('type') or 'ext3')
            break
    p_type.set('installiso', 'true')
    p_type.set('boot', 'vmxboot/suse-leap42.1')
    p_type.set('format', self.format)
    p_type.set('bootloader', 'grub2')
    p_type.set('timezone', __salt__['timezone.get_zone']())
    p_type.set('hwclock', __salt__['timezone.get_hwclock']())
    return pref
'Get user groups. :param user: :return:'
def _get_user_groups(self, user):
return ([g.gr_name for g in grp.getgrall() if (user in g.gr_mem)] + [grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name])
'Create existing local users.

<users group="root">
    <user password="$1$wYJUgpM5$RXMMeASDc035eX.NbYWFl0" home="/root" name="root"/>
</users>

:param node:
:return:'
def _set_users(self, node):
    shadow = {}
    with salt.utils.files.fopen('/etc/shadow') as rfh:
        for sh_line in rfh.read().split(os.linesep):
            if sh_line.strip():
                login, pwd = sh_line.split(':')[:2]
                if pwd and pwd[0] not in '!*':
                    shadow[login] = {'p': pwd}
    with salt.utils.files.fopen('/etc/passwd') as rfh:
        for ps_line in rfh.read().split(os.linesep):
            if ps_line.strip():
                ps_line = ps_line.strip().split(':')
                if ps_line[0] in shadow:
                    shadow[ps_line[0]]['h'] = ps_line[5]
                    shadow[ps_line[0]]['s'] = ps_line[6]
                    shadow[ps_line[0]]['g'] = self._get_user_groups(ps_line[0])
    users_groups = []
    users_node = etree.SubElement(node, 'users')
    for u_name, u_data in shadow.items():
        user_node = etree.SubElement(users_node, 'user')
        user_node.set('password', u_data['p'])
        user_node.set('home', u_data['h'])
        user_node.set('name', u_name)
        users_groups.extend(u_data['g'])
    users_node.set('group', ','.join(users_groups))
    return users_node
'Create repositories. :param node: :return:'
def _set_repositories(self, node):
    priority = 99
    for repo_id, repo_data in self._data.software.get('repositories', {}).items():
        if type(repo_data) == list:
            repo_data = repo_data[0]
        if repo_data.get('enabled') or not repo_data.get('disabled'):
            uri = repo_data.get('baseurl', repo_data.get('uri'))
            if not uri:
                continue
            repo = etree.SubElement(node, 'repository')
            if self.__grains__.get('os_family') in ('Kali', 'Debian'):
                repo.set('alias', repo_id)
                repo.set('distribution', repo_data['dist'])
            else:
                repo.set('alias', repo_data['alias'])
                if self.__grains__.get('os_family', '') == 'Suse':
                    repo.set('type', 'yast2')
                repo.set('priority', str(priority))
            source = etree.SubElement(repo, 'source')
            source.set('path', uri)
            priority -= 1
'Set packages and collections. :param node: :return:'
def _set_packages(self, node):
    pkgs = etree.SubElement(node, 'packages')
    for pkg_name, pkg_version in sorted(self._data.software.get('packages', {}).items()):
        pkg = etree.SubElement(pkgs, 'package')
        pkg.set('name', pkg_name)
    if self.__grains__.get('os_family', '') == 'Suse':
        for ptn_id, ptn_data in self._data.software.get('patterns', {}).items():
            if ptn_data.get('installed'):
                ptn = etree.SubElement(pkgs, 'namedCollection')
                ptn.set('name', ptn_id)
    return pkgs
'Create a system description. :return:'
def _set_description(self, node):
    hostname = socket.getfqdn() or platform.node()
    descr = etree.SubElement(node, 'description')
    author = etree.SubElement(descr, 'author')
    author.text = 'salt.modules.node on {0}'.format(hostname)
    contact = etree.SubElement(descr, 'contact')
    contact.text = 'root@{0}'.format(hostname)
    specs = etree.SubElement(descr, 'specification')
    specs.text = 'Rebuild of {0}, based on Salt inspection.'.format(hostname)
    return descr
'Create document. :return:'
def _create_doc(self):
    root = etree.Element('image')
    root.set('schemaversion', '6.3')
    root.set('name', self.name)
    return root
'An alias for grains getter.'
def _grain(self, grain):
return __grains__.get(grain, 'N/A')
'Get a size of a disk.'
def _get_disk_size(self, device):
    out = __salt__['cmd.run_all']('df {0}'.format(device))
    if out['retcode']:
        msg = 'Disk size info error: {0}'.format(out['stderr'])
        log.error(msg)
        raise SIException(msg)
    devpath, blocks, used, available, used_p, mountpoint = [
        elm for elm in out['stdout'].split(os.linesep)[-1].split(' ') if elm]
    return {'device': devpath, 'blocks': blocks, 'used': used,
            'available': available, 'used (%)': used_p, 'mounted': mountpoint}
'Get available file systems and their types.'
def _get_fs(self):
    data = dict()
    for dev, dev_data in salt.utils.fsutils._blkid().items():
        dev = self._get_disk_size(dev)
        device = dev.pop('device')
        dev['type'] = dev_data['type']
        data[device] = dev
    return data
'Get mounted FS on the system.'
def _get_mounts(self):
return salt.utils.fsutils._get_mounts()
'Get available CPU information.'
def _get_cpu(self):
    out = __salt__['cmd.run_all']('lscpu')
    salt.utils.fsutils._verify_run(out)
    data = dict()
    for descr, value in [elm.split(':', 1) for elm in out['stdout'].split(os.linesep)]:
        data[descr.strip()] = value.strip()
    return data
'Get memory.'
def _get_mem(self):
    out = __salt__['cmd.run_all']('vmstat -s')
    if out['retcode']:
        raise SIException('Memory info error: {0}'.format(out['stderr']))
    ret = dict()
    for line in out['stdout'].split(os.linesep):
        line = line.strip()
        if not line:
            continue
        size, descr = line.split(' ', 1)
        if descr.startswith('K '):
            descr = descr[2:]
            size = size + 'K'
        ret[descr] = size
    return ret
'Get network configuration.'
def _get_network(self):
    data = dict()
    data['interfaces'] = salt.utils.network.interfaces()
    data['subnets'] = salt.utils.network.subnets()
    return data
'Get operating system summary'
def _get_os(self):
return {'name': self._grain('os'), 'family': self._grain('os_family'), 'arch': self._grain('osarch'), 'release': self._grain('osrelease')}
'Constructor. :param scope: :return:'
def __init__(self, scope, cachedir=None):
    if scope and scope not in self.SCOPES:
        raise InspectorQueryException('Unknown scope: {0}. Must be one of: {1}'.format(repr(scope), ', '.join(self.SCOPES)))
    elif not scope:
        raise InspectorQueryException('Scope cannot be empty. Must be one of: {0}'.format(', '.join(self.SCOPES)))
    EnvLoader.__init__(self, cachedir=cachedir)
    self.scope = '_' + scope
    self.local_identity = dict()
'Call the query with the defined scope. :param args: :param kwargs: :return:'
def __call__(self, *args, **kwargs):
return getattr(self, self.scope)(*args, **kwargs)
'Returns all diffs to the configuration files.'
def _changes(self, *args, **kwargs):
raise Exception('Not yet implemented')
'Return configuration files.'
def _configuration(self, *args, **kwargs):
    data = dict()
    self.db.open()
    for pkg in self.db.get(Package):
        configs = list()
        for pkg_cfg in self.db.get(PackageCfgFile, eq={'pkgid': pkg.id}):
            configs.append(pkg_cfg.path)
        data[pkg.name] = configs
    if not data:
        raise InspectorQueryException('No inspected configuration yet available.')
    return data
'Return all local accounts known to the system.'
def _get_local_users(self, disabled=None):
    users = dict()
    path = '/etc/passwd'
    with salt.utils.files.fopen(path, 'r') as fp_:
        for line in fp_:
            line = line.strip()
            if ':' not in line:
                continue
            name, password, uid, gid, gecos, directory, shell = line.split(':')
            active = not (password == '*' or password.startswith('!'))
            if (disabled is False and active) or (disabled is True and not active) or disabled is None:
                users[name] = {'uid': uid, 'gid': gid, 'info': gecos,
                               'home': directory, 'shell': shell, 'disabled': not active}
    return users
'Return all local groups known to the system.'
def _get_local_groups(self):
    groups = dict()
    path = '/etc/group'
    with salt.utils.files.fopen(path, 'r') as fp_:
        for line in fp_:
            line = line.strip()
            if ':' not in line:
                continue
            name, password, gid, users = line.split(':')
            groups[name] = {'gid': gid}
            if users:
                groups[name]['users'] = users.split(',')
    return groups
'Return all known accounts, excluding local accounts.'
def _get_external_accounts(self, locals):
    users = dict()
    out = __salt__['cmd.run_all']('passwd -S -a')
    if out['retcode']:
        return users
    status = {'L': 'Locked', 'NP': 'No password', 'P': 'Usable password', 'LK': 'Locked'}
    for data in [elm.strip().split(' ') for elm in out['stdout'].split(os.linesep) if elm.strip()]:
        if len(data) < 2:
            continue
        name, login = data[:2]
        if name not in locals:
            users[name] = {'login': login, 'status': status.get(login, 'N/A')}
    return users
'Local users and groups.

accounts
    Can be either \'local\', \'remote\' or \'all\' (equal to "local,remote").
    Remote accounts cannot be resolved on all systems, only on those
    that support \'passwd -S -a\'.

disabled
    True (or False, default) to return only disabled accounts.'
def _identity(self, *args, **kwargs):
    LOCAL = 'local accounts'
    EXT = 'external accounts'
    data = dict()
    data[LOCAL] = self._get_local_users(disabled=kwargs.get('disabled'))
    data[EXT] = self._get_external_accounts(data[LOCAL].keys()) or 'N/A'
    data['local groups'] = self._get_local_groups()
    return data
'This basically calls grains items and picks out only necessary information in a certain structure. :param args: :param kwargs: :return:'
def _system(self, *args, **kwargs):
    sysinfo = SysInfo(__grains__.get('kernel'))
    data = dict()
    data['cpu'] = sysinfo._get_cpu()
    data['disks'] = sysinfo._get_fs()
    data['mounts'] = sysinfo._get_mounts()
    data['memory'] = sysinfo._get_mem()
    data['network'] = sysinfo._get_network()
    data['os'] = sysinfo._get_os()
    return data
'Return installed software.'
def _software(self, *args, **kwargs):
    data = dict()
    if 'exclude' in kwargs:
        excludes = kwargs['exclude'].split(',')
    else:
        excludes = list()
    os_family = __grains__.get('os_family').lower()
    if os_family == 'suse':
        LOCKS = 'pkg.list_locks'
        if 'products' not in excludes:
            products = __salt__['pkg.list_products']()
            if products:
                data['products'] = products
    elif os_family == 'redhat':
        LOCKS = 'pkg.get_locked_packages'
    else:
        LOCKS = None
    if LOCKS and 'locks' not in excludes:
        locks = __salt__[LOCKS]()
        if locks:
            data['locks'] = locks
    if os_family == 'suse':
        PATTERNS = 'pkg.list_installed_patterns'
    elif os_family == 'redhat':
        PATTERNS = 'pkg.group_list'
    else:
        PATTERNS = None
    if PATTERNS and 'patterns' not in excludes:
        patterns = __salt__[PATTERNS]()
        if patterns:
            data['patterns'] = patterns
    if 'packages' not in excludes:
        data['packages'] = __salt__['pkg.list_pkgs']()
    if 'repositories' not in excludes:
        repos = __salt__['pkg.list_repos']()
        if repos:
            data['repositories'] = repos
    return data
'Get list of enabled and disabled services on the particular system.'
def _services(self, *args, **kwargs):
return {'enabled': __salt__['service.get_enabled'](), 'disabled': __salt__['service.get_disabled']()}
'Resolve local users and groups. :param iid: :param named: :param uid: :return:'
def _id_resolv(self, iid, named=True, uid=True):
    if not self.local_identity:
        self.local_identity['users'] = self._get_local_users()
        self.local_identity['groups'] = self._get_local_groups()
    if not named:
        return iid
    for name, meta in self.local_identity[(uid and 'users') or 'groups'].items():
        if (uid and int(meta.get('uid', -1)) == iid) or (not uid and int(meta.get('gid', -1)) == iid):
            return name
    return iid
'Find all unmanaged files. Returns maximum 1000 values.

Parameters:

* **filter**: Include only results whose path starts with the filter string.
* **time**: Display time in Unix ticks or format according to the configured TZ (default).
  Values: ticks, tz (default)
* **size**: Format size. Values: B, KB, MB, GB
* **owners**: Resolve UID/GID to actual names or leave them numeric (default).
  Values: name (default), id
* **type**: Comma-separated type of included payload: dir (or directory), link and/or file.
* **brief**: Return just a list of matches, if True. Default: False
* **offset**: Offset of the files
* **max**: Maximum returned values. Default 1000.

Options:

* **total**: Return the total number of found payload files'
def _payload(self, *args, **kwargs):
    def _size_format(size, fmt):
        if fmt is None:
            return size
        fmt = fmt.lower()
        if fmt == 'b':
            return '{0} Bytes'.format(size)
        elif fmt == 'kb':
            return '{0} Kb'.format(round(float(size) / 1024, 2))
        elif fmt == 'mb':
            return '{0} Mb'.format(round(float(size) / 1024 / 1024, 2))
        elif fmt == 'gb':
            return '{0} Gb'.format(round(float(size) / 1024 / 1024 / 1024, 2))

    filter = kwargs.get('filter')
    offset = kwargs.get('offset', 0)
    timeformat = kwargs.get('time', 'tz')
    if timeformat not in ['ticks', 'tz']:
        raise InspectorQueryException('Unknown "{0}" value for parameter "time"'.format(timeformat))
    tfmt = lambda param: (timeformat == 'tz' and time.strftime('%b %d %Y %H:%M:%S', time.gmtime(param))) or int(param)
    size_fmt = kwargs.get('size')
    if size_fmt is not None and size_fmt.lower() not in ['b', 'kb', 'mb', 'gb']:
        raise InspectorQueryException('Unknown "{0}" value for parameter "size". Should be either B, Kb, Mb or Gb'.format(timeformat))
    owners = kwargs.get('owners', 'id')
    if owners not in ['name', 'id']:
        raise InspectorQueryException('Unknown "{0}" value for parameter "owners". Should be either name or id (default)'.format(owners))
    incl_type = [prm for prm in kwargs.get('type', '').lower().split(',') if prm]
    if not incl_type:
        incl_type.append('file')
    for i_type in incl_type:
        if i_type not in ['directory', 'dir', 'd', 'file', 'f', 'link', 'l']:
            raise InspectorQueryException('Unknown "{0}" values for parameter "type". Should be comma separated one or more of dir, file and/or link.'.format(', '.join(incl_type)))
    self.db.open()
    if 'total' in args:
        return {'total': len(self.db.get(PayloadFile))}
    brief = kwargs.get('brief')
    pld_files = list() if brief else dict()
    for pld_data in self.db.get(PayloadFile)[offset:offset + kwargs.get('max', 1000)]:
        if brief:
            pld_files.append(pld_data.path)
        else:
            pld_files[pld_data.path] = {
                'uid': self._id_resolv(pld_data.uid, named=(owners == 'id')),
                'gid': self._id_resolv(pld_data.gid, named=(owners == 'id'), uid=False),
                'size': _size_format(pld_data.p_size, fmt=size_fmt),
                'mode': oct(pld_data.mode),
                'accessed': tfmt(pld_data.atime),
                'modified': tfmt(pld_data.mtime),
                'created': tfmt(pld_data.ctime),
            }
    return pld_files
'Return the full summary of the particular system.'
def _all(self, *args, **kwargs):
    data = dict()
    data['software'] = self._software(**kwargs)
    data['system'] = self._system(**kwargs)
    data['services'] = self._services(**kwargs)
    try:
        data['configuration'] = self._configuration(**kwargs)
    except InspectorQueryException as ex:
        data['configuration'] = 'N/A'
        log.error(ex)
    data['payload'] = self._payload(**kwargs) or 'N/A'
    return data
'Constructor.'
def __init__(self, path):
    self._path = path
    self.init_queries = list()
    self._db = CsvDB(self._path)
'Init the database, if required.'
def open(self, new=False):
    if new:
        self._db.new()
    else:
        self._db.open()
    self._run_init_queries()
'Initialization queries'
def _run_init_queries(self):
    for obj in (Package, PackageCfgFile, PayloadFile, IgnoredDir, AllowedDir):
        self._db.create_table_from_object(obj())
'Purge whole database.'
def purge(self):
    for table_name in self._db.list_tables():
        self._db.flush(table_name)
    self._run_init_queries()
'Flush the table.'
def flush(self, table):
self._db.flush(table)
'Close the database connection.'
def close(self):
self._db.close()
'Proxy methods from the Database instance. :param item: :return:'
def __getattr__(self, item):
return getattr(self._db, item)
'Keep singleton.'
def __new__(cls, *args, **kwargs):
    if not cls.__instance:
        cls.__instance = super(DBHandle, cls).__new__(cls)
    return cls.__instance
'Database handle for the specific :param path: :return:'
def __init__(self, path):
DBHandleBase.__init__(self, path)
'Serialize the object to a row for CSV according to the table description. :return:'
def _serialize(self, description):
return [getattr(self, attr) for attr in description]
'Constructor to store the database files. :param path:'
def __init__(self, path):
    self._prepare(path)
    self._opened = False
    self.db_path = None
    self._opened = False
    self._tables = {}
'Create label of the database, based on the date-time. :return:'
def _label(self):
return datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S')
'Create a new database and open it. :return:'
def new(self):
    dbname = self._label()
    self.db_path = os.path.join(self.path, dbname)
    if not os.path.exists(self.db_path):
        os.makedirs(self.db_path)
    self._opened = True
    self.list_tables()
    return dbname
'Purge the database. :param dbid: :return:'
def purge(self, dbid):
    db_path = os.path.join(self.path, dbid)
    if os.path.exists(db_path):
        shutil.rmtree(db_path, ignore_errors=True)
        return True
    return False
'Flush table. :param table: :return:'
def flush(self, table):
    table_path = os.path.join(self.db_path, table)
    if os.path.exists(table_path):
        os.unlink(table_path)
'List all the databases on the given path. :return:'
def list(self):
    databases = []
    for dbname in os.listdir(self.path):
        databases.append(dbname)
    return list(reversed(sorted(databases)))
'Load existing tables and their descriptions. :return:'
def list_tables(self):
    if not self._tables:
        for table_name in os.listdir(self.db_path):
            self._tables[table_name] = self._load_table(table_name)
    return self._tables.keys()
'Open a database from the path by name, or the latest one. If there are no databases yet, implicitly create a new one. :return:'
def open(self, dbname=None):
    databases = self.list()
    if self.is_closed():
        self.db_path = os.path.join(self.path, dbname or (databases and databases[0]) or self.new())
        if not self._opened:
            self.list_tables()
            self._opened = True
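Taken together, the methods above give CsvDB a small lifecycle API. A minimal sketch of how it might be driven, assuming the class is importable as CsvDB; the cache path and table name are illustrative, not taken from the source.

db = CsvDB('/var/cache/salt/minion/inspector')  # illustrative path

dbname = db.new()          # create a timestamp-labelled database directory and open it
print(db.list())           # database labels under the path, newest first
print(db.list_tables())    # table names discovered in the open database

db.close()
db.open()                  # re-open the latest database, or create one implicitly
db.flush('inspector_pkg')  # hypothetical table name: remove that table's file
db.purge(dbname)           # delete the whole database directory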
'Close the database. :return:'
def close(self):
self._opened = False