@property
def is_global(self):
    """Test if this address is allocated for public networks.

    Returns:
        A boolean, True if the address is not reserved per
        iana-ipv4-special-registry.
    """
    return (not (self.network_address in IPv4Network('100.64.0.0/10') and
                 self.broadcast_address in IPv4Network('100.64.0.0/10')) and
            not self.is_private)
def _ip_int_from_string(self, ip_str):
    """Turn an IPv6 ip_str into an integer.

    Args:
        ip_str: A string, the IPv6 ip_str.

    Returns:
        An int, the IPv6 address

    Raises:
        AddressValueError: if ip_str isn't a valid IPv6 Address.
    """
    if not ip_str:
        raise AddressValueError('Address cannot be empty')

    parts = ip_str.split(':')

    # An IPv6 address needs at least 2 colons (3 parts).
    _min_parts = 3
    if len(parts) < _min_parts:
        msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
        raise AddressValueError(msg)

    # If the address has an IPv4-style suffix, convert it to hexadecimal.
    if '.' in parts[-1]:
        try:
            ipv4_int = IPv4Address(parts.pop())._ip
        except AddressValueError as exc:
            raise AddressValueError("%s in %r" % (exc, ip_str))
        parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
        parts.append('%x' % (ipv4_int & 0xFFFF))

    # An IPv6 address can't have more than 8 colons (9 parts).
    # The extra colon comes from using the '::' notation.
    _max_parts = self._HEXTET_COUNT + 1
    if len(parts) > _max_parts:
        msg = "At most %d colons permitted in %r" % (_max_parts - 1, ip_str)
        raise AddressValueError(msg)

    # Disregarding the endpoints, find '::' with nothing in between.
    # This indicates that a run of zeroes has been skipped.
    skip_index = None
    for i in range(1, len(parts) - 1):
        if not parts[i]:
            if skip_index is not None:
                # Can't have more than one '::'
                msg = "At most one '::' permitted in %r" % ip_str
                raise AddressValueError(msg)
            skip_index = i

    # parts_hi is the number of parts to copy from above/before the '::'
    # parts_lo is the number of parts to copy from below/after the '::'
    if skip_index is not None:
        # If we found a '::', then check if it also covers the endpoints.
        parts_hi = skip_index
        parts_lo = len(parts) - skip_index - 1
        if not parts[0]:
            parts_hi -= 1
            if parts_hi:
                msg = "Leading ':' only permitted as part of '::' in %r"
                raise AddressValueError(msg % ip_str)
        if not parts[-1]:
            parts_lo -= 1
            if parts_lo:
                msg = "Trailing ':' only permitted as part of '::' in %r"
                raise AddressValueError(msg % ip_str)
        parts_skipped = self._HEXTET_COUNT - (parts_hi + parts_lo)
        if parts_skipped < 1:
            msg = "Expected at most %d other parts with '::' in %r"
            raise AddressValueError(msg % (self._HEXTET_COUNT - 1, ip_str))
    else:
        # Otherwise, allocate the entire address to parts_hi. The
        # endpoints could still be empty, but _parse_hextet() will check
        # for that.
        if len(parts) != self._HEXTET_COUNT:
            msg = "Exactly %d parts expected without '::' in %r"
            raise AddressValueError(msg % (self._HEXTET_COUNT, ip_str))
        if not parts[0]:
            msg = "Leading ':' only permitted as part of '::' in %r"
            raise AddressValueError(msg % ip_str)
        if not parts[-1]:
            msg = "Trailing ':' only permitted as part of '::' in %r"
            raise AddressValueError(msg % ip_str)
        parts_hi = len(parts)
        parts_lo = 0
        parts_skipped = 0

    try:
        # Now, parse the hextets into a 128-bit integer.
        ip_int = 0
        for i in range(parts_hi):
            ip_int <<= 16
            ip_int |= self._parse_hextet(parts[i])
        ip_int <<= 16 * parts_skipped
        for i in range(-parts_lo, 0):
            ip_int <<= 16
            ip_int |= self._parse_hextet(parts[i])
        return ip_int
    except ValueError as exc:
        raise AddressValueError("%s in %r" % (exc, ip_str))
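# A hedged usage sketch via the stdlib `ipaddress` module, which this parser
# mirrors: '::' stands in for the longest run of zero hextets, and a trailing
# dotted quad is folded into the low 32 bits.
import ipaddress

assert int(ipaddress.IPv6Address('::1')) == 1
assert (ipaddress.IPv6Address('::ffff:192.0.2.1') ==
        ipaddress.IPv6Address('::ffff:c000:201'))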
def _parse_hextet(self, hextet_str):
    """Convert an IPv6 hextet string into an integer.

    Args:
        hextet_str: A string, the number to parse.

    Returns:
        The hextet as an integer.

    Raises:
        ValueError: if the input isn't strictly a hex number from
          [0..FFFF].
    """
    if not self._HEX_DIGITS.issuperset(hextet_str):
        raise ValueError("Only hex digits permitted in %r" % hextet_str)
    if len(hextet_str) > 4:
        msg = "At most 4 characters permitted in %r"
        raise ValueError(msg % hextet_str)
    return int(hextet_str, 16)
def _compress_hextets(self, hextets):
    """Compresses a list of hextets.

    Compresses a list of strings, replacing the longest continuous
    sequence of "0" in the list with "" and adding empty strings at
    the beginning or at the end of the string such that subsequently
    calling ":".join(hextets) will produce the compressed version of
    the IPv6 address.

    Args:
        hextets: A list of strings, the hextets to compress.

    Returns:
        A list of strings.
    """
    best_doublecolon_start = -1
    best_doublecolon_len = 0
    doublecolon_start = -1
    doublecolon_len = 0
    for index, hextet in enumerate(hextets):
        if hextet == '0':
            doublecolon_len += 1
            if doublecolon_start == -1:
                doublecolon_start = index
            if doublecolon_len > best_doublecolon_len:
                best_doublecolon_len = doublecolon_len
                best_doublecolon_start = doublecolon_start
        else:
            doublecolon_len = 0
            doublecolon_start = -1

    if best_doublecolon_len > 1:
        best_doublecolon_end = best_doublecolon_start + best_doublecolon_len
        if best_doublecolon_end == len(hextets):
            hextets += ['']
        hextets[best_doublecolon_start:best_doublecolon_end] = ['']
        if best_doublecolon_start == 0:
            hextets = [''] + hextets

    return hextets
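# A hedged usage sketch: these hextet helpers back the public compressed and
# exploded forms, e.g. via the stdlib module:
import ipaddress

assert ipaddress.IPv6Address('2001:0:0:0:0:0:0:1').compressed == '2001::1'
assert (ipaddress.IPv6Address('2001::1').exploded ==
        '2001:0000:0000:0000:0000:0000:0000:0001')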
def _string_from_ip_int(self, ip_int=None):
    """Turns a 128-bit integer into hexadecimal notation.

    Args:
        ip_int: An integer, the IP address.

    Returns:
        A string, the hexadecimal representation of the address.

    Raises:
        ValueError: The address is bigger than 128 bits of all ones.
    """
    if ip_int is None:
        ip_int = int(self._ip)
    if ip_int > self._ALL_ONES:
        raise ValueError('IPv6 address is too large')

    hex_str = '%032x' % ip_int
    hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
    hextets = self._compress_hextets(hextets)
    return ':'.join(hextets)

def _explode_shorthand_ip_string(self):
    """Expand a shortened IPv6 address.

    Args:
        ip_str: A string, the IPv6 address.

    Returns:
        A string, the expanded IPv6 address.
    """
    if isinstance(self, IPv6Network):
        ip_str = str(self.network_address)
    elif isinstance(self, IPv6Interface):
        ip_str = str(self.ip)
    else:
        ip_str = str(self)

    ip_int = self._ip_int_from_string(ip_str)
    hex_str = '%032x' % ip_int
    parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
    if isinstance(self, (_BaseNetwork, IPv6Interface)):
        return '%s/%d' % (':'.join(parts), self._prefixlen)
    return ':'.join(parts)
def _reverse_pointer(self):
    """Return the reverse DNS pointer name for the IPv6 address.

    This implements the method described in RFC3596 2.5.
    """
    reverse_chars = self.exploded[::-1].replace(':', '')
    return '.'.join(reverse_chars) + '.ip6.arpa'
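# Hedged example via the stdlib, which exposes this as the reverse_pointer
# property: every nibble of the exploded form, reversed and dot-separated.
import ipaddress

assert (ipaddress.IPv6Address('2001:db8::1').reverse_pointer ==
        '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.'
        '8.b.d.0.1.0.0.2.ip6.arpa')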
def __init__(self, address):
    """Instantiate a new IPv6 address object.

    Args:
        address: A string or integer representing the IP

          Additionally, an integer can be passed, so
          IPv6Address('2001:db8::') ==
            IPv6Address(42540766411282592856903984951653826560)
          or, more generally
          IPv6Address(int(IPv6Address('2001:db8::'))) ==
            IPv6Address('2001:db8::')

    Raises:
        AddressValueError: If address isn't a valid IPv6 address.
    """
    _BaseAddress.__init__(self, address)
    _BaseV6.__init__(self, address)

    if isinstance(address, int):
        self._check_int_address(address)
        self._ip = address
        return

    if isinstance(address, bytes):
        self._check_packed_address(address, 16)
        self._ip = _int_from_bytes(address, 'big')
        return

    addr_str = str(address)
    self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
    """The binary representation of this address."""
    return v6_int_to_packed(self._ip)

@property
def is_multicast(self):
    """Test if the address is reserved for multicast use.

    Returns:
        A boolean, True if the address is a multicast address.
        See RFC 2373 2.7 for details.
    """
    multicast_network = IPv6Network('ff00::/8')
    return self in multicast_network

@property
def is_reserved(self):
    """Test if the address is otherwise IETF reserved.

    Returns:
        A boolean, True if the address is within one of the
        reserved IPv6 Network ranges.
    """
    reserved_networks = [IPv6Network('::/8'), IPv6Network('100::/8'),
                         IPv6Network('200::/7'), IPv6Network('400::/6'),
                         IPv6Network('800::/5'), IPv6Network('1000::/4'),
                         IPv6Network('4000::/3'), IPv6Network('6000::/3'),
                         IPv6Network('8000::/3'), IPv6Network('A000::/3'),
                         IPv6Network('C000::/3'), IPv6Network('E000::/4'),
                         IPv6Network('F000::/5'), IPv6Network('F800::/6'),
                         IPv6Network('FE00::/9')]
    return any(self in x for x in reserved_networks)

@property
def is_link_local(self):
    """Test if the address is reserved for link-local.

    Returns:
        A boolean, True if the address is reserved per RFC 4291.
    """
    linklocal_network = IPv6Network('fe80::/10')
    return self in linklocal_network

@property
def is_site_local(self):
    """Test if the address is reserved for site-local.

    Note that the site-local address space has been deprecated by RFC 3879.
    Use is_private to test if this address is in the space of unique local
    addresses as defined by RFC 4193.

    Returns:
        A boolean, True if the address is reserved per RFC 3513 2.5.6.
    """
    sitelocal_network = IPv6Network('fec0::/10')
    return self in sitelocal_network

@property
def is_private(self):
    """Test if this address is allocated for private networks.

    Returns:
        A boolean, True if the address is reserved per
        iana-ipv6-special-registry.
    """
    return (self in IPv6Network('::1/128') or
            self in IPv6Network('::/128') or
            self in IPv6Network('::ffff:0:0/96') or
            self in IPv6Network('100::/64') or
            self in IPv6Network('2001::/23') or
            self in IPv6Network('2001:2::/48') or
            self in IPv6Network('2001:db8::/32') or
            self in IPv6Network('2001:10::/28') or
            self in IPv6Network('fc00::/7') or
            self in IPv6Network('fe80::/10'))

@property
def is_global(self):
    """Test if this address is allocated for public networks.

    Returns:
        A boolean, true if the address is not reserved per
        iana-ipv6-special-registry.
    """
    return not self.is_private

@property
def is_unspecified(self):
    """Test if the address is unspecified.

    Returns:
        A boolean, True if this is the unspecified address as defined in
        RFC 2373 2.5.2.
    """
    return self._ip == 0

@property
def is_loopback(self):
    """Test if the address is a loopback address.

    Returns:
        A boolean, True if the address is a loopback address as defined in
        RFC 2373 2.5.3.
    """
    return self._ip == 1

@property
def ipv4_mapped(self):
    """Return the IPv4 mapped address.

    Returns:
        If the IPv6 address is a v4 mapped address, return the
        IPv4 mapped address. Return None otherwise.
    """
    if (self._ip >> 32) != 0xFFFF:
        return None
    return IPv4Address(self._ip & 0xFFFFFFFF)
@property
def teredo(self):
    """Tuple of embedded teredo IPs.

    Returns:
        Tuple of the (server, client) IPs or None if the address
        doesn't appear to be a teredo address (doesn't start with
        2001::/32)
    """
    if (self._ip >> 96) != 0x20010000:
        return None
    return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
            IPv4Address(~self._ip & 0xFFFFFFFF))
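# Hedged example using the well-known Teredo sample address: the server is
# bits 32..63 and the client is the bitwise complement of the low 32 bits.
import ipaddress

assert (ipaddress.IPv6Address('2001:0:4136:e378:8000:63bf:3fff:fdd2').teredo ==
        (ipaddress.IPv4Address('65.54.227.120'),
         ipaddress.IPv4Address('192.0.2.45')))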
@property
def sixtofour(self):
    """Return the IPv4 6to4 embedded address.

    Returns:
        The IPv4 6to4-embedded address if present or None if the
        address doesn't appear to contain a 6to4 embedded address.
    """
    if (self._ip >> 112) != 0x2002:
        return None
    return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
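# Hedged example: a 6to4 address embeds the IPv4 address in bits 16..47,
# so 2002:c000:0204::/48 carries 192.0.2.4.
import ipaddress

assert (ipaddress.IPv6Address('2002:c000:204::').sixtofour ==
        ipaddress.IPv4Address('192.0.2.4'))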
def __init__(self, address, strict=True):
    """Instantiate a new IPv6 Network object.

    Args:
        address: A string or integer representing the IPv6 network or the
          IP and prefix/netmask.
          '2001:db8::/128'
          '2001:db8:0000:0000:0000:0000:0000:0000/128'
          '2001:db8::'
          are all functionally the same in IPv6.  That is to say,
          failing to provide a subnetmask will create an object with
          a mask of /128.

          Additionally, an integer can be passed, so
          IPv6Network('2001:db8::') ==
            IPv6Network(42540766411282592856903984951653826560)
          or, more generally
          IPv6Network(int(IPv6Network('2001:db8::'))) ==
            IPv6Network('2001:db8::')

        strict: A boolean. If true, ensure that we have been passed
          a true network address, e.g., 2001:db8::1000/124 and not an
          IP address on a network, e.g., 2001:db8::1/124.

    Raises:
        AddressValueError: If address isn't a valid IPv6 address.
        NetmaskValueError: If the netmask isn't valid for
          an IPv6 address.
        ValueError: If strict was True and a network address was not
          supplied.
    """
    _BaseV6.__init__(self, address)
    _BaseNetwork.__init__(self, address)

    # Efficient constructor from an integer or a packed address: both
    # original branches were identical, so they are folded together here.
    if isinstance(address, (int, bytes)):
        self.network_address = IPv6Address(address)
        self._prefixlen = self._max_prefixlen
        self.netmask = IPv6Address(self._ALL_ONES)
        return

    # Assume input argument to be a string or any object representation
    # which converts into a formatted IP prefix string.
    addr = _split_optional_netmask(address)
    self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
    if len(addr) == 2:
        self._prefixlen = self._prefix_from_prefix_string(addr[1])
    else:
        self._prefixlen = self._max_prefixlen

    self.netmask = IPv6Address(self._ip_int_from_prefix(self._prefixlen))
    if strict:
        if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
                self.network_address):
            raise ValueError('%s has host bits set' % self)
    self.network_address = IPv6Address(int(self.network_address) &
                                       int(self.netmask))

    if self._prefixlen == (self._max_prefixlen - 1):
        self.hosts = self.__iter__

def hosts(self):
    """Generate Iterator over usable hosts in a network.

    This is like __iter__ except it doesn't return the
    Subnet-Router anycast address.
    """
    network = int(self.network_address)
    broadcast = int(self.broadcast_address)
    for x in long_range(1, broadcast - network + 1):
        yield self._address_class(network + x)

@property
def is_site_local(self):
    """Test if the address is reserved for site-local.

    Note that the site-local address space has been deprecated by RFC 3879.
    Use is_private to test if this address is in the space of unique local
    addresses as defined by RFC 4193.

    Returns:
        A boolean, True if the address is reserved per RFC 3513 2.5.6.
    """
    return (self.network_address.is_site_local and
            self.broadcast_address.is_site_local)
def _gen_back(self, back):
    '''
    Return the backend list
    '''
    if not back:
        back = self.opts['fileserver_backend']
    elif not isinstance(back, list):
        try:
            back = back.split(',')
        except AttributeError:
            back = six.text_type(back).split(',')

    if isinstance(back, collections.Sequence):
        # Normalize sequence types from the opts into a plain list.
        back = list(back)

    ret = []
    if not isinstance(back, list):
        return ret

    # If every requested backend is prefixed with a minus sign, treat the
    # list as a subtraction from the configured backends.
    try:
        subtract_only = all(x.startswith('-') for x in back)
    except AttributeError:
        pass
    else:
        if subtract_only:
            ret = self.opts['fileserver_backend']
            for sub in back:
                if '{0}.envs'.format(sub[1:]) in self.servers:
                    ret.remove(sub[1:])
                elif '{0}.envs'.format(sub[1:-2]) in self.servers:
                    # Strip a trailing 'fs' (e.g. '-gitfs' -> 'git') to
                    # match the loaded virtual module name.
                    ret.remove(sub[1:-2])
            return ret

    for sub in back:
        if '{0}.envs'.format(sub) in self.servers:
            ret.append(sub)
        elif '{0}.envs'.format(sub[:-2]) in self.servers:
            # Strip a trailing 'fs' (e.g. 'gitfs' -> 'git').
            ret.append(sub[:-2])
    return ret
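# A hedged illustration (hypothetical `fs` Fileserver instance with the
# 'roots' and 'git' backends enabled in fileserver_backend):
#
#   fs._gen_back(None)      # -> ['roots', 'git']  (all enabled backends)
#   fs._gen_back('roots')   # -> ['roots']
#   fs._gen_back('gitfs')   # -> ['git']           (trailing 'fs' stripped)
#   fs._gen_back('-git')    # -> ['roots']         (subtract-only form)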
def master_opts(self, load):
    '''
    Simplify master opts
    '''
    return self.opts

def clear_cache(self, back=None):
    '''
    Clear the cache of all of the fileserver backends that support the
    clear_cache function or the named backend(s) only.
    '''
    back = self._gen_back(back)
    cleared = []
    errors = []
    for fsb in back:
        fstr = '{0}.clear_cache'.format(fsb)
        if fstr in self.servers:
            log.debug('Clearing {0} fileserver cache'.format(fsb))
            failed = self.servers[fstr]()
            if failed:
                errors.extend(failed)
            else:
                cleared.append(
                    'The {0} fileserver cache was successfully '
                    'cleared'.format(fsb)
                )
    return cleared, errors

def lock(self, back=None, remote=None):
    '''
    ``remote`` can either be a dictionary containing repo configuration
    information, or a pattern. If the latter, then remotes for which the
    URL matches the pattern will be locked.
    '''
    back = self._gen_back(back)
    locked = []
    errors = []
    for fsb in back:
        fstr = '{0}.lock'.format(fsb)
        if fstr in self.servers:
            msg = 'Setting update lock for {0} remotes'.format(fsb)
            if remote:
                if not isinstance(remote, six.string_types):
                    errors.append(
                        'Badly formatted remote pattern \'{0}\''
                        .format(remote)
                    )
                    continue
                else:
                    msg += ' matching {0}'.format(remote)
            log.debug(msg)
            good, bad = self.servers[fstr](remote=remote)
            locked.extend(good)
            errors.extend(bad)
    return locked, errors

def clear_lock(self, back=None, remote=None):
    '''
    Clear the update lock for the enabled fileserver backends

    back
        Only clear the update lock for the specified backend(s). The
        default is to clear the lock for all enabled backends

    remote
        If specified, then any remotes which contain the passed string
        will have their lock cleared.
    '''
    back = self._gen_back(back)
    cleared = []
    errors = []
    for fsb in back:
        fstr = '{0}.clear_lock'.format(fsb)
        if fstr in self.servers:
            good, bad = clear_lock(self.servers[fstr], fsb, remote=remote)
            cleared.extend(good)
            errors.extend(bad)
    return cleared, errors
def update(self, back=None):
    '''
    Update all of the enabled fileserver backends which support the update
    function, or the named backend(s) only.
    '''
    back = self._gen_back(back)
    for fsb in back:
        fstr = '{0}.update'.format(fsb)
        if fstr in self.servers:
            log.debug('Updating {0} fileserver cache'.format(fsb))
            self.servers[fstr]()
def envs(self, back=None, sources=False):
    '''
    Return the environments for the named backend or all backends
    '''
    back = self._gen_back(back)
    ret = set()
    if sources:
        ret = {}
    for fsb in back:
        fstr = '{0}.envs'.format(fsb)
        kwargs = ({'ignore_cache': True}
                  if 'ignore_cache' in _argspec(self.servers[fstr]).args
                  and self.opts['__role'] == 'minion'
                  else {})
        if sources:
            ret[fsb] = self.servers[fstr](**kwargs)
        else:
            ret.update(self.servers[fstr](**kwargs))
    if sources:
        return ret
    return list(ret)

def init(self, back=None):
    '''
    Initialize the backend, only do so if the fs supports an init function
    '''
    back = self._gen_back(back)
    for fsb in back:
        fstr = '{0}.init'.format(fsb)
        if fstr in self.servers:
            self.servers[fstr]()

def _find_file(self, load):
    '''
    Convenience function for calls made using the RemoteClient
    '''
    path = load.get('path')
    if not path:
        return {'path': '',
                'rel': ''}
    tgt_env = load.get('saltenv', 'base')
    return self.find_file(path, tgt_env)

def file_find(self, load):
    '''
    Convenience function for calls made using the LocalClient
    '''
    path = load.get('path')
    if not path:
        return {'path': '',
                'rel': ''}
    tgt_env = load.get('saltenv', 'base')
    return self.find_file(path, tgt_env)
def find_file(self, path, saltenv, back=None):
    '''
    Find the path and return the fnd structure, this structure is passed
    to other backend interfaces.
    '''
    back = self._gen_back(back)
    kwargs = {}
    fnd = {'path': '',
           'rel': ''}
    if os.path.isabs(path):
        return fnd
    if '../' in path:
        return fnd
    if salt.utils.url.is_escaped(path):
        path = salt.utils.url.unescape(path)
    elif '?' in path:
        hcomps = path.split('?')
        path = hcomps[0]
        comps = hcomps[1].split('&')
        for comp in comps:
            if '=' not in comp:
                continue
            args = comp.split('=', 1)
            kwargs[args[0]] = args[1]

    if 'env' in kwargs:
        salt.utils.warn_until(
            'Oxygen',
            "Parameter 'env' has been detected in the argument list. This "
            "parameter is no longer used and has been replaced by 'saltenv' "
            "as of Salt 2016.11.0. This warning will be removed in Salt "
            "Oxygen."
        )
        kwargs.pop('env')
    if 'saltenv' in kwargs:
        saltenv = kwargs.pop('saltenv')

    if not isinstance(saltenv, six.string_types):
        saltenv = six.text_type(saltenv)

    for fsb in back:
        fstr = '{0}.find_file'.format(fsb)
        if fstr in self.servers:
            fnd = self.servers[fstr](path, saltenv, **kwargs)
            if fnd.get('path'):
                fnd['back'] = fsb
                return fnd
    return fnd
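# A hedged illustration (hypothetical `fs` Fileserver instance): extra
# arguments can ride along as a querystring on the path, and a 'saltenv'
# pair there overrides the positional environment:
#
#   fs.find_file('foo/bar.txt', 'base')
#   fs.find_file('foo/bar.txt?saltenv=dev&opt=1', 'base')
#
# The second call searches the 'dev' environment and passes the remaining
# pairs (here a hypothetical opt='1') as keyword arguments to each
# backend's find_file().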
def serve_file(self, load):
    '''
    Serve up a chunk of a file
    '''
    ret = {'data': '',
           'dest': ''}

    if 'env' in load:
        salt.utils.warn_until(
            'Oxygen',
            "Parameter 'env' has been detected in the argument list. This "
            "parameter is no longer used and has been replaced by 'saltenv' "
            "as of Salt 2016.11.0. This warning will be removed in Salt "
            "Oxygen."
        )
        load.pop('env')

    if 'path' not in load or 'loc' not in load or 'saltenv' not in load:
        return ret
    if not isinstance(load['saltenv'], six.string_types):
        load['saltenv'] = six.text_type(load['saltenv'])

    fnd = self.find_file(load['path'], load['saltenv'])
    if not fnd.get('back'):
        return ret
    fstr = '{0}.serve_file'.format(fnd['back'])
    if fstr in self.servers:
        return self.servers[fstr](load, fnd)
    return ret

def __file_hash_and_stat(self, load):
    '''
    Common code for hashing and stating files
    '''
    if 'env' in load:
        salt.utils.warn_until(
            'Oxygen',
            "Parameter 'env' has been detected in the argument list. This "
            "parameter is no longer used and has been replaced by 'saltenv' "
            "as of Salt 2016.11.0. This warning will be removed in Salt "
            "Oxygen."
        )
        load.pop('env')

    if 'path' not in load or 'saltenv' not in load:
        return '', None
    if not isinstance(load['saltenv'], six.string_types):
        load['saltenv'] = six.text_type(load['saltenv'])

    fnd = self.find_file(salt.utils.locales.sdecode(load['path']),
                         load['saltenv'])
    if not fnd.get('back'):
        return '', None
    stat_result = fnd.get('stat', None)
    fstr = '{0}.file_hash'.format(fnd['back'])
    if fstr in self.servers:
        return self.servers[fstr](load, fnd), stat_result
    return '', None

def file_hash(self, load):
    '''
    Return the hash of a given file
    '''
    try:
        return self.__file_hash_and_stat(load)[0]
    except (IndexError, TypeError):
        return ''

def file_hash_and_stat(self, load):
    '''
    Return the hash and stat result of a given file
    '''
    try:
        return self.__file_hash_and_stat(load)
    except (IndexError, TypeError):
        return '', None
def clear_file_list_cache(self, load):
    '''
    Deletes the file_lists cache files
    '''
    if 'env' in load:
        salt.utils.warn_until(
            'Oxygen',
            "Parameter 'env' has been detected in the argument list. This "
            "parameter is no longer used and has been replaced by 'saltenv' "
            "as of Salt 2016.11.0. This warning will be removed in Salt "
            "Oxygen."
        )
        load.pop('env')

    saltenv = load.get('saltenv', [])
    if saltenv is not None:
        if not isinstance(saltenv, list):
            try:
                saltenv = [x.strip() for x in saltenv.split(',')]
            except AttributeError:
                saltenv = [x.strip() for x in str(saltenv).split(',')]

        for idx, val in enumerate(saltenv):
            if not isinstance(val, six.string_types):
                saltenv[idx] = six.text_type(val)

    ret = {}
    fsb = self._gen_back(load.pop('fsbackend', None))
    list_cachedir = os.path.join(self.opts['cachedir'], 'file_lists')
    try:
        file_list_backends = os.listdir(list_cachedir)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            log.debug('No file list caches found')
            return {}
        else:
            log.error(
                'Failed to get list of saltenvs for which the master has '
                'cached file lists: %s', exc
            )
            # Cannot continue without the cache dir listing (the original
            # fell through here and raised a NameError below).
            return {}

    for back in file_list_backends:
        # The file_list cache dir for gitfs is 'git', for hgfs 'hg', etc.
        back_virtualname = re.sub('fs$', '', back)
        try:
            cache_files = os.listdir(os.path.join(list_cachedir, back))
        except OSError as exc:
            log.error(
                'Failed to find file list caches for saltenv \'%s\': %s',
                back, exc
            )
            continue
        for cache_file in cache_files:
            try:
                cache_saltenv, extension = cache_file.rsplit('.', 1)
            except ValueError:
                # Filename does not correspond to a file_lists cache file
                continue
            if extension != 'p':
                # Filename does not correspond to a file_lists cache file
                continue
            elif back_virtualname not in fsb \
                    or (saltenv is not None
                        and cache_saltenv not in saltenv):
                log.debug(
                    'Skipping %s file list cache for saltenv \'%s\'',
                    back, cache_saltenv
                )
                continue
            try:
                os.remove(os.path.join(list_cachedir, back, cache_file))
            except OSError as exc:
                if exc.errno != errno.ENOENT:
                    log.error('Failed to remove %s: %s',
                              exc.filename, exc.strerror)
            else:
                ret.setdefault(back, []).append(cache_saltenv)
                log.debug(
                    'Removed %s file list cache for saltenv \'%s\'',
                    cache_saltenv, back
                )
    return ret
def file_list(self, load):
    '''
    Return a list of files from the dominant environment
    '''
    if 'env' in load:
        salt.utils.warn_until(
            'Oxygen',
            "Parameter 'env' has been detected in the argument list. This "
            "parameter is no longer used and has been replaced by 'saltenv' "
            "as of Salt 2016.11.0. This warning will be removed in Salt "
            "Oxygen."
        )
        load.pop('env')

    ret = set()
    if 'saltenv' not in load:
        return []
    if not isinstance(load['saltenv'], six.string_types):
        load['saltenv'] = six.text_type(load['saltenv'])

    for fsb in self._gen_back(load.pop('fsbackend', None)):
        fstr = '{0}.file_list'.format(fsb)
        if fstr in self.servers:
            ret.update(self.servers[fstr](load))
    ret = [salt.utils.locales.sdecode(f) for f in ret]
    prefix = load.get('prefix', '').strip('/')
    if prefix != '':
        ret = [f for f in ret if f.startswith(prefix)]
    return sorted(ret)

def file_list_emptydirs(self, load):
    '''
    List all emptydirs in the given environment
    '''
    if 'env' in load:
        salt.utils.warn_until(
            'Oxygen',
            "Parameter 'env' has been detected in the argument list. This "
            "parameter is no longer used and has been replaced by 'saltenv' "
            "as of Salt 2016.11.0. This warning will be removed in Salt "
            "Oxygen."
        )
        load.pop('env')

    ret = set()
    if 'saltenv' not in load:
        return []
    if not isinstance(load['saltenv'], six.string_types):
        load['saltenv'] = six.text_type(load['saltenv'])

    for fsb in self._gen_back(None):
        fstr = '{0}.file_list_emptydirs'.format(fsb)
        if fstr in self.servers:
            ret.update(self.servers[fstr](load))
    ret = [salt.utils.locales.sdecode(f) for f in ret]
    prefix = load.get('prefix', '').strip('/')
    if prefix != '':
        ret = [f for f in ret if f.startswith(prefix)]
    return sorted(ret)

def dir_list(self, load):
    '''
    List all directories in the given environment
    '''
    if 'env' in load:
        salt.utils.warn_until(
            'Oxygen',
            "Parameter 'env' has been detected in the argument list. This "
            "parameter is no longer used and has been replaced by 'saltenv' "
            "as of Salt 2016.11.0. This warning will be removed in Salt "
            "Oxygen."
        )
        load.pop('env')

    ret = set()
    if 'saltenv' not in load:
        return []
    if not isinstance(load['saltenv'], six.string_types):
        load['saltenv'] = six.text_type(load['saltenv'])

    for fsb in self._gen_back(load.pop('fsbackend', None)):
        fstr = '{0}.dir_list'.format(fsb)
        if fstr in self.servers:
            ret.update(self.servers[fstr](load))
    ret = [salt.utils.locales.sdecode(f) for f in ret]
    prefix = load.get('prefix', '').strip('/')
    if prefix != '':
        ret = [f for f in ret if f.startswith(prefix)]
    return sorted(ret)

def symlink_list(self, load):
    '''
    Return a list of symlinked files and dirs
    '''
    if 'env' in load:
        salt.utils.warn_until(
            'Oxygen',
            "Parameter 'env' has been detected in the argument list. This "
            "parameter is no longer used and has been replaced by 'saltenv' "
            "as of Salt 2016.11.0. This warning will be removed in Salt "
            "Oxygen."
        )
        load.pop('env')

    ret = {}
    if 'saltenv' not in load:
        return {}
    if not isinstance(load['saltenv'], six.string_types):
        load['saltenv'] = six.text_type(load['saltenv'])

    for fsb in self._gen_back(load.pop('fsbackend', None)):
        symlstr = '{0}.symlink_list'.format(fsb)
        if symlstr in self.servers:
            ret = self.servers[symlstr](load)
    ret = dict([
        (salt.utils.locales.sdecode(x), salt.utils.locales.sdecode(y))
        for x, y in ret.items()
    ])
    prefix = load.get('prefix', '').strip('/')
    if prefix != '':
        ret = dict([
            (x, y) for x, y in six.iteritems(ret) if x.startswith(prefix)
        ])
    return ret
def send(self, load, tries=None, timeout=None, raw=False):
    '''
    Emulate the channel send method, the tries and timeout are not used
    '''
    if 'cmd' not in load:
        log.error('Malformed request, no cmd: {0}'.format(load))
        return {}
    cmd = load['cmd'].lstrip('_')
    if cmd in self.cmd_stub:
        return self.cmd_stub[cmd]
    if cmd == 'file_envs':
        return self.fs.envs()
    if not hasattr(self.fs, cmd):
        log.error('Malformed request, invalid cmd: {0}'.format(load))
        return {}
    return getattr(self.fs, cmd)(load)
@classmethod
@abc.abstractmethod
def _db_name(cls):
    '''
    Return a friendly name for the database, e.g. 'MySQL' or 'SQLite'.
    Used in logging output.
    '''
    pass

@abc.abstractmethod
def _get_cursor(self):
    '''
    Yield a PEP 249 compliant Cursor as a context manager.
    '''
    pass
def extract_queries(self, args, kwargs):
    '''
    This function normalizes the config block into a set of queries we
    can use. The return is a list of consistently laid out dicts.
    '''
    # The query buffer: a list of [root, query] pairs.
    qbuffer = []

    # Add on the non-keyword queries from the config...
    qbuffer.extend([[None, s] for s in args])

    # ...then the keyword queries, in sorted key order.
    klist = list(kwargs.keys())
    klist.sort()
    qbuffer.extend([[k, kwargs[k]] for k in klist])

    # Filter out values that don't contain anything.
    qbuffer = [
        x for x in qbuffer
        if (isinstance(x[1], six.string_types) and len(x[1])) or
           (isinstance(x[1], (list, tuple)) and len(x[1]) > 0 and x[1][0]) or
           (isinstance(x[1], dict) and 'query' in x[1] and
            len(x[1]['query']))
    ]

    # Next, turn the whole buffer into full dicts.
    for qb in qbuffer:
        defaults = {'query': '',
                    'depth': 0,
                    'as_list': False,
                    'with_lists': None,
                    'ignore_null': False}
        if isinstance(qb[1], six.string_types):
            defaults['query'] = qb[1]
        elif isinstance(qb[1], (list, tuple)):
            defaults['query'] = qb[1][0]
            if len(qb[1]) > 1:
                defaults['depth'] = qb[1][1]
        else:
            defaults.update(qb[1])
            if defaults['with_lists']:
                defaults['with_lists'] = [
                    int(i) for i in defaults['with_lists'].split(',')
                ]
        qb[1] = defaults

    return qbuffer
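# A hedged illustration of the normalization: positional args become
# un-rooted queries, keyword args become rooted ones, and every entry is
# padded out to the full settings dict, e.g.
#
#   extract_queries(
#       ['SELECT role, val FROM pillar WHERE minion = %s'],
#       {'apps': {'query': 'SELECT app, ver FROM apps', 'depth': 2}})
#
# would return roughly:
#
#   [[None, {'query': 'SELECT role, val FROM pillar WHERE minion = %s',
#            'depth': 0, 'as_list': False, 'with_lists': None,
#            'ignore_null': False}],
#    ['apps', {'query': 'SELECT app, ver FROM apps', 'depth': 2,
#              'as_list': False, 'with_lists': None,
#              'ignore_null': False}]]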
def enter_root(self, root):
    '''
    Set self.focus for kwarg queries
    '''
    if root:
        self.result[root] = self.focus = {}
    else:
        self.focus = self.result

def process_fields(self, field_names, depth):
    '''
    The primary purpose of this function is to store the sql field list
    and the depth to which we process.
    '''
    self.field_names = field_names
    self.num_fields = len(field_names)
    if depth == 0 or depth >= self.num_fields:
        self.depth = self.num_fields - 1
    else:
        self.depth = depth
def process_results(self, rows):
    '''
    This function takes a list of database results and iterates over,
    merging them into a dict form.
    '''
    listify = OrderedDict()
    listify_dicts = OrderedDict()
    for ret in rows:
        # crd is the current return-data level being built.
        crd = self.focus
        # Walk and create dicts above the final layer.
        for i in range(0, self.depth - 1):
            # Track values that with_lists will later turn into lists.
            if i + 1 in self.with_lists:
                if id(crd) not in listify:
                    listify[id(crd)] = []
                    listify_dicts[id(crd)] = crd
                if ret[i] not in listify[id(crd)]:
                    listify[id(crd)].append(ret[i])
            if ret[i] not in crd:
                # Key missing
                crd[ret[i]] = {}
                crd = crd[ret[i]]
            else:
                # Check the type of collision.
                ty = type(crd[ret[i]])
                if ty is list:
                    # Already made into a list; append a fresh dict.
                    temp = {}
                    crd[ret[i]].append(temp)
                    crd = temp
                elif ty is not dict:
                    # Neither a list nor a dict.
                    if self.as_list:
                        # Promote the scalar to a list.
                        temp = {}
                        crd[ret[i]] = [crd[ret[i]], temp]
                        crd = temp
                    else:
                        # Overwrite.
                        crd[ret[i]] = {}
                        crd = crd[ret[i]]
                else:
                    # A dict; descend.
                    crd = crd[ret[i]]

        # If this is true, the penultimate field is the key.
        if self.depth == self.num_fields - 1:
            nk = self.num_fields - 2
            # Should we (and will we) have a list at the end?
            if (self.as_list and ret[nk] in crd) or \
                    (nk + 1 in self.with_lists):
                if ret[nk] in crd:
                    if not isinstance(crd[ret[nk]], list):
                        crd[ret[nk]] = [crd[ret[nk]]]
                else:
                    crd[ret[nk]] = []
                crd[ret[nk]].append(ret[self.num_fields - 1])
            else:
                if not self.ignore_null or \
                        ret[self.num_fields - 1] is not None:
                    crd[ret[nk]] = ret[self.num_fields - 1]
        else:
            # Otherwise the field names become the keys of the last dict.
            if ret[self.depth - 1] not in crd:
                crd[ret[self.depth - 1]] = {}
            if self.depth in self.with_lists:
                if id(crd) not in listify:
                    listify[id(crd)] = []
                    listify_dicts[id(crd)] = crd
                if ret[self.depth - 1] not in listify[id(crd)]:
                    listify[id(crd)].append(ret[self.depth - 1])
            crd = crd[ret[self.depth - 1]]
            # Put the remaining fields into the dict by field name.
            for i in range(self.depth, self.num_fields):
                nk = self.field_names[i]
                # Listify
                if i + 1 in self.with_lists:
                    if id(crd) not in listify:
                        listify[id(crd)] = []
                        listify_dicts[id(crd)] = crd
                    if nk not in listify[id(crd)]:
                        listify[id(crd)].append(nk)
                # Collision detection
                if self.as_list and nk in crd:
                    if isinstance(crd[nk], list):
                        crd[nk].append(ret[i])
                    else:
                        crd[nk] = [crd[nk], ret[i]]
                else:
                    if not self.ignore_null or ret[i] is not None:
                        crd[nk] = ret[i]

    # Get the key list and work backwards: inner-out processing of the
    # dicts that with_lists marked for conversion.
    ks = list(listify_dicts.keys())
    ks.reverse()
    for i in ks:
        d = listify_dicts[i]
        for k in listify[i]:
            if isinstance(d[k], dict):
                d[k] = list(d[k].values())
            elif isinstance(d[k], list):
                d[k] = [d[k]]
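# A hedged illustration of the merge: with field names
# ['role', 'key', 'value'] and depth 0 (the default), the penultimate
# column becomes the key and the last column the value, so rows like
#
#   [('web', 'port', 80), ('web', 'proto', 'http')]
#
# fold into self.focus as {'web': {'port': 80, 'proto': 'http'}}.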
def fetch(self, minion_id, pillar, *args, **kwargs):
    '''
    Execute queries, merge and return as a dict.
    '''
    db_name = self._db_name()
    log.info('Querying {0} for information for {1}'.format(
        db_name, minion_id))
    qbuffer = self.extract_queries(args, kwargs)
    with self._get_cursor() as cursor:
        for root, details in qbuffer:
            cursor.execute(details['query'], (minion_id,))

            self.process_fields([row[0] for row in cursor.description],
                                details['depth'])
            self.enter_root(root)
            self.as_list = details['as_list']
            if details['with_lists']:
                self.with_lists = details['with_lists']
            else:
                self.with_lists = []
            self.ignore_null = details['ignore_null']
            self.process_results(cursor.fetchall())

            log.debug('ext_pillar {0}: Return data: {1}'.format(
                db_name, self))
    return self.result
@tornado.gen.coroutine
def compile_pillar(self):
    '''
    Return a future which will contain the pillar data from the master
    '''
    load = {'id': self.minion_id,
            'grains': self.grains,
            'saltenv': self.opts['environment'],
            'pillarenv': self.opts['pillarenv'],
            'pillar_override': self.pillar_override,
            'ver': '2',
            'cmd': '_pillar'}
    if self.ext:
        load['ext'] = self.ext
    try:
        ret_pillar = yield self.channel.crypted_transfer_decode_dictentry(
            load,
            dictkey='pillar',
        )
    except:
        log.exception('Exception getting pillar:')
        raise SaltClientError('Exception getting pillar.')

    if not isinstance(ret_pillar, dict):
        msg = ('Got a bad pillar from master, type {0}, expecting dict: '
               '{1}').format(type(ret_pillar).__name__, ret_pillar)
        log.error(msg)
        raise SaltClientError(msg)
    raise tornado.gen.Return(ret_pillar)

def compile_pillar(self):
    '''
    Return the pillar data from the master
    '''
    load = {'id': self.minion_id,
            'grains': self.grains,
            'saltenv': self.opts['environment'],
            'pillarenv': self.opts['pillarenv'],
            'pillar_override': self.pillar_override,
            'ver': '2',
            'cmd': '_pillar'}
    if self.ext:
        load['ext'] = self.ext
    ret_pillar = self.channel.crypted_transfer_decode_dictentry(
        load,
        dictkey='pillar',
    )

    if not isinstance(ret_pillar, dict):
        log.error(
            'Got a bad pillar from master, type {0}, expecting dict: '
            '{1}'.format(type(ret_pillar).__name__, ret_pillar)
        )
        return {}
    return ret_pillar
def _minion_cache_path(self, minion_id):
    '''
    Return the path to the cache file for the minion.

    Used only for disk-based backends
    '''
    return os.path.join(self.opts['cachedir'], 'pillar_cache', minion_id)

def fetch_pillar(self):
    '''
    In the event of a cache miss, we need to incur the overhead of caching
    a new pillar.
    '''
    log.debug('Pillar cache getting external pillar with ext: {0}'.format(
        self.ext))
    fresh_pillar = Pillar(self.opts,
                          self.grains,
                          self.minion_id,
                          self.saltenv,
                          ext=self.ext,
                          functions=self.functions,
                          pillar_override=self.pillar_override,
                          pillarenv=self.pillarenv)
    return fresh_pillar.compile_pillar()
def __valid_on_demand_ext_pillar(self, opts):
    '''
    Check to see if the on demand external pillar is allowed
    '''
    if not isinstance(self.ext, dict):
        log.error(
            'On-demand pillar %s is not formatted as a dictionary',
            self.ext
        )
        return False

    on_demand = opts.get('on_demand_ext_pillar', [])
    try:
        invalid_on_demand = set([x for x in self.ext if x not in on_demand])
    except TypeError:
        log.error(
            'The \'on_demand_ext_pillar\' configuration option is '
            'malformed, it should be a list of ext_pillar module names'
        )
        return False

    if invalid_on_demand:
        log.error(
            'The following ext_pillar modules are not allowed for '
            'on-demand pillar data: %s. Valid on-demand ext_pillar '
            'modules are: %s. The valid modules can be adjusted by '
            'setting the \'on_demand_ext_pillar\' config option.',
            ', '.join(sorted(invalid_on_demand)),
            ', '.join(on_demand),
        )
        return False
    return True
def __gather_avail(self):
    '''
    Gather the lists of available sls data from the master
    '''
    avail = {}
    for saltenv in self._get_envs():
        avail[saltenv] = self.client.list_states(saltenv)
    return avail

def __gen_opts(self, opts_in, grains, saltenv=None, ext=None,
               pillarenv=None):
    '''
    The options need to be altered to conform to the file client
    '''
    opts = copy.deepcopy(opts_in)
    opts['file_roots'] = opts['pillar_roots']
    opts['file_client'] = 'local'
    if not grains:
        opts['grains'] = {}
    else:
        opts['grains'] = grains
    opts['environment'] = (saltenv
                           if saltenv is not None
                           else opts.get('environment'))
    opts['pillarenv'] = (pillarenv
                         if pillarenv is not None
                         else opts.get('pillarenv'))
    opts['id'] = self.minion_id
    if opts['state_top'].startswith('salt://'):
        opts['state_top'] = opts['state_top']
    elif opts['state_top'].startswith('/'):
        opts['state_top'] = salt.utils.url.create(opts['state_top'][1:])
    else:
        opts['state_top'] = salt.utils.url.create(opts['state_top'])
    if self.ext and self.__valid_on_demand_ext_pillar(opts):
        if 'ext_pillar' in opts:
            opts['ext_pillar'].append(self.ext)
        else:
            opts['ext_pillar'] = [self.ext]
    return opts

def _get_envs(self):
    '''
    Pull the file server environments out of the master options
    '''
    envs = set(['base'])
    if 'file_roots' in self.opts:
        envs.update(list(self.opts['file_roots']))
    return envs
def get_tops(self):
    '''
    Gather the top files
    '''
    tops = collections.defaultdict(list)
    include = collections.defaultdict(list)
    done = collections.defaultdict(list)
    errors = []
    # Gather initial top files
    try:
        if self.opts['pillarenv']:
            if self.opts['pillarenv'] not in self.opts['file_roots']:
                log.debug(
                    'pillarenv \'%s\' not found in the configured pillar '
                    'environments (%s)',
                    self.opts['pillarenv'],
                    ', '.join(self.opts['file_roots'])
                )
            else:
                tops[self.opts['pillarenv']] = [
                    compile_template(
                        self.client.cache_file(
                            self.opts['state_top'],
                            self.opts['pillarenv']
                        ),
                        self.rend,
                        self.opts['renderer'],
                        self.opts['renderer_blacklist'],
                        self.opts['renderer_whitelist'],
                        self.opts['pillarenv'],
                        _pillar_rend=True
                    )
                ]
        else:
            for saltenv in self._get_envs():
                if self.opts.get('pillar_source_merging_strategy',
                                 None) == 'none':
                    if self.saltenv and saltenv != self.saltenv:
                        continue
                    if not self.saltenv and not saltenv == 'base':
                        continue
                top = self.client.cache_file(self.opts['state_top'],
                                             saltenv)
                if top:
                    tops[saltenv].append(
                        compile_template(
                            top,
                            self.rend,
                            self.opts['renderer'],
                            self.opts['renderer_blacklist'],
                            self.opts['renderer_whitelist'],
                            saltenv=saltenv,
                            _pillar_rend=True
                        )
                    )
    except Exception as exc:
        errors.append(
            'Rendering Primary Top file failed, render error:\n{0}'
            .format(exc)
        )
        log.error('Pillar rendering failed for minion {0}: '.format(
            self.minion_id), exc_info=True)

    # Search initial top files for includes
    for saltenv, ctops in six.iteritems(tops):
        for ctop in ctops:
            if 'include' not in ctop:
                continue
            for sls in ctop['include']:
                include[saltenv].append(sls)
            ctop.pop('include')

    # Go through the includes and pull out the extra tops and add them
    while include:
        pops = []
        for saltenv, states in six.iteritems(include):
            pops.append(saltenv)
            if not states:
                continue
            for sls in states:
                if sls in done[saltenv]:
                    continue
                try:
                    tops[saltenv].append(
                        compile_template(
                            self.client.get_state(
                                sls, saltenv).get('dest', False),
                            self.rend,
                            self.opts['renderer'],
                            self.opts['renderer_blacklist'],
                            self.opts['renderer_whitelist'],
                            saltenv=saltenv,
                            _pillar_rend=True
                        )
                    )
                except Exception as exc:
                    errors.append(
                        'Rendering Top file {0} failed, render error'
                        ':\n{1}'.format(sls, exc)
                    )
                done[saltenv].append(sls)
        for saltenv in pops:
            if saltenv in include:
                include.pop(saltenv)

    return tops, errors
def merge_tops(self, tops):
    '''
    Cleanly merge the top files
    '''
    top = collections.defaultdict(OrderedDict)
    orders = collections.defaultdict(OrderedDict)
    for ctops in six.itervalues(tops):
        for ctop in ctops:
            for saltenv, targets in six.iteritems(ctop):
                if saltenv == 'include':
                    continue
                for tgt in targets:
                    matches = []
                    states = OrderedDict()
                    orders[saltenv][tgt] = 0
                    ignore_missing = False
                    for comp in ctop[saltenv][tgt]:
                        if isinstance(comp, dict):
                            if 'match' in comp:
                                matches.append(comp)
                            if 'order' in comp:
                                order = comp['order']
                                if not isinstance(order, int):
                                    try:
                                        order = int(order)
                                    except ValueError:
                                        order = 0
                                orders[saltenv][tgt] = order
                            if comp.get('ignore_missing', False):
                                ignore_missing = True
                        if isinstance(comp, six.string_types):
                            states[comp] = True
                    if ignore_missing:
                        if saltenv not in self.ignored_pillars:
                            self.ignored_pillars[saltenv] = []
                        self.ignored_pillars[saltenv].extend(states.keys())
                    top[saltenv][tgt] = matches
                    top[saltenv][tgt].extend(states)
    return self.sort_top_targets(top, orders)

def sort_top_targets(self, top, orders):
    '''
    Returns the sorted high data from the merged top files
    '''
    sorted_top = collections.defaultdict(OrderedDict)
    for saltenv, targets in six.iteritems(top):
        sorted_targets = sorted(
            targets, key=lambda target: orders[saltenv][target])
        for target in sorted_targets:
            sorted_top[saltenv][target] = targets[target]
    return sorted_top
def get_top(self):
    '''
    Returns the high data derived from the top file
    '''
    tops, errors = self.get_tops()
    try:
        merged_tops = self.merge_tops(tops)
    except TypeError as err:
        merged_tops = OrderedDict()
        errors.append('Error encountered while rendering pillar top file.')
    return merged_tops, errors

def top_matches(self, top):
    '''
    Search through the top high data for matches and return the states
    that this minion needs to execute.

    Returns:
    {'saltenv': ['state1', 'state2', ...]}
    '''
    matches = {}
    for saltenv, body in six.iteritems(top):
        if self.opts['pillarenv']:
            if saltenv != self.opts['pillarenv']:
                continue
        for match, data in six.iteritems(body):
            if self.matcher.confirm_top(
                    match,
                    data,
                    self.opts.get('nodegroups', {})):
                if saltenv not in matches:
                    matches[saltenv] = env_matches = []
                else:
                    env_matches = matches[saltenv]
                for item in data:
                    if isinstance(item, six.string_types) and \
                            item not in env_matches:
                        env_matches.append(item)
    return matches
def render_pstate(self, sls, saltenv, mods, defaults=None):
    '''
    Collect a single pillar sls file and render it
    '''
    if defaults is None:
        defaults = {}
    err = ''
    errors = []
    fn_ = self.client.get_state(sls, saltenv).get('dest', False)
    if not fn_:
        if sls in self.ignored_pillars.get(saltenv, []):
            log.debug('Skipping ignored and missing SLS \'{0}\' in '
                      'environment \'{1}\''.format(sls, saltenv))
            return None, mods, errors
        elif self.opts['pillar_roots'].get(saltenv):
            msg = ('Specified SLS \'{0}\' in environment \'{1}\' is not'
                   ' available on the salt master').format(sls, saltenv)
            log.error(msg)
            errors.append(msg)
        else:
            msg = ('Specified SLS \'{0}\' in environment \'{1}\' was not '
                   'found. ').format(sls, saltenv)
            if self.opts.get('__git_pillar', False) is True:
                msg += (
                    'This is likely caused by a git_pillar top file '
                    'containing an environment other than the one for the '
                    'branch in which it resides. Each git_pillar '
                    'branch/tag must have its own top file.'
                )
            else:
                msg += (
                    'This could be because SLS \'{0}\' is in an '
                    'environment other than \'{1}\', but \'{1}\' is '
                    'included in that environment\'s Pillar top file. It '
                    'could also be due to environment \'{1}\' not being '
                    'defined in \'pillar_roots\'.'.format(sls, saltenv)
                )
            log.debug(msg)
        return None, mods, errors
    state = None
    try:
        state = compile_template(fn_,
                                 self.rend,
                                 self.opts['renderer'],
                                 self.opts['renderer_blacklist'],
                                 self.opts['renderer_whitelist'],
                                 saltenv,
                                 sls,
                                 _pillar_rend=True,
                                 **defaults)
    except Exception as exc:
        msg = 'Rendering SLS \'{0}\' failed, render error:\n{1}'.format(
            sls, exc)
        log.critical(msg)
        if self.opts.get('pillar_safe_render_error', True):
            errors.append(
                'Rendering SLS \'{0}\' failed. Please see master log for '
                'details.'.format(sls)
            )
        else:
            errors.append(msg)
    mods.add(sls)
    nstate = None
    if state:
        if not isinstance(state, dict):
            msg = 'SLS \'{0}\' does not render to a dictionary'.format(sls)
            log.error(msg)
            errors.append(msg)
        elif 'include' in state:
            if not isinstance(state['include'], list):
                msg = ('Include Declaration in SLS \'{0}\' is not formed '
                       'as a list'.format(sls))
                log.error(msg)
                errors.append(msg)
            else:
                for sub_sls in state.pop('include'):
                    if isinstance(sub_sls, dict):
                        sub_sls, v = next(six.iteritems(sub_sls))
                        defaults = v.get('defaults', {})
                        key = v.get('key', None)
                    else:
                        key = None
                    if sub_sls not in mods:
                        nstate, mods, err = self.render_pstate(
                            sub_sls,
                            saltenv,
                            mods,
                            defaults)
                    if nstate:
                        if key:
                            for key_fragment in reversed(key.split(':')):
                                nstate = {key_fragment: nstate}
                        state = merge(
                            state,
                            nstate,
                            self.merge_strategy,
                            self.opts.get('renderer', 'yaml'),
                            self.opts.get('pillar_merge_lists', False))
                    if err:
                        errors += err
    return state, mods, errors
def render_pillar(self, matches, errors=None):
    '''
    Extract the sls pillar files from the matches and render them into the
    pillar
    '''
    pillar = copy.copy(self.pillar_override)
    if errors is None:
        errors = []
    for saltenv, pstates in six.iteritems(matches):
        pstatefiles = []
        mods = set()
        for sls_match in pstates:
            matched_pstates = []
            try:
                matched_pstates = fnmatch.filter(
                    self.avail[saltenv], sls_match)
            except KeyError:
                errors.extend(
                    ['No matching pillar environment for environment '
                     '\'{0}\' found'.format(saltenv)]
                )
            if matched_pstates:
                pstatefiles.extend(matched_pstates)
            else:
                pstatefiles.append(sls_match)

        for sls in pstatefiles:
            pstate, mods, err = self.render_pstate(sls, saltenv, mods)

            if err:
                errors += err

            if pstate is not None:
                if not isinstance(pstate, dict):
                    log.error(
                        'The rendered pillar sls file, \'{0}\' state did '
                        'not return the expected data format. This is '
                        'a sign of a malformed pillar sls file. Returned '
                        'errors: {1}'.format(
                            sls,
                            ', '.join(
                                ['\'{0}\''.format(e) for e in errors])
                        )
                    )
                    continue
                pillar = merge(
                    pillar,
                    pstate,
                    self.merge_strategy,
                    self.opts.get('renderer', 'yaml'),
                    self.opts.get('pillar_merge_lists', False))

    return pillar, errors

def _external_pillar_data(self, pillar, val, pillar_dirs, key):
    '''
    Builds actual pillar data structure and updates the ``pillar`` variable
    '''
    ext = None

    if isinstance(val, dict):
        ext = self.ext_pillars[key](self.minion_id, pillar, **val)
    elif isinstance(val, list):
        if key == 'git':
            ext = self.ext_pillars[key](self.minion_id, val, pillar_dirs)
        else:
            ext = self.ext_pillars[key](self.minion_id, pillar, *val)
    elif key == 'git':
        ext = self.ext_pillars[key](self.minion_id, val, pillar_dirs)
    else:
        ext = self.ext_pillars[key](self.minion_id, pillar, val)
    return ext
def ext_pillar(self, pillar, pillar_dirs, errors=None):
    '''
    Render the external pillar data
    '''
    if errors is None:
        errors = []
    try:
        # Fetch any on-demand git_pillar remotes up front (masters only).
        if self.ext and 'git' in self.ext \
                and self.opts.get('__role') != 'minion':
            import salt.utils.gitfs
            from salt.pillar.git_pillar import PER_REMOTE_OVERRIDES
            git_pillar = salt.utils.gitfs.GitPillar(self.opts)
            git_pillar.init_remotes(self.ext['git'], PER_REMOTE_OVERRIDES)
            git_pillar.fetch_remotes()
    except TypeError:
        # Handle malformed ext_pillar
        pass
    if 'ext_pillar' not in self.opts:
        return pillar, errors
    if not isinstance(self.opts['ext_pillar'], list):
        errors.append('The "ext_pillar" option is malformed')
        log.critical(errors[-1])
        return pillar, errors
    ext = None
    # Bring in CLI pillar data
    if self.pillar_override:
        pillar = merge(pillar,
                       self.pillar_override,
                       self.merge_strategy,
                       self.opts.get('renderer', 'yaml'),
                       self.opts.get('pillar_merge_lists', False))
    for run in self.opts['ext_pillar']:
        if not isinstance(run, dict):
            errors.append('The "ext_pillar" option is malformed')
            log.critical(errors[-1])
            return {}, errors
        if next(six.iterkeys(run)) in self.opts.get(
                'exclude_ext_pillar', []):
            continue
        for key, val in six.iteritems(run):
            if key not in self.ext_pillars:
                log.critical(
                    'Specified ext_pillar interface {0} is '
                    'unavailable'.format(key)
                )
                continue
            try:
                ext = self._external_pillar_data(
                    pillar, val, pillar_dirs, key)
            except Exception as exc:
                errors.append(
                    'Failed to load ext_pillar {0}: {1}'.format(
                        key, exc.__str__())
                )
                log.error(
                    'Exception caught loading ext_pillar \'%s\':\n%s',
                    key, ''.join(traceback.format_tb(sys.exc_info()[2]))
                )
        if ext:
            pillar = merge(pillar,
                           ext,
                           self.merge_strategy,
                           self.opts.get('renderer', 'yaml'),
                           self.opts.get('pillar_merge_lists', False))
            ext = None
    return pillar, errors
def compile_pillar(self, ext=True, pillar_dirs=None):
    '''
    Render the pillar data and return
    '''
    top, top_errors = self.get_top()
    if ext:
        if self.opts.get('ext_pillar_first', False):
            self.opts['pillar'], errors = self.ext_pillar(
                self.pillar_override,
                pillar_dirs)
            self.rend = salt.loader.render(self.opts, self.functions)
            matches = self.top_matches(top)
            pillar, errors = self.render_pillar(matches, errors=errors)
            pillar = merge(self.opts['pillar'],
                           pillar,
                           self.merge_strategy,
                           self.opts.get('renderer', 'yaml'),
                           self.opts.get('pillar_merge_lists', False))
        else:
            matches = self.top_matches(top)
            pillar, errors = self.render_pillar(matches)
            pillar, errors = self.ext_pillar(
                pillar, pillar_dirs, errors=errors)
    else:
        matches = self.top_matches(top)
        pillar, errors = self.render_pillar(matches)
    errors.extend(top_errors)
    if self.opts.get('pillar_opts', False):
        mopts = dict(self.opts)
        if 'grains' in mopts:
            mopts.pop('grains')
        mopts['file_roots'] = self.actual_file_roots
        mopts['saltversion'] = __version__
        pillar['master'] = mopts
    if errors:
        for error in errors:
            log.critical('Pillar render error: {0}'.format(error))
        pillar['_errors'] = errors

    if self.pillar_override:
        pillar = merge(pillar,
                       self.pillar_override,
                       self.merge_strategy,
                       self.opts.get('renderer', 'yaml'),
                       self.opts.get('pillar_merge_lists', False))

    decrypt_errors = self.decrypt_pillar(pillar)
    if decrypt_errors:
        pillar.setdefault('_errors', []).extend(decrypt_errors)
    return pillar
def decrypt_pillar(self, pillar):
    '''
    Decrypt the specified pillar dictionary items, if configured to do so
    '''
    errors = []
    if self.opts.get('decrypt_pillar'):
        decrypt_pillar = self.opts['decrypt_pillar']
        if not isinstance(decrypt_pillar, dict):
            decrypt_pillar = \
                salt.utils.repack_dictlist(self.opts['decrypt_pillar'])
        if not decrypt_pillar:
            errors.append('decrypt_pillar config option is malformed')
        for key, rend in six.iteritems(decrypt_pillar):
            ptr = salt.utils.traverse_dict(
                pillar,
                key,
                default=None,
                delimiter=self.opts['decrypt_pillar_delimiter'])
            if ptr is None:
                log.debug('Pillar key %s not present', key)
                continue
            try:
                hash(ptr)
                immutable = True
            except TypeError:
                immutable = False
            try:
                ret = salt.utils.crypt.decrypt(
                    ptr,
                    rend or self.opts['decrypt_pillar_default'],
                    renderers=self.rend,
                    opts=self.opts,
                    valid_rend=self.opts['decrypt_pillar_renderers'])
                if immutable:
                    # Since the key pointed to an immutable type, we need
                    # to replace it in the parent dict with the decrypted
                    # return data.
                    parent, _, child = key.rpartition(
                        self.opts['decrypt_pillar_delimiter'])
                    if not parent:
                        # key is a top-level key, so the pointer to the
                        # parent is the pillar dict itself.
                        ptr = pillar
                    else:
                        ptr = salt.utils.traverse_dict(
                            pillar,
                            parent,
                            default=None,
                            delimiter=self.opts[
                                'decrypt_pillar_delimiter'])
                    if ptr is not None:
                        ptr[child] = ret
            except Exception as exc:
                msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format(
                    key, exc)
                errors.append(msg)
                log.error(msg, exc_info=True)
    return errors
def __init__(self, branch, repo_location, root, opts):
    '''
    Try to initialize the SVN repo object
    '''
    repo_hash = hashlib.md5(repo_location).hexdigest()
    repo_dir = os.path.join(opts['cachedir'], 'pillar_svnfs', repo_hash)

    self.branch = branch
    self.root = root
    self.repo_dir = repo_dir
    self.repo_location = repo_location

    if not os.path.isdir(repo_dir):
        os.makedirs(repo_dir)
        log.debug('Checking out fileserver for svn_pillar module')
        try:
            CLIENT.checkout(repo_location, repo_dir)
        except pysvn.ClientError:
            log.error('Failed to initialize svn_pillar {0} {1}'.format(
                repo_location, repo_dir))
def pillar_dir(self):
    '''
    Returns the directory of the pillars (repo cache + branch + root)
    '''
    repo_dir = self.repo_dir
    root = self.root
    branch = self.branch
    if branch == 'trunk' or branch == 'base':
        working_dir = os.path.join(repo_dir, 'trunk', root)
        if not os.path.isdir(working_dir):
            log.error('Could not find {0}/trunk/{1}'.format(
                self.repo_location, root))
        else:
            return os.path.normpath(working_dir)
    working_dir = os.path.join(repo_dir, 'branches', branch, root)
    if os.path.isdir(working_dir):
        return os.path.normpath(working_dir)
    # Fall back to the standard SVN tags layout. The original joined onto
    # the just-built branches path, which could never exist; join from the
    # repo cache root instead.
    working_dir = os.path.join(repo_dir, 'tags', branch, root)
    if os.path.isdir(working_dir):
        return os.path.normpath(working_dir)
    log.error('Could not find {0}/branches/{1}/{2}'.format(
        self.repo_location, branch, root))
    return repo_dir
def _get_options(self):
    '''
    Returns options used for the MySQL connection.
    '''
    defaults = {'host': 'localhost',
                'user': 'salt',
                'pass': 'salt',
                'db': 'salt',
                'port': 3306,
                'ssl': {}}
    _options = {}
    _opts = __opts__.get('mysql', {})
    for attr in defaults:
        if attr not in _opts:
            log.debug('Using default for MySQL {0}'.format(attr))
            _options[attr] = defaults[attr]
            continue
        _options[attr] = _opts[attr]
    return _options

@contextmanager
def _get_cursor(self):
    '''
    Yield a MySQL cursor
    '''
    _options = self._get_options()
    conn = MySQLdb.connect(host=_options['host'],
                           user=_options['user'],
                           passwd=_options['pass'],
                           db=_options['db'],
                           port=_options['port'],
                           ssl=_options['ssl'])
    cursor = conn.cursor()
    try:
        yield cursor
    except MySQLdb.DatabaseError as err:
        log.exception('Error in ext_pillar MySQL: {0}'.format(err.args))
    finally:
        conn.close()

def extract_queries(self, args, kwargs):
    '''
    This function normalizes the config block into a set of queries we
    can use. The return is a list of consistently laid out dicts.
    '''
    return super(MySQLExtPillar, self).extract_queries(args, kwargs)
def _get_options(self):
    '''
    Returns options used for the SQLCipher connection.
    '''
    defaults = {'database': '/var/lib/salt/pillar-sqlcipher.db',
                'pass': 'strong_pass_phrase',
                'timeout': 5.0}
    _options = {}
    _opts = __opts__.get('sqlcipher', {})
    for attr in defaults:
        if attr not in _opts:
            log.debug('Using default for SQLCipher pillar %s', attr)
            _options[attr] = defaults[attr]
            continue
        _options[attr] = _opts[attr]
    return _options

@contextmanager
def _get_cursor(self):
    '''
    Yield a SQLCipher cursor
    '''
    _options = self._get_options()
    conn = sqlcipher.connect(_options.get('database'),
                             timeout=float(_options.get('timeout')))
    conn.execute('pragma key="{0}"'.format(_options.get('pass')))
    cursor = conn.cursor()
    try:
        yield cursor
    except sqlcipher.Error as err:
        log.exception('Error in ext_pillar SQLCipher: %s', err.args)
    finally:
        conn.close()
def __init__(self, repo_uri):
    '''
    Initialize a hg repo (or open it if it already exists)
    '''
    self.repo_uri = repo_uri
    cachedir = os.path.join(__opts__['cachedir'], 'hg_pillar')
    hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
    if six.PY2:
        repo_hash = hash_type(repo_uri).hexdigest()
    else:
        repo_hash = hash_type(
            salt.utils.stringutils.to_bytes(repo_uri)).hexdigest()
    self.working_dir = os.path.join(cachedir, repo_hash)
    if not os.path.isdir(self.working_dir):
        self.repo = hglib.clone(repo_uri, self.working_dir)
        self.repo.open()
    else:
        self.repo = hglib.open(self.working_dir)

def update(self, branch='default'):
    '''
    Ensure we are using the latest revision in the hg repository
    '''
    log.debug('Updating hg repo from hg_pillar module (pull)')
    self.repo.pull()
    log.debug('Updating hg repo from hg_pillar module (update)')
    self.repo.update(branch, clean=True)

def close(self):
    '''
    Cleanup mercurial command server
    '''
    self.repo.close()
def __init__(self, branch, repo_location, opts):
    '''
    Try to initialize the Git repo object
    '''
    self.branch = self.map_branch(branch, opts)
    self.rp_location = repo_location
    self.opts = opts
    self._envs = set()
    self.working_dir = ''
    self.repo = None

    hash_type = getattr(hashlib, opts['hash_type'])
    hash_str = '{0} {1}'.format(self.branch, self.rp_location)
    repo_hash = hash_type(
        salt.utils.stringutils.to_bytes(hash_str)).hexdigest()
    rp_ = os.path.join(self.opts['cachedir'], 'pillar_gitfs', repo_hash)

    if not os.path.isdir(rp_):
        os.makedirs(rp_)
    try:
        self.repo = git.Repo.init(rp_)
    except (git.exc.NoSuchPathError,
            git.exc.InvalidGitRepositoryError) as exc:
        log.error(
            'GitPython exception caught while initializing the repo: %s. '
            'Maybe the git CLI program is not available.', exc
        )
    except Exception as exc:
        log.exception(
            'Undefined exception in git pillar. This may be a bug and '
            'should be reported to the SaltStack developers.'
        )

    # The git directory we are working on; should be the same as
    # self.repo.working_dir
    self.working_dir = rp_

    if isinstance(self.repo, git.Repo):
        if not self.repo.remotes:
            try:
                self.repo.create_remote('origin', self.rp_location)
                # Ignore git ssl verification if requested
                if self.opts.get('pillar_gitfs_ssl_verify', True):
                    self.repo.git.config('http.sslVerify', 'true')
                else:
                    self.repo.git.config('http.sslVerify', 'false')
            except os.error:
                # This occurs when two processes try to write to the git
                # config at once; another process has already completed
                # the initialization, so it can be ignored.
                pass
        elif self.repo.remotes.origin.url != self.rp_location:
            self.repo.remotes.origin.config_writer.set(
                'url', self.rp_location)
def update(self):
    '''
    Ensure you are following the latest changes on the remote

    Return boolean whether it worked
    '''
    try:
        log.debug('Legacy git_pillar: Updating \'%s\'', self.rp_location)
        self.repo.git.fetch()
    except git.exc.GitCommandError as exc:
        log.error(
            'Unable to fetch the latest changes from remote %s: %s',
            self.rp_location, exc
        )
        return False

    try:
        checkout_ref = 'origin/{0}'.format(self.branch)
        log.debug('Legacy git_pillar: Checking out %s for \'%s\'',
                  checkout_ref, self.rp_location)
        self.repo.git.checkout(checkout_ref)
    except git.exc.GitCommandError as exc:
        log.error(
            'Legacy git_pillar: Failed to checkout %s for \'%s\': %s',
            checkout_ref, self.rp_location, exc
        )
        return False

    return True

def envs(self):
    '''
    Return a list of refs that can be used as environments
    '''
    if isinstance(self.repo, git.Repo):
        remote = self.repo.remote()
        for ref in self.repo.refs:
            parted = ref.name.partition('/')
            short = parted[2] if parted[2] else parted[0]
            if isinstance(ref, git.Head):
                if short == 'master':
                    short = 'base'
                if ref not in remote.stale_refs:
                    self._envs.add(short)
            elif isinstance(ref, git.Tag):
                self._envs.add(short)

    return list(self._envs)
def _get_options(self):
    '''
    Returns options used for the POSTGRES connection.
    '''
    defaults = {'host': 'localhost',
                'user': 'salt',
                'pass': 'salt',
                'db': 'salt',
                'port': 5432}
    _options = {}
    _opts = __opts__.get('postgres', {})
    for attr in defaults:
        if attr not in _opts:
            log.debug('Using default for POSTGRES {0}'.format(attr))
            _options[attr] = defaults[attr]
            continue
        _options[attr] = _opts[attr]
    return _options

@contextmanager
def _get_cursor(self):
    '''
    Yield a POSTGRES cursor
    '''
    _options = self._get_options()
    conn = psycopg2.connect(host=_options['host'],
                            user=_options['user'],
                            password=_options['pass'],
                            dbname=_options['db'])
    cursor = conn.cursor()
    try:
        yield cursor
        log.debug('Connected to POSTGRES DB')
    except psycopg2.DatabaseError as err:
        log.exception('Error in ext_pillar POSTGRES: {0}'.format(err.args))
    finally:
        conn.close()

def extract_queries(self, args, kwargs):
    '''
    This function normalizes the config block into a set of queries we
    can use. The return is a list of consistently laid out dicts.
    '''
    return super(POSTGRESExtPillar, self).extract_queries(args, kwargs)
def _get_options(self):
    '''
    Returns options used for the SQLite3 connection.
    '''
    defaults = {'database': '/var/lib/salt/pillar.db',
                'timeout': 5.0}
    _options = {}
    _opts = {}
    if 'sqlite3' in __opts__ and 'database' in __opts__['sqlite3']:
        _opts = __opts__.get('sqlite3', {})
    for attr in defaults:
        if attr not in _opts:
            log.debug('Using default for SQLite3 pillar {0}'.format(attr))
            _options[attr] = defaults[attr]
            continue
        _options[attr] = _opts[attr]
    return _options

@contextmanager
def _get_cursor(self):
    '''
    Yield a SQLite3 cursor
    '''
    _options = self._get_options()
    conn = sqlite3.connect(_options.get('database'),
                           timeout=float(_options.get('timeout')))
    cursor = conn.cursor()
    try:
        yield cursor
    except sqlite3.Error as err:
        log.exception('Error in ext_pillar SQLite3: {0}'.format(err.args))
    finally:
        conn.close()
def run(self):
    u'''
    Run the logic for saltkey
    '''
    self._update_opts()
    cmd = self.opts[u'fun']

    veri = None
    ret = None
    try:
        if cmd in (u'accept', u'reject', u'delete'):
            ret = self._run_cmd(u'name_match')
            if not isinstance(ret, dict):
                salt.output.display_output(ret, u'key', opts=self.opts)
                return ret
            ret = self._filter_ret(cmd, ret)
            if not ret:
                self._print_no_match(cmd, self.opts[u'match'])
                return
            print(u'The following keys are going to be {0}ed:'.format(
                cmd.rstrip(u'e')))
            salt.output.display_output(ret, u'key', opts=self.opts)

            if not self.opts.get(u'yes', False):
                try:
                    if cmd.startswith(u'delete'):
                        veri = input(u'Proceed? [N/y] ')
                        if not veri:
                            veri = u'n'
                    else:
                        veri = input(u'Proceed? [n/Y] ')
                        if not veri:
                            veri = u'y'
                except KeyboardInterrupt:
                    raise SystemExit(u'\nExiting on CTRL-c')
            # Act only on the keys that were just displayed
            self.opts[u'match_dict'] = ret
            self.opts.pop(u'match', None)
            list_ret = ret

        if veri is None or veri.lower().startswith(u'y'):
            ret = self._run_cmd(cmd)
            if cmd in (u'accept', u'reject', u'delete'):
                if cmd == u'delete':
                    ret = list_ret
                for minions in ret.values():
                    for minion in minions:
                        print(u'Key for minion {0} {1}ed.'.format(
                            minion, cmd.rstrip(u'e')))
            elif isinstance(ret, dict):
                salt.output.display_output(ret, u'key', opts=self.opts)
            else:
                salt.output.display_output(
                    {u'return': ret}, u'key', opts=self.opts)
    except salt.exceptions.SaltException as exc:
        ret = u'{0}'.format(exc)
        if not self.opts.get(u'quiet', False):
            salt.output.display_output(ret, u'nested', self.opts)
    return ret
def _call_all(self, fun, *args):
    u'''
    Call the given function on all backend keys
    '''
    for kback in self.keys:
        print(kback)
        getattr(self.keys[kback], fun)(*args)

def _check_minions_directories(self):
    u'''
    Return the minion keys directory paths
    '''
    minions_accepted = os.path.join(self.opts[u'pki_dir'], self.ACC)
    minions_pre = os.path.join(self.opts[u'pki_dir'], self.PEND)
    minions_rejected = os.path.join(self.opts[u'pki_dir'], self.REJ)
    minions_denied = os.path.join(self.opts[u'pki_dir'], self.DEN)
    return minions_accepted, minions_pre, minions_rejected, minions_denied
def gen_keys(self, keydir=None, keyname=None, keysize=None, user=None):
    u'''
    Generate minion RSA public keypair
    '''
    keydir, keyname, keysize, user = self._get_key_attrs(
        keydir, keyname, keysize, user)
    salt.crypt.gen_keys(keydir, keyname, keysize, user, self.passphrase)
    return salt.utils.pem_finger(os.path.join(keydir, keyname + u'.pub'))

def gen_signature(self, privkey, pubkey, sig_path):
    u'''
    Generate master public-key-signature
    '''
    return salt.crypt.gen_signature(privkey,
                                    pubkey,
                                    sig_path,
                                    self.passphrase)
def gen_keys_signature(self, priv, pub, signature_path, auto_create=False,
                       keysize=None):
    u'''
    Generate master public-key-signature
    '''
    # Check given pub-key
    if pub:
        if not os.path.isfile(pub):
            return u'Public-key {0} does not exist'.format(pub)
    # Default to master.pub
    else:
        mpub = self.opts[u'pki_dir'] + u'/' + u'master.pub'
        if os.path.isfile(mpub):
            pub = mpub

    # Check given priv-key
    if priv:
        if not os.path.isfile(priv):
            return u'Private-key {0} does not exist'.format(priv)
    # Default to master_sign.pem
    else:
        mpriv = self.opts[u'pki_dir'] + u'/' + u'master_sign.pem'
        if os.path.isfile(mpriv):
            priv = mpriv

    if not priv:
        if auto_create:
            log.debug(
                u'Generating new signing key-pair .%s.* in %s',
                self.opts[u'master_sign_key_name'], self.opts[u'pki_dir']
            )
            salt.crypt.gen_keys(self.opts[u'pki_dir'],
                                self.opts[u'master_sign_key_name'],
                                keysize or self.opts[u'keysize'],
                                self.opts.get(u'user'),
                                self.passphrase)
            priv = self.opts[u'pki_dir'] + u'/' + \
                self.opts[u'master_sign_key_name'] + u'.pem'
        else:
            return u'No usable private-key found'

    if not pub:
        return u'No usable public-key found'

    log.debug(u'Using public-key %s', pub)
    log.debug(u'Using private-key %s', priv)

    if signature_path:
        if not os.path.isdir(signature_path):
            log.debug(u'target directory %s does not exist',
                      signature_path)
    else:
        signature_path = self.opts[u'pki_dir']

    sign_path = signature_path + u'/' + \
        self.opts[u'master_pubkey_signature']

    skey = get_key(self.opts)
    return skey.gen_signature(priv, pub, sign_path)
def check_minion_cache(self, preserve_minions=None):
    u'''
    Check the minion cache to make sure that old minion data is cleared

    Optionally, pass in a list of minions which should have their caches
    preserved. To preserve all caches, set
    __opts__['preserve_minion_cache']
    '''
    if preserve_minions is None:
        preserve_minions = []
    keys = self.list_keys()
    minions = []
    for key, val in six.iteritems(keys):
        minions.extend(val)
    if not self.opts.get(u'preserve_minion_cache', False) or \
            not preserve_minions:
        m_cache = os.path.join(self.opts[u'cachedir'], self.ACC)
        if os.path.isdir(m_cache):
            for minion in os.listdir(m_cache):
                if minion not in minions and \
                        minion not in preserve_minions:
                    shutil.rmtree(os.path.join(m_cache, minion))
        cache = salt.cache.factory(self.opts)
        clist = cache.ls(self.ACC)
        if clist:
            for minion in clist:
                if minion not in minions and \
                        minion not in preserve_minions:
                    cache.flush(u'{0}/{1}'.format(self.ACC, minion))

def check_master(self):
    u'''
    Log if the master is not running

    :rtype: bool
    :return: Whether or not the master is running
    '''
    if not os.path.exists(
            os.path.join(self.opts[u'sock_dir'], u'publish_pull.ipc')):
        return False
    return True
def name_match(self, match, full=False):
    u'''
    Accept a glob to match the name of a key and return the key's location
    '''
    if full:
        matches = self.all_keys()
    else:
        matches = self.list_keys()
    ret = {}
    if u',' in match and isinstance(match, six.string_types):
        match = match.split(u',')
    for status, keys in six.iteritems(matches):
        for key in salt.utils.isorted(keys):
            if isinstance(match, list):
                for match_item in match:
                    if fnmatch.fnmatch(key, match_item):
                        if status not in ret:
                            ret[status] = []
                        ret[status].append(key)
            else:
                if fnmatch.fnmatch(key, match):
                    if status not in ret:
                        ret[status] = []
                    ret[status].append(key)
    return ret
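# A hedged illustration (hypothetical Key instance `key`, minions web1 and
# web2 accepted, db1 pending):
#
#   key.name_match('web*')      # -> {'minions': ['web1', 'web2']}
#   key.name_match('web*,db*')  # -> {'minions': ['web1', 'web2'],
#                               #     'minions_pre': ['db1']}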
def dict_match(self, match_dict):
    u'''
    Accept a dictionary of keys and return the current state of the
    specified keys
    '''
    ret = {}
    cur_keys = self.list_keys()
    for status, keys in six.iteritems(match_dict):
        for key in salt.utils.isorted(keys):
            for keydir in (self.ACC, self.PEND, self.REJ, self.DEN):
                if keydir and fnmatch.filter(cur_keys.get(keydir, []), key):
                    ret.setdefault(keydir, []).append(key)
    return ret

def local_keys(self):
    u'''
    Return a dict of local keys
    '''
    ret = {u'local': []}
    for fn_ in salt.utils.isorted(os.listdir(self.opts[u'pki_dir'])):
        if fn_.endswith(u'.pub') or fn_.endswith(u'.pem'):
            path = os.path.join(self.opts[u'pki_dir'], fn_)
            if os.path.isfile(path):
                ret[u'local'].append(fn_)
    return ret

def list_keys(self):
    u'''
    Return a dict of managed keys and what the key status are
    '''
    key_dirs = self._check_minions_directories()
    ret = {}
    for dir_ in key_dirs:
        if dir_ is None:
            continue
        ret[os.path.basename(dir_)] = []
        try:
            for fn_ in salt.utils.isorted(os.listdir(dir_)):
                if not fn_.startswith(u'.'):
                    if os.path.isfile(os.path.join(dir_, fn_)):
                        ret[os.path.basename(dir_)].append(fn_)
        except (OSError, IOError):
            continue
    return ret