'Find the specified file in the specified environment'
def find_file(self, path, tgt_env):
tree = self.get_tree(tgt_env) if (not tree): return (None, None, None) blob = None depth = 0 while True: depth += 1 if (depth > SYMLINK_RECURSE_DEPTH): blob = None break try: file_blob = (tree / path) if stat.S_ISLNK(file_blob.mode): stream = six.StringIO() file_blob.stream_data(stream) stream.seek(0) link_tgt = stream.read() stream.close() path = salt.utils.path.join(os.path.dirname(path), link_tgt, use_posixpath=True) else: blob = file_blob if isinstance(blob, git.Tree): blob = None break except KeyError: blob = None break if isinstance(blob, git.Blob): return (blob, blob.hexsha, blob.mode) return (None, None, None)
'Return the configured refspecs'
def get_refspecs(self):
    refspecs = self.repo.git.config('--get-all', 'remote.origin.fetch')
    return [x.strip() for x in refspecs.splitlines()]
'Return a git.Tree object matching a head ref fetched into refs/remotes/origin/'
def get_tree_from_branch(self, ref):
    try:
        return git.RemoteReference(
            self.repo, 'refs/remotes/origin/{0}'.format(ref)).commit.tree
    except ValueError:
        return None
'Return a git.Tree object matching a tag ref fetched into refs/tags/'
def get_tree_from_tag(self, ref):
    try:
        return git.TagReference(
            self.repo, 'refs/tags/{0}'.format(ref)).commit.tree
    except ValueError:
        return None
'Return a git.Tree object matching a SHA'
def get_tree_from_sha(self, ref):
    try:
        return self.repo.rev_parse(ref).tree
    except (gitdb.exc.ODBError, AttributeError):
        return None
'Using the blob object, write the file to the destination path'
def write_file(self, blob, dest):
    with salt.utils.files.fopen(dest, 'wb+') as fp_:
        blob.stream_data(fp_)
'Add the specified refspecs to the "origin" remote'
def add_refspecs(self, *refspecs):
    for refspec in refspecs:
        try:
            self.repo.config.set_multivar('remote.origin.fetch', 'FOO', refspec)
            log.debug(
                "Added refspec '%s' to %s remote '%s'",
                refspec, self.role, self.id)
        except Exception as exc:
            log.error(
                "Failed to add refspec '%s' to %s remote '%s': %s",
                refspec, self.role, self.id, exc)
'Checkout the configured branch/tag'
def checkout(self):
tgt_ref = self.get_checkout_target() local_ref = ('refs/heads/' + tgt_ref) remote_ref = ('refs/remotes/origin/' + tgt_ref) tag_ref = ('refs/tags/' + tgt_ref) try: local_head = self.repo.lookup_reference('HEAD') except KeyError: log.warning("HEAD not present in %s remote '%s'", self.role, self.id) return None try: head_sha = local_head.get_object().hex except AttributeError: log.error("Unable to get SHA of HEAD for %s remote '%s'", self.role, self.id) return None except KeyError: head_sha = None refs = self.repo.listall_references() def _perform_checkout(checkout_ref, branch=True): '\n DRY function for checking out either a branch or a tag\n ' try: with self.gen_lock(lock_type='checkout'): self.repo.checkout(checkout_ref) if branch: self.repo.reset(oid, pygit2.GIT_RESET_HARD) return True except GitLockError as exc: if (exc.errno == errno.EEXIST): raise GitLockError(exc.errno, "Checkout lock exists for {0} remote '{1}'".format(self.role, self.id)) else: log.error("Error %d encountered obtaining checkout lock for %s remote '%s'", exc.errno, self.role, self.id) return False try: if (remote_ref in refs): oid = self.repo.lookup_reference(remote_ref).get_object().id if (local_ref not in refs): self.repo.create_reference(local_ref, oid) try: target_sha = self.repo.lookup_reference(remote_ref).get_object().hex except KeyError: log.error("pygit2 was unable to get SHA for %s in %s remote '%s'", local_ref, self.role, self.id, exc_info=True) return None if (head_sha != target_sha): head_ref = local_head.target if (isinstance(head_ref, six.string_types) and (head_ref not in refs) and (head_ref != local_ref)): branch_name = head_ref.partition('refs/heads/')[(-1)] if (not branch_name): log.error("pygit2 was unable to resolve branch name from HEAD ref '%s' in %s remote '%s'", head_ref, self.role, self.id) return None remote_head = ('refs/remotes/origin/' + branch_name) if (remote_head not in refs): remote_head = remote_ref self.repo.create_reference(head_ref, self.repo.lookup_reference(remote_head).target) if (not _perform_checkout(local_ref, branch=True)): return None return self.check_root() elif (tag_ref in refs): tag_obj = self.repo.revparse_single(tag_ref) if (not isinstance(tag_obj, pygit2.Tag)): log.error('%s does not correspond to pygit2.Tag object', tag_ref) else: try: tag_sha = tag_obj.target.hex except AttributeError: try: tag_sha = tag_obj.hex except AttributeError: log.error("Unable to resolve %s from %s remote '%s' to either an annotated or non-annotated tag", tag_ref, self.role, self.id, exc_info=True) return None if (head_sha != target_sha): if (not _perform_checkout(local_ref, branch=False)): return None return self.check_root() except GitLockError: raise except Exception as exc: log.error("Failed to checkout %s from %s remote '%s': %s", tgt_ref, self.role, self.id, exc, exc_info=True) return None log.error("Failed to checkout %s from %s remote '%s': remote ref does not exist", tgt_ref, self.role, self.id) return None
'Clean stale local refs so they don\'t appear as fileserver environments'
def clean_stale_refs(self, local_refs=None):
    if self.credentials is not None:
        log.debug(
            "pygit2 does not support detecting stale refs for "
            "authenticated remotes, saltenvs will not reflect branches/tags "
            "removed from remote '%s'", self.id)
        return []
    return super(Pygit2, self).clean_stale_refs()
'Initialize/attach to a remote using pygit2. Return a boolean which will let the calling function know whether or not a new repo was initialized by this function.'
def init_remote(self):
new = False if (not os.listdir(self.cachedir)): self.repo = pygit2.init_repository(self.cachedir) new = True else: try: try: self.repo = pygit2.Repository(self.cachedir) except GitError as exc: import pwd if ("Error stat'ing config file" not in str(exc)): raise home = pwd.getpwnam(salt.utils.get_user()).pw_dir pygit2.settings.search_path[pygit2.GIT_CONFIG_LEVEL_GLOBAL] = home self.repo = pygit2.Repository(self.cachedir) except KeyError: log.error(_INVALID_REPO.format(self.cachedir, self.url, self.role)) return new self.gitdir = salt.utils.path.join(self.repo.workdir, '.git') if (not self.repo.remotes): try: self.repo.create_remote('origin', self.url) except os.error: pass else: new = True try: ssl_verify = self.repo.config.get_bool('http.sslVerify') except KeyError: ssl_verify = None if (ssl_verify != self.ssl_verify): self.repo.config.set_multivar('http.sslVerify', '', str(self.ssl_verify).lower()) if hasattr(self, 'refspecs'): self.configure_refspecs() return new
'Get a list of directories for the target environment using pygit2'
def dir_list(self, tgt_env):
def _traverse(tree, blobs, prefix): '\n Traverse through a pygit2 Tree object recursively, accumulating all\n the empty directories within it in the "blobs" list\n ' for entry in iter(tree): if (entry.oid not in self.repo): continue blob = self.repo[entry.oid] if (not isinstance(blob, pygit2.Tree)): continue blobs.append(salt.utils.path.join(prefix, entry.name, use_posixpath=True)) if len(blob): _traverse(blob, blobs, salt.utils.path.join(prefix, entry.name, use_posixpath=True)) ret = set() tree = self.get_tree(tgt_env) if (not tree): return ret if self.root(tgt_env): try: oid = tree[self.root(tgt_env)].oid tree = self.repo[oid] except KeyError: return ret if (not isinstance(tree, pygit2.Tree)): return ret relpath = (lambda path: os.path.relpath(path, self.root(tgt_env))) else: relpath = (lambda path: path) blobs = [] if len(tree): _traverse(tree, blobs, self.root(tgt_env)) add_mountpoint = (lambda path: salt.utils.path.join(self.mountpoint(tgt_env), path, use_posixpath=True)) for blob in blobs: ret.add(add_mountpoint(relpath(blob))) if self.mountpoint(tgt_env): ret.add(self.mountpoint(tgt_env)) return ret
'Check the refs and return a list of the ones which can be used as salt environments.'
def envs(self):
    ref_paths = self.repo.listall_references()
    return self._get_envs_from_ref_paths(ref_paths)
'Fetch the repo. If the local copy was updated, return True. If the local copy was already up-to-date, return False.'
def _fetch(self):
origin = self.repo.remotes[0] refs_pre = self.repo.listall_references() fetch_kwargs = {} if (self.remotecallbacks is not None): fetch_kwargs['callbacks'] = self.remotecallbacks elif (self.credentials is not None): origin.credentials = self.credentials try: fetch_results = origin.fetch(**fetch_kwargs) except GitError as exc: exc_str = get_error_message(exc).lower() if (('unsupported url protocol' in exc_str) and isinstance(self.credentials, pygit2.Keypair)): log.error("Unable to fetch SSH-based %s remote '%s'. You may need to add ssh:// to the repo string or libgit2 must be compiled with libssh2 to support SSH authentication.", self.role, self.id, exc_info=True) elif ('authentication required but no callback set' in exc_str): log.error("%s remote '%s' requires authentication, but no authentication configured", self.role, self.id, exc_info=True) else: log.error("Error occurred fetching %s remote '%s': %s", self.role, self.id, exc, exc_info=True) return False try: received_objects = fetch_results['received_objects'] except (AttributeError, TypeError): received_objects = fetch_results.received_objects if (received_objects != 0): log.debug("%s received %s objects for remote '%s'", self.role, received_objects, self.id) else: log.debug("%s remote '%s' is up-to-date", self.role, self.id) refs_post = self.repo.listall_references() cleaned = self.clean_stale_refs(local_refs=refs_post) return bool((received_objects or (refs_pre != refs_post) or cleaned))
'Get file list for the target environment using pygit2'
def file_list(self, tgt_env):
def _traverse(tree, blobs, prefix): '\n Traverse through a pygit2 Tree object recursively, accumulating all\n the file paths and symlink info in the "blobs" dict\n ' for entry in iter(tree): if (entry.oid not in self.repo): continue obj = self.repo[entry.oid] if isinstance(obj, pygit2.Blob): repo_path = salt.utils.path.join(prefix, entry.name, use_posixpath=True) blobs.setdefault('files', []).append(repo_path) if stat.S_ISLNK(tree[entry.name].filemode): link_tgt = self.repo[tree[entry.name].oid].data blobs.setdefault('symlinks', {})[repo_path] = link_tgt elif isinstance(obj, pygit2.Tree): _traverse(obj, blobs, salt.utils.path.join(prefix, entry.name, use_posixpath=True)) files = set() symlinks = {} tree = self.get_tree(tgt_env) if (not tree): return (files, symlinks) if self.root(tgt_env): try: oid = tree[self.root(tgt_env)].oid tree = self.repo[oid] except KeyError: return (files, symlinks) if (not isinstance(tree, pygit2.Tree)): return (files, symlinks) relpath = (lambda path: os.path.relpath(path, self.root(tgt_env))) else: relpath = (lambda path: path) blobs = {} if len(tree): _traverse(tree, blobs, self.root(tgt_env)) add_mountpoint = (lambda path: salt.utils.path.join(self.mountpoint(tgt_env), path, use_posixpath=True)) for repo_path in blobs.get('files', []): files.add(add_mountpoint(relpath(repo_path))) for (repo_path, link_tgt) in six.iteritems(blobs.get('symlinks', {})): symlinks[add_mountpoint(relpath(repo_path))] = link_tgt return (files, symlinks)
'Find the specified file in the specified environment'
def find_file(self, path, tgt_env):
tree = self.get_tree(tgt_env) if (not tree): return (None, None, None) blob = None mode = None depth = 0 while True: depth += 1 if (depth > SYMLINK_RECURSE_DEPTH): blob = None break try: entry = tree[path] mode = entry.filemode if stat.S_ISLNK(mode): link_tgt = self.repo[entry.oid].data path = salt.utils.path.join(os.path.dirname(path), link_tgt, use_posixpath=True) else: blob = self.repo[entry.oid] if isinstance(blob, pygit2.Tree): blob = None break except KeyError: blob = None break if isinstance(blob, pygit2.Blob): return (blob, blob.hex, mode) return (None, None, None)
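As a rough illustration of the bounded symlink resolution used by both find_file() implementations above, here is a small, self-contained sketch that walks a plain dict standing in for a git tree. SYMLINK_RECURSE_DEPTH, the dict layout and the resolve() helper are illustrative stand-ins, not Salt's API.

import posixpath

SYMLINK_RECURSE_DEPTH = 100

def resolve(tree, path):
    # Follow symlink entries until a regular entry is found, giving up
    # after SYMLINK_RECURSE_DEPTH hops so a symlink loop cannot hang us.
    depth = 0
    while True:
        depth += 1
        if depth > SYMLINK_RECURSE_DEPTH:
            return None
        entry = tree.get(path)
        if entry is None:
            return None  # path not present in this tree
        if isinstance(entry, tuple) and entry[0] == 'symlink':
            # Re-point the lookup at the link target, relative to the
            # directory containing the link, as find_file() does.
            path = posixpath.join(posixpath.dirname(path), entry[1])
            continue
        return entry

tree = {'etc/motd': ('symlink', 'motd.real'), 'etc/motd.real': 'hello\n'}
print(resolve(tree, 'etc/motd'))  # -> 'hello\n'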
'Return the configured refspecs'
def get_refspecs(self):
    if not [x for x in self.repo.config if x.startswith('remote.origin.')]:
        raise GitRemoteError("'origin' remote not present")
    return list(self.repo.config.get_multivar('remote.origin.fetch'))
'Return a pygit2.Tree object matching a head ref fetched into refs/remotes/origin/'
def get_tree_from_branch(self, ref):
    try:
        return self.repo.lookup_reference(
            'refs/remotes/origin/{0}'.format(ref)).get_object().tree
    except KeyError:
        return None
'Return a pygit2.Tree object matching a tag ref fetched into refs/tags/'
def get_tree_from_tag(self, ref):
    try:
        return self.repo.lookup_reference(
            'refs/tags/{0}'.format(ref)).get_object().tree
    except KeyError:
        return None
'Return a pygit2.Tree object matching a SHA'
def get_tree_from_sha(self, ref):
    try:
        return self.repo.revparse_single(ref).tree
    except (KeyError, TypeError, ValueError, AttributeError):
        return None
'Assign attributes for pygit2 callbacks'
def setup_callbacks(self):
    pygit2_version = pygit2.__version__
    if distutils.version.LooseVersion(pygit2_version) >= \
            distutils.version.LooseVersion('0.23.2'):
        self.remotecallbacks = pygit2.RemoteCallbacks(
            credentials=self.credentials)
        if not self.ssl_verify:
            self.remotecallbacks.certificate_check = \
                lambda *args, **kwargs: True
    else:
        self.remotecallbacks = None
        if not self.ssl_verify:
            warnings.warn(
                'pygit2 does not support disabling the SSL certificate '
                'check in versions prior to 0.23.2 (installed: {0}). '
                'Fetches for self-signed certificates will fail.'.format(
                    pygit2_version))
'Check the username and password/keypair info for validity. If valid, set a \'credentials\' attribute consisting of the appropriate pygit2 credentials object. Return True if the required auth parameters are present (or if auth is not configured); otherwise fail hard if a required auth parameter is missing or there is a problem with the authentication configuration.'
def verify_auth(self):
self.credentials = None if os.path.isabs(self.url): return True elif (not any((getattr(self, x, None) for x in AUTH_PARAMS))): return True def _incomplete_auth(missing): '\n Helper function to log errors about missing auth parameters\n ' log.critical("Incomplete authentication information for %s remote '%s'. Missing parameters: %s", self.role, self.id, ', '.join(missing)) failhard(self.role) def _key_does_not_exist(key_type, path): '\n Helper function to log errors about missing key file\n ' log.critical("SSH %s (%s) for %s remote '%s' could not be found, path may be incorrect. Note that it may be necessary to clear git_pillar locks to proceed once this is resolved and the master has been started back up. A warning will be logged if this is the case, with instructions.", key_type, path, self.role, self.id) failhard(self.role) (transport, _, address) = self.url.partition('://') if (not address): transport = 'ssh' address = self.url transport = transport.lower() if (transport in ('git', 'file')): return True elif ('ssh' in transport): required_params = ('pubkey', 'privkey') user = address.split('@')[0] if (user == address): log.critical("Keypair specified for %s remote '%s', but remote URL is missing a username", self.role, self.id) failhard(self.role) self.user = user if all((bool(getattr(self, x, None)) for x in required_params)): keypair_params = [getattr(self, x, None) for x in ('user', 'pubkey', 'privkey', 'passphrase')] for (idx, key_type) in ((1, 'pubkey'), (2, 'privkey')): key_path = keypair_params[idx] if (key_path is not None): try: if (not os.path.isfile(key_path)): _key_does_not_exist(key_type, key_path) except TypeError: _key_does_not_exist(key_type, key_path) self.credentials = pygit2.Keypair(*keypair_params) return True else: missing_auth = [x for x in required_params if (not bool(getattr(self, x, None)))] _incomplete_auth(missing_auth) elif ('http' in transport): required_params = ('user', 'password') password_ok = all((bool(getattr(self, x, None)) for x in required_params)) no_password_auth = (not any((bool(getattr(self, x, None)) for x in required_params))) if no_password_auth: return True if password_ok: if ((transport == 'http') and (not self.insecure_auth)): log.critical("Invalid configuration for %s remote '%s'. Authentication is disabled by default on http remotes. Either set %s_insecure_auth to True in the master configuration file, set a per-remote config option named 'insecure_auth' to True, or use https or ssh-based authentication.", self.role, self.id, self.role) failhard(self.role) self.credentials = pygit2.UserPass(self.user, self.password) return True else: missing_auth = [x for x in required_params if (not bool(getattr(self, x, None)))] _incomplete_auth(missing_auth) else: log.critical("Invalid configuration for %s remote '%s'. Unsupported transport '%s'.", self.role, self.id, transport) failhard(self.role)
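To make the transport/user handling in verify_auth() easier to follow, here is a hedged, stdlib-only sketch of how a remote URL might be classified before choosing between pygit2.Keypair and pygit2.UserPass. The classify() helper and the example URLs are invented for illustration.

def classify(url):
    # Split off the scheme; a URL without one (e.g. "git@host:repo.git")
    # is treated as SSH, mirroring the logic above.
    transport, _, address = url.partition('://')
    if not address:
        transport, address = 'ssh', url
    transport = transport.lower()
    user = address.split('@')[0] if '@' in address else None
    return transport, user

print(classify('https://example.com/repo.git'))  # ('https', None)
print(classify('git@example.com:org/repo.git'))  # ('ssh', 'git')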
'Using the blob object, write the file to the destination path'
def write_file(self, blob, dest):
    with salt.utils.files.fopen(dest, 'wb+') as fp_:
        fp_.write(blob.data)
'IMPORTANT: If specifying a cache_root, understand that this is also where the remotes will be cloned. A non-default cache_root is only really designed right now for winrepo, as its repos need to be checked out into the winrepo locations and not within the cachedir.'
def __init__(self, opts, valid_providers=VALID_PROVIDERS, cache_root=None):
    self.opts = opts
    self.valid_providers = valid_providers
    self.get_provider()
    if cache_root is not None:
        self.cache_root = self.remote_root = cache_root
    else:
        self.cache_root = salt.utils.path.join(self.opts['cachedir'], self.role)
        self.remote_root = salt.utils.path.join(self.cache_root, 'remotes')
    self.env_cache = salt.utils.path.join(self.cache_root, 'envs.p')
    self.hash_cachedir = salt.utils.path.join(self.cache_root, 'hash')
    self.file_list_cachedir = salt.utils.path.join(
        self.opts['cachedir'], 'file_lists', self.role)
'Initialize remotes'
def init_remotes(self, remotes, per_remote_overrides, per_remote_only=PER_REMOTE_ONLY):
override_params = copy.deepcopy(per_remote_overrides) global_auth_params = ['{0}_{1}'.format(self.role, x) for x in AUTH_PARAMS if self.opts['{0}_{1}'.format(self.role, x)]] if (self.provider in AUTH_PROVIDERS): override_params += AUTH_PARAMS elif global_auth_params: msg = "{0} authentication was configured, but the '{1}' {0}_provider does not support authentication. The providers for which authentication is supported in {0} are: {2}.".format(self.role, self.provider, ', '.join(AUTH_PROVIDERS)) if (self.role == 'gitfs'): msg += ' See the GitFS Walkthrough in the Salt documentation for further information.' log.critical(msg) failhard(self.role) per_remote_defaults = {} for param in override_params: key = '{0}_{1}'.format(self.role, param) if (key not in self.opts): log.critical("Key '%s' not present in global configuration. This is a bug, please report it.", key) failhard(self.role) per_remote_defaults[param] = enforce_types(key, self.opts[key]) self.remotes = [] for remote in remotes: repo_obj = self.provider_class(self.opts, remote, per_remote_defaults, per_remote_only, override_params, self.cache_root, self.role) if hasattr(repo_obj, 'repo'): repo_obj.verify_auth() repo_obj.setup_callbacks() if ((self.opts['__role'] == 'minion') and repo_obj.new): repo_obj.fetch() repo_obj.saltenv_revmap = {} for (saltenv, saltenv_conf) in six.iteritems(repo_obj.saltenv): if ('ref' in saltenv_conf): ref = saltenv_conf['ref'] if (saltenv == 'base'): repo_obj.saltenv[saltenv].pop('ref') if (ref != repo_obj.base): log.warning("The 'base' environment has been defined in the 'saltenv' param for %s remote %s and will override the branch/tag specified by %s_base (or a per-remote 'base' parameter).", self.role, repo_obj.id, self.role) repo_obj.base = ref else: repo_obj.saltenv_revmap.setdefault(ref, []).append(saltenv) all_envs = [] for env_names in six.itervalues(repo_obj.saltenv_revmap): all_envs.extend(env_names) for (key, conf) in six.iteritems(repo_obj.global_saltenv): if ((key not in all_envs) and ('ref' in conf)): repo_obj.saltenv_revmap.setdefault(conf['ref'], []).append(key) self.remotes.append(repo_obj) cachedir_map = {} for repo in self.remotes: cachedir_map.setdefault(repo.cachedir, []).append(repo.id) collisions = [x for x in cachedir_map if (len(cachedir_map[x]) > 1)] if collisions: for dirname in collisions: log.critical("The following {0} remotes have conflicting cachedirs: {1}. Resolve this using a per-remote parameter called 'name'.".format(self.role, ', '.join(cachedir_map[dirname]))) failhard(self.role) if any((x.new for x in self.remotes)): self.write_remote_map()
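The per-saltenv 'ref' handling in init_remotes() effectively inverts the saltenv config into a ref-to-saltenvs map, with 'base' instead re-pointing the repo's base ref. A minimal sketch of that inversion, using simplified data shapes and a hypothetical build_revmap() helper, might look like this:

def build_revmap(saltenv_conf, base):
    # saltenv_conf: {saltenv_name: {'ref': git_ref, ...}, ...}
    revmap = {}
    for saltenv, conf in saltenv_conf.items():
        ref = conf.get('ref')
        if ref is None:
            continue
        if saltenv == 'base':
            base = ref  # 'base' overrides the configured base branch/tag
        else:
            revmap.setdefault(ref, []).append(saltenv)
    return base, revmap

print(build_revmap({'base': {'ref': 'prod'}, 'qa': {'ref': 'develop'}}, 'master'))
# ('prod', {'develop': ['qa']})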
'Remove cache directories for remotes no longer configured'
def clear_old_remotes(self):
try: cachedir_ls = os.listdir(self.cache_root) except OSError: cachedir_ls = [] for repo in self.remotes: try: cachedir_ls.remove(repo.cachedir_basename) except ValueError: pass to_remove = [] for item in cachedir_ls: if (item in ('hash', 'refs')): continue path = salt.utils.path.join(self.cache_root, item) if os.path.isdir(path): to_remove.append(path) failed = [] if to_remove: for rdir in to_remove: try: shutil.rmtree(rdir) except OSError as exc: log.error('Unable to remove old {0} remote cachedir {1}: {2}'.format(self.role, rdir, exc)) failed.append(rdir) else: log.debug('{0} removed old cachedir {1}'.format(self.role, rdir)) for fdir in failed: to_remove.remove(fdir) ret = bool(to_remove) if ret: self.write_remote_map() return ret
'Completely clear cache'
def clear_cache(self):
    errors = []
    for rdir in (self.cache_root, self.file_list_cachedir):
        if os.path.exists(rdir):
            try:
                shutil.rmtree(rdir)
            except OSError as exc:
                errors.append('Unable to delete {0}: {1}'.format(rdir, exc))
    return errors
'Clear update.lk for all remotes'
def clear_lock(self, remote=None, lock_type='update'):
    cleared = []
    errors = []
    for repo in self.remotes:
        if remote:
            try:
                if not fnmatch.fnmatch(repo.url, remote):
                    continue
            except TypeError:
                if not fnmatch.fnmatch(repo.url, six.text_type(remote)):
                    continue
        success, failed = repo.clear_lock(lock_type=lock_type)
        cleared.extend(success)
        errors.extend(failed)
    return cleared, errors
'Fetch all remotes and return a boolean to let the calling function know whether or not any remotes were updated in the process of fetching'
def fetch_remotes(self):
    changed = False
    for repo in self.remotes:
        try:
            if repo.fetch():
                changed = True
        except Exception as exc:
            log.error(
                "Exception caught while fetching %s remote '%s': %s",
                self.role, repo.id, exc, exc_info=True)
    return changed
'Place an update.lk'
def lock(self, remote=None):
    locked = []
    errors = []
    for repo in self.remotes:
        if remote:
            try:
                if not fnmatch.fnmatch(repo.url, remote):
                    continue
            except TypeError:
                if not fnmatch.fnmatch(repo.url, six.text_type(remote)):
                    continue
        success, failed = repo.lock()
        locked.extend(success)
        errors.extend(failed)
    return locked, errors
'Execute a git fetch on all of the repos and perform maintenance on the fileserver cache.'
def update(self):
data = {'changed': False, 'backend': 'gitfs'} data['changed'] = self.clear_old_remotes() if self.fetch_remotes(): data['changed'] = True refresh_env_cache = (self.opts['__role'] == 'minion') if ((data['changed'] is True) or (not os.path.isfile(self.env_cache))): env_cachedir = os.path.dirname(self.env_cache) if (not os.path.exists(env_cachedir)): os.makedirs(env_cachedir) refresh_env_cache = True if refresh_env_cache: new_envs = self.envs(ignore_cache=True) serial = salt.payload.Serial(self.opts) with salt.utils.files.fopen(self.env_cache, 'wb+') as fp_: fp_.write(serial.dumps(new_envs)) log.trace('Wrote env cache data to {0}'.format(self.env_cache)) if self.opts.get('fileserver_events', False): event = salt.utils.event.get_event('master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False) event.fire_event(data, tagify(['gitfs', 'update'], prefix='fileserver')) try: salt.fileserver.reap_fileserver_cache_dir(self.hash_cachedir, self.find_file) except (OSError, IOError): pass
'Determine which provider to use'
def get_provider(self):
if ('verified_{0}_provider'.format(self.role) in self.opts): self.provider = self.opts['verified_{0}_provider'.format(self.role)] else: desired_provider = self.opts.get('{0}_provider'.format(self.role)) if (not desired_provider): if self.verify_pygit2(quiet=True): self.provider = 'pygit2' elif self.verify_gitpython(quiet=True): self.provider = 'gitpython' else: try: desired_provider = desired_provider.lower() except AttributeError: desired_provider = str(desired_provider).lower() if (desired_provider not in self.valid_providers): log.critical("Invalid {0}_provider '{1}'. Valid choices are: {2}".format(self.role, desired_provider, ', '.join(self.valid_providers))) failhard(self.role) elif ((desired_provider == 'pygit2') and self.verify_pygit2()): self.provider = 'pygit2' elif ((desired_provider == 'gitpython') and self.verify_gitpython()): self.provider = 'gitpython' if (not hasattr(self, 'provider')): log.critical('No suitable {0} provider module is installed.'.format(self.role)) failhard(self.role) if (self.provider == 'pygit2'): self.provider_class = Pygit2 elif (self.provider == 'gitpython'): self.provider_class = GitPython
'Check if GitPython is available and at a compatible version (>= 0.3.0)'
def verify_gitpython(self, quiet=False):
def _recommend(): if (HAS_PYGIT2 and ('pygit2' in self.valid_providers)): log.error(_RECOMMEND_PYGIT2.format(self.role)) if (not HAS_GITPYTHON): if (not quiet): log.error('%s is configured but could not be loaded, is GitPython installed?', self.role) _recommend() return False elif ('gitpython' not in self.valid_providers): return False gitver = _LooseVersion(git.__version__) minver = _LooseVersion(GITPYTHON_MINVER) errors = [] if (gitver < minver): errors.append('{0} is configured, but the GitPython version is earlier than {1}. Version {2} detected.'.format(self.role, GITPYTHON_MINVER, git.__version__)) if (not salt.utils.path.which('git')): errors.append("The git command line utility is required when using the 'gitpython' {0}_provider.".format(self.role)) if errors: for error in errors: log.error(error) if (not quiet): _recommend() return False self.opts['verified_{0}_provider'.format(self.role)] = 'gitpython' log.debug('gitpython {0}_provider enabled'.format(self.role)) return True
'Check if pygit2/libgit2 are available and at a compatible version. Pygit2 must be at least 0.20.3 and libgit2 must be at least 0.20.0.'
def verify_pygit2(self, quiet=False):
def _recommend(): if (HAS_GITPYTHON and ('gitpython' in self.valid_providers)): log.error(_RECOMMEND_GITPYTHON.format(self.role)) if (not HAS_PYGIT2): if (not quiet): log.error('%s is configured but could not be loaded, are pygit2 and libgit2 installed?', self.role) _recommend() return False elif ('pygit2' not in self.valid_providers): return False pygit2ver = _LooseVersion(pygit2.__version__) pygit2_minver = _LooseVersion(PYGIT2_MINVER) libgit2ver = _LooseVersion(pygit2.LIBGIT2_VERSION) libgit2_minver = _LooseVersion(LIBGIT2_MINVER) errors = [] if (pygit2ver < pygit2_minver): errors.append('{0} is configured, but the pygit2 version is earlier than {1}. Version {2} detected.'.format(self.role, PYGIT2_MINVER, pygit2.__version__)) if (libgit2ver < libgit2_minver): errors.append('{0} is configured, but the libgit2 version is earlier than {1}. Version {2} detected.'.format(self.role, LIBGIT2_MINVER, pygit2.LIBGIT2_VERSION)) if (not salt.utils.path.which('git')): errors.append("The git command line utility is required when using the 'pygit2' {0}_provider.".format(self.role)) if errors: for error in errors: log.error(error) if (not quiet): _recommend() return False self.opts['verified_{0}_provider'.format(self.role)] = 'pygit2' log.debug('pygit2 {0}_provider enabled'.format(self.role)) return True
'Write the remote_map.txt'
def write_remote_map(self):
    remote_map = salt.utils.path.join(self.cache_root, 'remote_map.txt')
    try:
        with salt.utils.files.fopen(remote_map, 'w+') as fp_:
            timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
            fp_.write('# {0}_remote map as of {1}\n'.format(self.role, timestamp))
            for repo in self.remotes:
                fp_.write('{0} = {1}\n'.format(repo.cachedir_basename, repo.id))
    except OSError:
        pass
    else:
        log.info('Wrote new {0} remote map to {1}'.format(self.role, remote_map))
'Common code for git_pillar/winrepo to handle locking and checking out of a repo.'
def do_checkout(self, repo):
    time_start = time.time()
    while time.time() - time_start <= 5:
        try:
            return repo.checkout()
        except GitLockError as exc:
            if exc.errno == errno.EEXIST:
                time.sleep(0.1)
                continue
            else:
                log.error(
                    "Error %d encountered while obtaining checkout lock "
                    "for %s remote '%s': %s",
                    exc.errno, repo.role, repo.id, exc, exc_info=True)
                break
    else:
        log.error(
            "Timed out waiting for checkout lock to be released for %s "
            "remote '%s'. If this error persists, run 'salt-run "
            "cache.clear_git_lock %s type=checkout' to clear it.",
            self.role, repo.id, self.role)
    return None
'Return a list of all directories on the master'
def dir_list(self, load):
return self._file_lists(load, 'dirs')
'Return a list of refs that can be used as environments'
def envs(self, ignore_cache=False):
    if not ignore_cache:
        cache_match = salt.fileserver.check_env_cache(self.opts, self.env_cache)
        if cache_match is not None:
            return cache_match
    ret = set()
    for repo in self.remotes:
        repo_envs = set()
        if not repo.disable_saltenv_mapping:
            repo_envs.update(repo.envs())
        for env_list in six.itervalues(repo.saltenv_revmap):
            repo_envs.update(env_list)
        ret.update([x for x in repo_envs if repo.env_is_exposed(x)])
    return sorted(ret)
'Find the first file to match the path and ref, read the file out of git and send the path to the newly cached file'
def find_file(self, path, tgt_env='base', **kwargs):
fnd = {'path': '', 'rel': ''} if (os.path.isabs(path) or ((not salt.utils.stringutils.is_hex(tgt_env)) and (tgt_env not in self.envs()))): return fnd dest = salt.utils.path.join(self.cache_root, 'refs', tgt_env, path) hashes_glob = salt.utils.path.join(self.hash_cachedir, tgt_env, '{0}.hash.*'.format(path)) blobshadest = salt.utils.path.join(self.hash_cachedir, tgt_env, '{0}.hash.blob_sha1'.format(path)) lk_fn = salt.utils.path.join(self.hash_cachedir, tgt_env, '{0}.lk'.format(path)) destdir = os.path.dirname(dest) hashdir = os.path.dirname(blobshadest) if (not os.path.isdir(destdir)): try: os.makedirs(destdir) except OSError: os.remove(destdir) os.makedirs(destdir) if (not os.path.isdir(hashdir)): try: os.makedirs(hashdir) except OSError: os.remove(hashdir) os.makedirs(hashdir) for repo in self.remotes: if (repo.mountpoint(tgt_env) and (not path.startswith((repo.mountpoint(tgt_env) + os.sep)))): continue repo_path = path[len(repo.mountpoint(tgt_env)):].lstrip(os.sep) if repo.root(tgt_env): repo_path = salt.utils.path.join(repo.root(tgt_env), repo_path) (blob, blob_hexsha, blob_mode) = repo.find_file(repo_path, tgt_env) if (blob is None): continue def _add_file_stat(fnd, mode): "\n Add a the mode to the return dict. In other fileserver backends\n we stat the file to get its mode, and add the stat result\n (passed through list() for better serialization) to the 'stat'\n key in the return dict. However, since we aren't using the\n stat result for anything but the mode at this time, we can\n avoid unnecessary work by just manually creating the list and\n not running an os.stat() on all files in the repo.\n " if (mode is not None): fnd['stat'] = [mode] return fnd salt.fileserver.wait_lock(lk_fn, dest) if (os.path.isfile(blobshadest) and os.path.isfile(dest)): with salt.utils.files.fopen(blobshadest, 'r') as fp_: sha = fp_.read() if (sha == blob_hexsha): fnd['rel'] = path fnd['path'] = dest return _add_file_stat(fnd, blob_mode) with salt.utils.files.fopen(lk_fn, 'w+') as fp_: fp_.write('') for filename in glob.glob(hashes_glob): try: os.remove(filename) except Exception: pass repo.write_file(blob, dest) with salt.utils.files.fopen(blobshadest, 'w+') as fp_: fp_.write(blob_hexsha) try: os.remove(lk_fn) except OSError: pass fnd['rel'] = path fnd['path'] = dest return _add_file_stat(fnd, blob_mode) return fnd
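The caching scheme in find_file() boils down to a sidecar file holding the SHA of the blob last written to the cached copy, so the blob is only re-extracted from git when it actually changes. A simplified, self-contained sketch of that pattern (placeholder paths and a hypothetical write_file callable, not Salt's real layout) follows:

import os

def cached_copy_is_current(dest, shadest, blob_sha):
    # The cached copy is current only if both files exist and the recorded
    # SHA matches the blob we are about to serve.
    if os.path.isfile(shadest) and os.path.isfile(dest):
        with open(shadest, 'r') as fh:
            return fh.read() == blob_sha
    return False

def refresh(dest, shadest, blob_sha, write_file):
    if cached_copy_is_current(dest, shadest, blob_sha):
        return dest                  # nothing to do
    write_file(dest)                 # e.g. stream the blob to disk
    with open(shadest, 'w') as fh:
        fh.write(blob_sha)           # record which blob is now cached
    return dest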
'Return a chunk from a file based on the data received'
def serve_file(self, load, fnd):
if ('env' in load): salt.utils.warn_until('Oxygen', "Parameter 'env' has been detected in the argument list. This parameter is no longer used and has been replaced by 'saltenv' as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.") load.pop('env') ret = {'data': '', 'dest': ''} required_load_keys = set(['path', 'loc', 'saltenv']) if (not all(((x in load) for x in required_load_keys))): log.debug('Not all of the required keys present in payload. Missing: {0}'.format(', '.join(required_load_keys.difference(load)))) return ret if (not fnd['path']): return ret ret['dest'] = fnd['rel'] gzip = load.get('gzip', None) fpath = os.path.normpath(fnd['path']) with salt.utils.files.fopen(fpath, 'rb') as fp_: fp_.seek(load['loc']) data = fp_.read(self.opts['file_buffer_size']) if (data and six.PY3 and (not salt.utils.is_bin_file(fpath))): data = data.decode(__salt_system_encoding__) if (gzip and data): data = salt.utils.gzip_util.compress(data, gzip) ret['gzip'] = gzip ret['data'] = data return ret
'Return a file hash, the hash type is set in the master config file'
def file_hash(self, load, fnd):
if ('env' in load): salt.utils.warn_until('Oxygen', "Parameter 'env' has been detected in the argument list. This parameter is no longer used and has been replaced by 'saltenv' as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.") load.pop('env') if (not all(((x in load) for x in ('path', 'saltenv')))): return ('', None) ret = {'hash_type': self.opts['hash_type']} relpath = fnd['rel'] path = fnd['path'] hashdest = salt.utils.path.join(self.hash_cachedir, load['saltenv'], '{0}.hash.{1}'.format(relpath, self.opts['hash_type'])) if (not os.path.isfile(hashdest)): if (not os.path.exists(os.path.dirname(hashdest))): os.makedirs(os.path.dirname(hashdest)) ret['hsum'] = salt.utils.get_hash(path, self.opts['hash_type']) with salt.utils.files.fopen(hashdest, 'w+') as fp_: fp_.write(ret['hsum']) return ret else: with salt.utils.files.fopen(hashdest, 'rb') as fp_: ret['hsum'] = fp_.read() return ret
'Return a dict containing the file lists for files and dirs'
def _file_lists(self, load, form):
if ('env' in load): salt.utils.warn_until('Oxygen', "Parameter 'env' has been detected in the argument list. This parameter is no longer used and has been replaced by 'saltenv' as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.") load.pop('env') if (not os.path.isdir(self.file_list_cachedir)): try: os.makedirs(self.file_list_cachedir) except os.error: log.error('Unable to make cachedir {0}'.format(self.file_list_cachedir)) return [] list_cache = salt.utils.path.join(self.file_list_cachedir, '{0}.p'.format(load['saltenv'].replace(os.path.sep, '_|-'))) w_lock = salt.utils.path.join(self.file_list_cachedir, '.{0}.w'.format(load['saltenv'].replace(os.path.sep, '_|-'))) (cache_match, refresh_cache, save_cache) = salt.fileserver.check_file_list_cache(self.opts, form, list_cache, w_lock) if (cache_match is not None): return cache_match if refresh_cache: ret = {'files': set(), 'symlinks': {}, 'dirs': set()} if (salt.utils.stringutils.is_hex(load['saltenv']) or (load['saltenv'] in self.envs())): for repo in self.remotes: (repo_files, repo_symlinks) = repo.file_list(load['saltenv']) ret['files'].update(repo_files) ret['symlinks'].update(repo_symlinks) ret['dirs'].update(repo.dir_list(load['saltenv'])) ret['files'] = sorted(ret['files']) ret['dirs'] = sorted(ret['dirs']) if save_cache: salt.fileserver.write_file_list_cache(self.opts, ret, list_cache, w_lock) return ret.get(form, []) return ({} if (form == 'symlinks') else [])
'Return a list of all files on the file server in a specified environment'
def file_list(self, load):
return self._file_lists(load, 'files')
'Return a list of all empty directories on the master'
def file_list_emptydirs(self, load):
return []
'Return a dict of all symlinks based on a given path in the repo'
def symlink_list(self, load):
if ('env' in load): salt.utils.warn_until('Oxygen', "Parameter 'env' has been detected in the argument list. This parameter is no longer used and has been replaced by 'saltenv' as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.") load.pop('env') if ((not salt.utils.stringutils.is_hex(load['saltenv'])) and (load['saltenv'] not in self.envs())): return {} if ('prefix' in load): prefix = load['prefix'].strip('/') else: prefix = '' symlinks = self._file_lists(load, 'symlinks') return dict([(key, val) for (key, val) in six.iteritems(symlinks) if key.startswith(prefix)])
'Checkout the targeted branches/tags from the git_pillar remotes'
def checkout(self):
    self.pillar_dirs = OrderedDict()
    self.pillar_linked_dirs = []
    for repo in self.remotes:
        cachedir = self.do_checkout(repo)
        if cachedir is not None:
            if repo.env:
                env = repo.env
            else:
                base_branch = self.opts['{0}_base'.format(self.role)]
                env = 'base' if repo.branch == base_branch else repo.branch
            if repo._mountpoint:
                if self.link_mountpoint(repo, cachedir):
                    self.pillar_dirs[repo.linkdir] = env
                    self.pillar_linked_dirs.append(repo.linkdir)
            else:
                self.pillar_dirs[cachedir] = env
'Ensure that the mountpoint is linked to the passed cachedir'
def link_mountpoint(self, repo, cachedir):
lcachelink = salt.utils.path.join(repo.linkdir, repo._mountpoint) if (not os.path.islink(lcachelink)): ldirname = os.path.dirname(lcachelink) try: os.symlink(cachedir, lcachelink) except OSError as exc: if (exc.errno == errno.ENOENT): try: os.makedirs(ldirname) except OSError as exc: log.error('Failed to create path %s: %s', ldirname, exc.__str__()) return False else: try: os.symlink(cachedir, lcachelink) except OSError: log.error('Could not create symlink to %s at path %s: %s', cachedir, lcachelink, exc.__str__()) return False elif (exc.errno == errno.EEXIST): try: salt.utils.files.rm_rf(lcachelink) except OSError as exc: log.error('Failed to remove file/dir at path %s: %s', lcachelink, exc.__str__()) return False else: try: os.symlink(cachedir, lcachelink) except OSError: log.error('Could not create symlink to %s at path %s: %s', cachedir, lcachelink, exc.__str__()) return False else: log.error('Could not create symlink to %s at path %s: %s', cachedir, lcachelink, exc.__str__()) return False return True
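link_mountpoint() is essentially an "ensure this symlink exists" routine that recovers from ENOENT (missing parent directory) and EEXIST (something already at the link path). A generic stdlib sketch of that control flow, not Salt's actual code, is:

import errno
import os
import shutil

def ensure_symlink(target, linkpath):
    try:
        os.symlink(target, linkpath)
        return True
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # Parent directory is missing; create it and retry below.
            os.makedirs(os.path.dirname(linkpath))
        elif exc.errno == errno.EEXIST:
            # Something (file, dir or stale link) is in the way; remove it.
            if os.path.isdir(linkpath) and not os.path.islink(linkpath):
                shutil.rmtree(linkpath)
            else:
                os.remove(linkpath)
        else:
            return False
    os.symlink(target, linkpath)  # retry after clearing the obstacle
    return True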
'Execute a git fetch on all of the repos. In this case, simply execute self.fetch_remotes() from the parent class. This function only exists to make the git_pillar update code in master.py (salt.master.Maintenance.handle_git_pillar) less complicated, once the legacy git_pillar code is purged we can remove this function and just run pillar.fetch_remotes() there.'
def update(self):
return self.fetch_remotes()
'Checkout the targeted branches/tags from the winrepo remotes'
def checkout(self):
    self.winrepo_dirs = {}
    for repo in self.remotes:
        cachedir = self.do_checkout(repo)
        if cachedir is not None:
            self.winrepo_dirs[repo.id] = cachedir
'Return minions found by looking at nodegroups'
def _check_nodegroup_minions(self, expr, greedy):
return self._check_compound_minions(nodegroup_comp(expr, self.opts['nodegroups']), DEFAULT_TARGET_DELIM, greedy)
'Return the minions found by looking via globs'
def _check_glob_minions(self, expr, greedy):
return fnmatch.filter(self._pki_minions(), expr)
'Return the minions found by looking via a list'
def _check_list_minions(self, expr, greedy):
    if isinstance(expr, six.string_types):
        expr = [m for m in expr.split(',') if m]
    minions = self._pki_minions()
    return [x for x in expr if x in minions]
'Return the minions found by looking via regular expressions'
def _check_pcre_minions(self, expr, greedy):
    reg = re.compile(expr)
    return [m for m in self._pki_minions() if reg.match(m)]
'Retrieve the complete minion list from the PKI dir. Respects the cache if configured'
def _pki_minions(self):
    minions = []
    pki_cache_fn = os.path.join(self.opts['pki_dir'], self.acc, '.key_cache')
    try:
        if self.opts['key_cache'] and os.path.exists(pki_cache_fn):
            log.debug('Returning cached minion list')
            with salt.utils.files.fopen(pki_cache_fn) as fn_:
                return self.serial.load(fn_)
        else:
            for fn_ in salt.utils.isorted(
                    os.listdir(os.path.join(self.opts['pki_dir'], self.acc))):
                if not fn_.startswith('.') and os.path.isfile(
                        os.path.join(self.opts['pki_dir'], self.acc, fn_)):
                    minions.append(fn_)
            return minions
    except OSError as exc:
        log.error(
            'Encountered OSError while evaluating minions in PKI dir: '
            '{0}'.format(exc))
        return minions
'Helper function to search for minions in the master caches. If \'greedy\', return accepted minions that match the condition or are absent from the cache. If not \'greedy\', return only the minions that have cache data and match the condition.'
def _check_cache_minions(self, expr, delimiter, greedy, search_type, regex_match=False, exact_match=False):
cache_enabled = self.opts.get('minion_data_cache', False) def list_cached_minions(): return self.cache.list('minions') if greedy: minions = [] for fn_ in salt.utils.isorted(os.listdir(os.path.join(self.opts['pki_dir'], self.acc))): if ((not fn_.startswith('.')) and os.path.isfile(os.path.join(self.opts['pki_dir'], self.acc, fn_))): minions.append(fn_) elif cache_enabled: minions = list_cached_minions() else: return [] if cache_enabled: if greedy: cminions = list_cached_minions() else: cminions = minions if (not cminions): return minions minions = set(minions) for id_ in cminions: if (greedy and (id_ not in minions)): continue mdata = self.cache.fetch('minions/{0}'.format(id_), 'data') if (mdata is None): if (not greedy): minions.remove(id_) continue search_results = mdata.get(search_type) if (not salt.utils.subdict_match(search_results, expr, delimiter=delimiter, regex_match=regex_match, exact_match=exact_match)): minions.remove(id_) minions = list(minions) return minions
'Return the minions found by looking via grains'
def _check_grain_minions(self, expr, delimiter, greedy):
return self._check_cache_minions(expr, delimiter, greedy, 'grains')
'Return the minions found by looking via grains with PCRE'
def _check_grain_pcre_minions(self, expr, delimiter, greedy):
return self._check_cache_minions(expr, delimiter, greedy, 'grains', regex_match=True)
'Return the minions found by looking via pillar'
def _check_pillar_minions(self, expr, delimiter, greedy):
return self._check_cache_minions(expr, delimiter, greedy, 'pillar')
'Return the minions found by looking via pillar with PCRE'
def _check_pillar_pcre_minions(self, expr, delimiter, greedy):
return self._check_cache_minions(expr, delimiter, greedy, 'pillar', regex_match=True)
'Return the minions found by looking via pillar'
def _check_pillar_exact_minions(self, expr, delimiter, greedy):
return self._check_cache_minions(expr, delimiter, greedy, 'pillar', exact_match=True)
'Return the minions found by looking via ipcidr'
def _check_ipcidr_minions(self, expr, greedy):
cache_enabled = self.opts.get('minion_data_cache', False) if greedy: minions = self._pki_minions() elif cache_enabled: minions = self.cache.ls('minions') else: return [] if cache_enabled: if greedy: cminions = self.cache.ls('minions') else: cminions = minions if (cminions is None): return minions tgt = expr try: tgt = ipaddress.ip_address(tgt) except: try: tgt = ipaddress.ip_network(tgt) except: log.error('Invalid IP/CIDR target: {0}'.format(tgt)) return [] proto = 'ipv{0}'.format(tgt.version) minions = set(minions) for id_ in cminions: mdata = self.cache.fetch('minions/{0}'.format(id_), 'data') if (mdata is None): if (not greedy): minions.remove(id_) continue grains = mdata.get('grains') if ((grains is None) or (proto not in grains)): match = False elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)): match = (str(tgt) in grains[proto]) else: match = salt.utils.network.in_subnet(tgt, grains[proto]) if ((not match) and (id_ in minions)): minions.remove(id_) return list(minions)
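The IP/CIDR matching above first tries the target as a single address and falls back to treating it as a network. A stand-alone sketch using only the ipaddress module (with invented grains data) shows the same decision:

import ipaddress

def matches(tgt, minion_addrs):
    # Interpret the target as a single address if possible, else a network.
    try:
        tgt = ipaddress.ip_address(tgt)
    except ValueError:
        tgt = ipaddress.ip_network(tgt)
    if isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
        return str(tgt) in minion_addrs
    # Network target: match if any of the minion's addresses fall inside it.
    return any(ipaddress.ip_address(a) in tgt for a in minion_addrs)

print(matches('10.0.0.5', ['10.0.0.5', '127.0.0.1']))  # True
print(matches('10.0.0.0/24', ['10.0.1.7']))            # False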
'Return the minions found by looking via range expression'
def _check_range_minions(self, expr, greedy):
if (not HAS_RANGE): raise CommandExecutionError('Range matcher unavailable (unable to import seco.range, module most likely not installed)') if (not hasattr(self, '_range')): self._range = seco.range.Range(self.opts['range_server']) try: return self._range.expand(expr) except seco.range.RangeException as exc: log.error('Range exception in compound match: {0}'.format(exc)) cache_enabled = self.opts.get('minion_data_cache', False) if greedy: mlist = [] for fn_ in salt.utils.isorted(os.listdir(os.path.join(self.opts['pki_dir'], self.acc))): if ((not fn_.startswith('.')) and os.path.isfile(os.path.join(self.opts['pki_dir'], self.acc, fn_))): mlist.append(fn_) return mlist elif cache_enabled: return self.cache.ls('minions') else: return list()
'Return the minions found by looking via compound matcher Disable pillar glob matching'
def _check_compound_pillar_exact_minions(self, expr, delimiter, greedy):
return self._check_compound_minions(expr, delimiter, greedy, pillar_exact=True)
'Return the minions found by looking via compound matcher'
def _check_compound_minions(self, expr, delimiter, greedy, pillar_exact=False):
log.debug('_check_compound_minions({0}, {1}, {2}, {3})'.format(expr, delimiter, greedy, pillar_exact)) if ((not isinstance(expr, six.string_types)) and (not isinstance(expr, (list, tuple)))): log.error('Compound target that is neither string, list nor tuple') return [] minions = set(self._pki_minions()) log.debug('minions: {0}'.format(minions)) if self.opts.get('minion_data_cache', False): ref = {'G': self._check_grain_minions, 'P': self._check_grain_pcre_minions, 'I': self._check_pillar_minions, 'J': self._check_pillar_pcre_minions, 'L': self._check_list_minions, 'N': None, 'S': self._check_ipcidr_minions, 'E': self._check_pcre_minions, 'R': self._all_minions} if pillar_exact: ref['I'] = self._check_pillar_exact_minions ref['J'] = self._check_pillar_exact_minions results = [] unmatched = [] opers = ['and', 'or', 'not', '(', ')'] if isinstance(expr, six.string_types): words = expr.split() else: words = expr for word in words: target_info = parse_target(word) if (word in opers): if results: if ((results[(-1)] == '(') and (word in ('and', 'or'))): log.error('Invalid beginning operator after "(": {0}'.format(word)) return [] if (word == 'not'): if (not (results[(-1)] in ('&', '|', '('))): results.append('&') results.append('(') results.append(str(set(minions))) results.append('-') unmatched.append('-') elif (word == 'and'): results.append('&') elif (word == 'or'): results.append('|') elif (word == '('): results.append(word) unmatched.append(word) elif (word == ')'): if ((not unmatched) or (unmatched[(-1)] != '(')): log.error('Invalid compound expr (unexpected right parenthesis): {0}'.format(expr)) return [] results.append(word) unmatched.pop() if (unmatched and (unmatched[(-1)] == '-')): results.append(')') unmatched.pop() else: log.error('Unhandled oper in compound expr: {0}'.format(expr)) return [] elif (word == 'not'): results.append('(') results.append(str(set(minions))) results.append('-') unmatched.append('-') elif (word == '('): results.append(word) unmatched.append(word) else: log.error('Expression may begin with binary operator: {0}'.format(word)) return [] elif (target_info and target_info['engine']): if ('N' == target_info['engine']): log.error('Detected nodegroup expansion failure of "{0}"'.format(word)) return [] engine = ref.get(target_info['engine']) if (not engine): log.error('Unrecognized target engine "{0}" for target expression "{1}"'.format(target_info['engine'], word)) return [] engine_args = [target_info['pattern']] if (target_info['engine'] in ('G', 'P', 'I', 'J')): engine_args.append((target_info['delimiter'] or ':')) engine_args.append(greedy) results.append(str(set(engine(*engine_args)))) if (unmatched and (unmatched[(-1)] == '-')): results.append(')') unmatched.pop() else: results.append(str(set(self._check_glob_minions(word, True)))) if (unmatched and (unmatched[(-1)] == '-')): results.append(')') unmatched.pop() results.extend([')' for item in unmatched]) results = ' '.join(results) log.debug('Evaluating final compound matching expr: {0}'.format(results)) try: return list(eval(results)) except Exception: log.error('Invalid compound target: {0}'.format(expr)) return [] return list(minions)
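Conceptually, the compound matcher turns each word into a set of minion IDs and combines them with set algebra. The toy below is a heavily simplified, assumed version of that idea (no parentheses, nodegroups, or engine prefixes such as G@/E@), included only to make the evaluation strategy concrete:

def compound_match(words, resolve, all_minions):
    # words: tokenized expression, e.g. ['web*', 'and', 'not', 'web2']
    # resolve: callable mapping a target word to a set of minion ids
    result, op = None, 'or'
    for word in words:
        if word in ('and', 'or', 'not'):
            op = word
            continue
        matched = resolve(word)
        if op == 'not':
            matched = all_minions - matched
            op = 'and'
        if result is None:
            result = matched
        elif op == 'and':
            result &= matched
        else:
            result |= matched
    return result or set()

all_m = {'web1', 'web2', 'db1'}
resolve = lambda pat: {m for m in all_m if m.startswith(pat.rstrip('*'))}
print(compound_match(['web*', 'and', 'not', 'web2'], resolve, all_m))  # {'web1'}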
'Return a set of all connected minion ids, optionally within a subset'
def connected_ids(self, subset=None, show_ipv4=False, include_localhost=False):
minions = set() if self.opts.get('minion_data_cache', False): search = self.cache.ls('minions') if (search is None): return minions addrs = salt.utils.network.local_port_tcp(int(self.opts['publish_port'])) if (('127.0.0.1' in addrs) or ('0.0.0.0' in addrs)): addrs.discard('127.0.0.1') addrs.discard('0.0.0.0') addrs.update(set(salt.utils.network.ip_addrs(include_loopback=include_localhost))) if subset: search = subset for id_ in search: try: mdata = self.cache.fetch('minions/{0}'.format(id_), 'data') except SaltCacheError: continue if (mdata is None): continue grains = mdata.get('grains', {}) for ipv4 in grains.get('ipv4', []): if ((ipv4 == '127.0.0.1') and (not include_localhost)): continue if (ipv4 == '0.0.0.0'): continue if (ipv4 in addrs): if show_ipv4: minions.add((id_, ipv4)) else: minions.add(id_) break return minions
'Return a list of all minions that have auth\'d'
def _all_minions(self, expr=None):
    mlist = []
    for fn_ in salt.utils.isorted(
            os.listdir(os.path.join(self.opts['pki_dir'], self.acc))):
        if not fn_.startswith('.') and os.path.isfile(
                os.path.join(self.opts['pki_dir'], self.acc, fn_)):
            mlist.append(fn_)
    return mlist
'Check the passed regex against the available minions\' public keys stored for authentication. This should return a set of ids which match the regex; that set is then used to parse the returns and make sure everyone has checked back in.'
def check_minions(self, expr, tgt_type='glob', delimiter=DEFAULT_TARGET_DELIM, greedy=True):
    try:
        if expr is None:
            expr = ''
        check_func = getattr(self, '_check_{0}_minions'.format(tgt_type), None)
        if tgt_type in ('grain', 'grain_pcre', 'pillar', 'pillar_pcre',
                        'pillar_exact', 'compound', 'compound_pillar_exact'):
            minions = check_func(expr, delimiter, greedy)
        else:
            minions = check_func(expr, greedy)
    except Exception:
        log.exception(
            'Failed matching available minions with {0} pattern: {1}'.format(
                tgt_type, expr))
        minions = []
    return minions
'Return a Bool indicating whether the expression sent in is within the scope of the valid expression'
def validate_tgt(self, valid, expr, tgt_type, minions=None, expr_form=None):
    if expr_form is not None:
        salt.utils.warn_until(
            'Fluorine',
            "the target type should be passed using the 'tgt_type' argument "
            "instead of 'expr_form'. Support for using 'expr_form' will be "
            "removed in Salt Fluorine.")
        tgt_type = expr_form
    v_minions = self._expand_matching(valid)
    if minions is None:
        minions = set(self.check_minions(expr, tgt_type))
    else:
        minions = set(minions)
    d_bool = not bool(minions.difference(v_minions))
    if len(v_minions) == len(minions) and d_bool:
        return True
    return d_bool
'Validate a single regex-to-function comparison. The function argument can be a list of functions; it is all or nothing for a list of functions'
def match_check(self, regex, fun):
vals = [] if isinstance(fun, six.string_types): fun = [fun] for func in fun: try: if re.match(regex, func): vals.append(True) else: vals.append(False) except Exception: log.error('Invalid regular expression: {0}'.format(regex)) return (vals and all(vals))
'Read in the form and determine which auth check routine to execute'
def any_auth(self, form, auth_list, fun, arg, tgt=None, tgt_type='glob'):
    if form == 'publish':
        return self.auth_check(auth_list, fun, arg, tgt, tgt_type)
    return self.spec_check(auth_list, fun, form)
'Returns a bool which defines if the requested function is authorized. Used to evaluate the standard structure under external master authentication interfaces, like eauth, peer, peer_run, etc.'
def auth_check(self, auth_list, funs, args, tgt, tgt_type='glob', groups=None, publish_validate=False, minions=None, whitelist=None):
if self.opts.get('auth.enable_expanded_auth_matching', False): return self.auth_check_expanded(auth_list, funs, args, tgt, tgt_type, groups, publish_validate) if publish_validate: v_tgt_type = tgt_type if (tgt_type.lower() in ('pillar', 'pillar_pcre')): v_tgt_type = 'pillar_exact' elif (tgt_type.lower() == 'compound'): v_tgt_type = 'compound_pillar_exact' v_minions = set(self.check_minions(tgt, v_tgt_type)) minions = set(self.check_minions(tgt, tgt_type)) mismatch = bool(minions.difference(v_minions)) if mismatch: return False if (not isinstance(funs, list)): funs = [funs] args = [args] try: for (num, fun) in enumerate(funs): if (whitelist and (fun in whitelist)): return True for ind in auth_list: if isinstance(ind, six.string_types): if self.match_check(ind, fun): return True elif isinstance(ind, dict): if (len(ind) != 1): continue valid = next(six.iterkeys(ind)) if self.validate_tgt(valid, tgt, tgt_type, minions=minions): if isinstance(ind[valid], six.string_types): if self.match_check(ind[valid], fun): return True elif isinstance(ind[valid], list): for cond in ind[valid]: if isinstance(cond, six.string_types): if self.match_check(cond, fun): return True elif isinstance(cond, dict): if (len(cond) != 1): continue fcond = next(six.iterkeys(cond)) if self.match_check(fcond, fun): acond = cond[fcond] if (not isinstance(acond, dict)): continue arg_list = args[num] cond_args = acond.get('args', []) good = True for (i, cond_arg) in enumerate(cond_args): if (len(arg_list) <= i): good = False break if (cond_arg is None): continue if (not self.match_check(cond_arg, arg_list[i])): good = False break if (not good): continue cond_kwargs = acond.get('kwargs', {}) arg_kwargs = {} for a in arg_list: if (isinstance(a, dict) and ('__kwarg__' in a)): arg_kwargs = a break for (k, v) in six.iteritems(cond_kwargs): if (k not in arg_kwargs): good = False break if (v is None): continue if (not self.match_check(v, arg_kwargs[k])): good = False break if good: return True except TypeError: return False return False
'Returns a list of authorisation matchers that a user is eligible for. This list is a combination of the provided personal matchers plus the matchers of any group the user is in.'
def fill_auth_list_from_groups(self, auth_provider, user_groups, auth_list):
    group_names = [item for item in auth_provider if item.endswith('%')]
    if group_names:
        for group_name in group_names:
            if group_name.rstrip('%') in user_groups:
                for matcher in auth_provider[group_name]:
                    auth_list.append(matcher)
    return auth_list
'Check special API permissions'
def wheel_check(self, auth_list, fun):
comps = fun.split('.') if (len(comps) != 2): return False mod = comps[0] fun = comps[1] for ind in auth_list: if isinstance(ind, six.string_types): if (ind.startswith('@') and (ind[1:] == mod)): return True if (ind == '@wheel'): return True if (ind == '@wheels'): return True elif isinstance(ind, dict): if (len(ind) != 1): continue valid = next(six.iterkeys(ind)) if (valid.startswith('@') and (valid[1:] == mod)): if isinstance(ind[valid], six.string_types): if self.match_check(ind[valid], fun): return True elif isinstance(ind[valid], list): for regex in ind[valid]: if self.match_check(regex, fun): return True return False
'Check special API permissions'
def runner_check(self, auth_list, fun):
comps = fun.split('.') if (len(comps) != 2): return False mod = comps[0] fun = comps[1] for ind in auth_list: if isinstance(ind, six.string_types): if (ind.startswith('@') and (ind[1:] == mod)): return True if (ind == '@runners'): return True if (ind == '@runner'): return True elif isinstance(ind, dict): if (len(ind) != 1): continue valid = next(six.iterkeys(ind)) if (valid.startswith('@') and (valid[1:] == mod)): if isinstance(ind[valid], six.string_types): if self.match_check(ind[valid], fun): return True elif isinstance(ind[valid], list): for regex in ind[valid]: if self.match_check(regex, fun): return True return False
'Check special API permissions'
def spec_check(self, auth_list, fun, form):
if (form != 'cloud'): comps = fun.split('.') if (len(comps) != 2): return False mod = comps[0] fun = comps[1] else: mod = fun for ind in auth_list: if isinstance(ind, six.string_types): if (ind.startswith('@') and (ind[1:] == mod)): return True if (ind == '@{0}'.format(form)): return True if (ind == '@{0}s'.format(form)): return True elif isinstance(ind, dict): if (len(ind) != 1): continue valid = next(six.iterkeys(ind)) if (valid.startswith('@') and (valid[1:] == mod)): if isinstance(ind[valid], six.string_types): if self.match_check(ind[valid], fun): return True elif isinstance(ind[valid], list): for regex in ind[valid]: if self.match_check(regex, fun): return True return False
'Get pillar data for the targeted minions, either by fetching the cached minion data on the master, or by compiling the minion\'s pillar data on the master. For runner modules that need access to minion pillar data, this function should be used instead of getting the pillar data by executing the pillar module on the minions. By default, this function tries hard to get the pillar data: - Try to get the cached minion grains and pillar if the master has minion_data_cache: True - If the pillar data for the minion is cached, use it. - If there is no cached grains/pillar data for a minion, then try to get the minion grains directly from the minion. - Use the minion grains to compile the pillar directly from the master using salt.pillar.Pillar'
def get_minion_pillar(self):
minion_pillars = {} minion_grains = {} minion_ids = self._tgt_to_list() if any((arg for arg in [self.use_cached_grains, self.use_cached_pillar, self.grains_fallback, self.pillar_fallback])): log.debug('Getting cached minion data') (cached_minion_grains, cached_minion_pillars) = self._get_cached_minion_data(*minion_ids) else: cached_minion_grains = {} cached_minion_pillars = {} log.debug('Getting minion grain data for: {0}'.format(minion_ids)) minion_grains = self._get_minion_grains(cached_grains=cached_minion_grains, *minion_ids) log.debug('Getting minion pillar data for: {0}'.format(minion_ids)) minion_pillars = self._get_minion_pillar(grains=minion_grains, cached_pillar=cached_minion_pillars, *minion_ids) return minion_pillars
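The cache-then-fallback flow described in the docstring can be shown with a generic helper; the cache dict and fetch_live callable are hypothetical stand-ins, not the MasterPillarUtil API.

def get_data(minion_ids, cache, fetch_live, use_cache=True, fallback=True):
    # Use cached data when present, otherwise fall back to a live fetch,
    # mirroring the strategy described in the docstring above.
    out = {}
    for mid in minion_ids:
        data = cache.get(mid) if use_cache else None
        if data is None and fallback:
            data = fetch_live(mid)
        out[mid] = data or {}
    return out

cache = {'web01': {'role': 'web'}}
fetch_live = lambda mid: {'role': 'unknown'}
print(get_data(['web01', 'db01'], cache, fetch_live))
# {'web01': {'role': 'web'}, 'db01': {'role': 'unknown'}}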
'Get grains data for the targeted minions, either by fetching the cached minion data on the master, or by fetching the grains directly on the minion. By default, this function tries hard to get the grains data: - Try to get the cached minion grains if the master has minion_data_cache: True - If the grains data for the minion is cached, use it. - If there is no cached grains data for a minion, then try to get the minion grains directly from the minion.'
def get_minion_grains(self):
minion_grains = {} minion_ids = self._tgt_to_list() if any((arg for arg in [self.use_cached_grains, self.grains_fallback])): log.debug('Getting cached minion data.') (cached_minion_grains, cached_minion_pillars) = self._get_cached_minion_data(*minion_ids) else: cached_minion_grains = {} log.debug('Getting minion grain data for: {0}'.format(minion_ids)) minion_grains = self._get_minion_grains(cached_grains=cached_minion_grains, *minion_ids) return minion_grains
'Get cached mine data for the targeted minions.'
def get_cached_mine_data(self):
mine_data = {} minion_ids = self._tgt_to_list() log.debug('Getting cached mine data for: {0}'.format(minion_ids)) mine_data = self._get_cached_mine_data(*minion_ids) return mine_data
'Clear the cached data/files for the targeted minions.'
def clear_cached_minion_data(self, clear_pillar=False, clear_grains=False, clear_mine=False, clear_mine_func=None):
clear_what = [] if clear_pillar: clear_what.append('pillar') if clear_grains: clear_what.append('grains') if clear_mine: clear_what.append('mine') if (clear_mine_func is not None): clear_what.append("mine_func: '{0}'".format(clear_mine_func)) if (not len(clear_what)): log.debug('No cached data types specified for clearing.') return False minion_ids = self._tgt_to_list() log.debug('Clearing cached {0} data for: {1}'.format(', '.join(clear_what), minion_ids)) if (clear_pillar == clear_grains): grains = {} pillars = {} else: (grains, pillars) = self._get_cached_minion_data(*minion_ids) try: c_minions = self.cache.ls('minions') for minion_id in minion_ids: if (not salt.utils.verify.valid_id(self.opts, minion_id)): continue if (minion_id not in c_minions): continue bank = 'minions/{0}'.format(minion_id) minion_pillar = pillars.pop(minion_id, False) minion_grains = grains.pop(minion_id, False) if ((clear_pillar and clear_grains) or (clear_pillar and (not minion_grains)) or (clear_grains and (not minion_pillar))): self.cache.flush(bank, 'data') elif (clear_pillar and minion_grains): self.cache.store(bank, 'data', {'grains': minion_grains}) elif (clear_grains and minion_pillar): self.cache.store(bank, 'data', {'pillar': minion_pillar}) if clear_mine: self.cache.flush(bank, 'mine') elif (clear_mine_func is not None): mine_data = self.cache.fetch(bank, 'mine') if isinstance(mine_data, dict): if mine_data.pop(clear_mine_func, False): self.cache.store(bank, 'mine', mine_data) except (OSError, IOError): return True return True
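The partial-clear decision above (flush everything, or write back only the surviving half) can be sketched against a plain dict standing in for the cache bank; the helper name and sample data are invented.

def partial_clear(bank, clear_pillar, clear_grains):
    # When only one of pillar/grains is cleared and the other half exists,
    # the surviving half is written back instead of flushing the whole key.
    pillar = bank.get('pillar')
    grains = bank.get('grains')
    if (clear_pillar and clear_grains) or \
            (clear_pillar and not grains) or (clear_grains and not pillar):
        return {}  # equivalent to flushing the 'data' key
    if clear_pillar:
        return {'grains': grains}
    if clear_grains:
        return {'pillar': pillar}
    return bank

print(partial_clear({'pillar': {'a': 1}, 'grains': {'os': 'Linux'}}, True, False))
# {'grains': {'os': 'Linux'}}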
'Main loop that publishes a timer event every second'
def run(self):
context = zmq.Context() socket = context.socket(zmq.PUB) socket.setsockopt(zmq.LINGER, 100) socket.bind(('ipc://' + self.timer_sock)) count = 0 log.debug('ConCache-Timer started') while (not self.stopped.wait(1)): socket.send(self.serial.dumps(count)) count += 1 if (count >= 60): count = 0
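A minimal, standalone version of the same one-second PUB tick, assuming an invented IPC endpoint and a plain string payload instead of salt.payload serialization.

import time
import zmq

def run_timer(endpoint='ipc:///tmp/example_timer.ipc', ticks=5):
    ctx = zmq.Context()
    pub = ctx.socket(zmq.PUB)
    pub.setsockopt(zmq.LINGER, 100)
    pub.bind(endpoint)
    count = 0
    for _ in range(ticks):
        pub.send_string(str(count))  # subscribers treat this as a heartbeat
        count = (count + 1) % 60     # wrap at 60, like the loop above
        time.sleep(1)
    pub.close()
    ctx.term()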
'Sets up the zmq-connection to the ConCache'
def __init__(self, opts, log_queue=None):
super(CacheWorker, self).__init__(log_queue=log_queue) self.opts = opts
'Gather currently connected minions and update the cache'
def run(self):
new_mins = list(salt.utils.minions.CkMinions(self.opts).connected_ids()) cc = cache_cli(self.opts) cc.get_cached() cc.put_cache([new_mins]) log.debug('ConCache CacheWorker update finished')
'starts the timer and inits the cache itself'
def __init__(self, opts, log_queue=None):
super(ConnectedCache, self).__init__(log_queue=log_queue) log.debug('ConCache initializing...') self.opts = opts self.minions = [] self.cache_sock = os.path.join(self.opts['sock_dir'], 'con_cache.ipc') self.update_sock = os.path.join(self.opts['sock_dir'], 'con_upd.ipc') self.upd_t_sock = os.path.join(self.opts['sock_dir'], 'con_timer.ipc') self.cleanup() self.timer_stop = Event() self.timer = CacheTimer(self.opts, self.timer_stop) self.timer.start() self.running = True
'handle signals and shutdown'
def signal_handler(self, sig, frame):
self.stop()
'remove sockets on shutdown'
def cleanup(self):
log.debug('ConCache cleaning up') if os.path.exists(self.cache_sock): os.remove(self.cache_sock) if os.path.exists(self.update_sock): os.remove(self.update_sock) if os.path.exists(self.upd_t_sock): os.remove(self.upd_t_sock)
'secure the sockets for root-only access'
def secure(self):
log.debug('ConCache securing sockets') if os.path.exists(self.cache_sock): os.chmod(self.cache_sock, 384) if os.path.exists(self.update_sock): os.chmod(self.update_sock, 384) if os.path.exists(self.upd_t_sock): os.chmod(self.upd_t_sock, 384)
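The decimal mode 384 used above is octal 0600, i.e. owner read/write only; a quick illustrative check:

import stat

assert 384 == 0o600
print(stat.filemode(0o600))  # '-rw-------': owner read/write only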
'shutdown cache process'
def stop(self):
self.cleanup() if self.running: self.running = False self.timer_stop.set() self.timer.join()
'Main loop of the ConCache: starts updates at regular intervals and answers requests from the MWorkers'
def run(self):
context = zmq.Context() creq_in = context.socket(zmq.REP) creq_in.setsockopt(zmq.LINGER, 100) creq_in.bind(('ipc://' + self.cache_sock)) cupd_in = context.socket(zmq.SUB) cupd_in.setsockopt(zmq.SUBSCRIBE, '') cupd_in.setsockopt(zmq.LINGER, 100) cupd_in.bind(('ipc://' + self.update_sock)) timer_in = context.socket(zmq.SUB) timer_in.setsockopt(zmq.SUBSCRIBE, '') timer_in.setsockopt(zmq.LINGER, 100) timer_in.connect(('ipc://' + self.upd_t_sock)) poller = zmq.Poller() poller.register(creq_in, zmq.POLLIN) poller.register(cupd_in, zmq.POLLIN) poller.register(timer_in, zmq.POLLIN) serial = salt.payload.Serial(self.opts.get('serial', '')) signal.signal(signal.SIGINT, self.signal_handler) self.secure() log.info('ConCache started') while self.running: try: socks = dict(poller.poll(1)) except KeyboardInterrupt: self.stop() except zmq.ZMQError as zmq_err: log.error('ConCache ZeroMQ-Error occurred') log.exception(zmq_err) self.stop() if (socks.get(creq_in) == zmq.POLLIN): msg = serial.loads(creq_in.recv()) log.debug('ConCache Received request: {0}'.format(msg)) if isinstance(msg, six.string_types): if (msg == 'minions'): reply = serial.dumps(self.minions) creq_in.send(reply) if (socks.get(cupd_in) == zmq.POLLIN): new_c_data = serial.loads(cupd_in.recv()) if (not isinstance(new_c_data, list)): log.error('ConCache Worker returned unusable result') del new_c_data continue try: if (len(new_c_data) == 0): log.debug('ConCache Got empty update from worker') continue data = new_c_data[0] if isinstance(data, six.string_types): if (data not in self.minions): log.debug('ConCache Adding minion {0} to cache'.format(new_c_data[0])) self.minions.append(data) elif isinstance(data, list): log.debug('ConCache Replacing minion list from worker') self.minions = data except IndexError: log.debug('ConCache Got malformed result dict from worker') del new_c_data log.info('ConCache {0} entries in cache'.format(len(self.minions))) if (socks.get(timer_in) == zmq.POLLIN): sec_event = serial.loads(timer_in.recv()) if (int((sec_event % 30)) == 0): cw = CacheWorker(self.opts) cw.start() self.stop() creq_in.close() cupd_in.close() timer_in.close() context.term() log.debug('ConCache Shutting down')
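A simplified REQ-side counterpart to the 'minions' request branch above; the endpoint path and the use of msgpack in place of salt.payload are assumptions for illustration (the real client is Salt's cache_cli).

import msgpack
import zmq

def request_minions(endpoint='ipc:///var/run/salt/master/con_cache.ipc',
                    timeout_ms=3000):
    # Ask the ConCache REP socket for its current minion list.
    ctx = zmq.Context()
    req = ctx.socket(zmq.REQ)
    req.setsockopt(zmq.LINGER, 100)
    req.setsockopt(zmq.RCVTIMEO, timeout_ms)
    req.connect(endpoint)
    req.send(msgpack.dumps('minions'))
    try:
        return msgpack.loads(req.recv())
    finally:
        req.close()
        ctx.term()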
'Make output look like libcloud output for consistency'
def __init__(self, name, server, password=None):
self.name = name self.id = server['id'] self.image = server.get('image', {}).get('id', 'Boot From Volume') self.size = server['flavor']['id'] self.state = server['state'] self._uuid = None self.extra = {'metadata': server['metadata'], 'access_ip': server['accessIPv4']} self.addresses = server.get('addresses', {}) (self.public_ips, self.private_ips) = ([], []) (self.fixed_ips, self.floating_ips) = ([], []) for network in self.addresses.values(): for addr in network: if salt.utils.cloud.is_public_ip(addr['addr']): self.public_ips.append(addr['addr']) else: self.private_ips.append(addr['addr']) if (addr.get('OS-EXT-IPS:type') == 'floating'): self.floating_ips.append(addr['addr']) else: self.fixed_ips.append(addr['addr']) if password: self.extra['password'] = password
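An invented example of the minimal server payload the wrapper above expects; the keys mirror what __init__ reads, while the values are placeholders.

# Invented payload carrying only the keys __init__ reads; real data comes
# from the Nova API, and the values here are placeholders.
server = {
    'id': 'a1b2c3',
    'image': {'id': 'ubuntu-16.04'},
    'flavor': {'id': 'm1.small'},
    'state': 'ACTIVE',
    'metadata': {},
    'accessIPv4': '203.0.113.10',
    'addresses': {
        'private': [
            {'addr': '10.0.0.5', 'OS-EXT-IPS:type': 'fixed'},
            {'addr': '203.0.113.10', 'OS-EXT-IPS:type': 'floating'},
        ],
    },
}
# Given this input, the classification loop above would put 203.0.113.10 in
# public_ips/floating_ips and 10.0.0.5 in private_ips/fixed_ips.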
'Set up nova credentials'
def __init__(self, username, project_id, auth_url, region_name=None, password=None, os_auth_plugin=None, use_keystoneauth=False, **kwargs):
if all([use_keystoneauth, HAS_KEYSTONEAUTH]): self._new_init(username=username, project_id=project_id, auth_url=auth_url, region_name=region_name, password=password, os_auth_plugin=os_auth_plugin, **kwargs) else: self._old_init(username=username, project_id=project_id, auth_url=auth_url, region_name=region_name, password=password, os_auth_plugin=os_auth_plugin, **kwargs)
'Return service catalog'
def get_catalog(self):
return self.catalog
'Make output look like libcloud output for consistency'
def server_show_libcloud(self, uuid):
server_info = self.server_show(uuid) server = next(six.itervalues(server_info)) server_name = next(six.iterkeys(server_info)) if (not hasattr(self, 'password')): self.password = None ret = NovaServer(server_name, server, self.password) return ret
'Boot a cloud server.'
def boot(self, name, flavor_id=0, image_id=0, timeout=300, **kwargs):
nt_ks = self.compute_conn kwargs['name'] = name kwargs['flavor'] = flavor_id kwargs['image'] = (image_id or None) ephemeral = kwargs.pop('ephemeral', []) block_device = kwargs.pop('block_device', []) boot_volume = kwargs.pop('boot_volume', None) snapshot = kwargs.pop('snapshot', None) swap = kwargs.pop('swap', None) kwargs['block_device_mapping_v2'] = _parse_block_device_mapping_v2(block_device=block_device, boot_volume=boot_volume, snapshot=snapshot, ephemeral=ephemeral, swap=swap) response = nt_ks.servers.create(**kwargs) self.uuid = response.id self.password = getattr(response, 'adminPass', None) start = time.time() trycount = 0 while True: trycount += 1 try: return self.server_show_libcloud(self.uuid) except Exception as exc: log.debug('Server information not yet available: {0}'.format(exc)) time.sleep(1) if ((time.time() - start) > timeout): log.error('Timed out after {0} seconds while waiting for data'.format(timeout)) return False log.debug('Retrying server_show() (try {0})'.format(trycount))
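The retry loop at the end of boot() follows a generic poll-until-timeout pattern; below is a hedged, standalone version with a hypothetical fetch() callable rather than the real server_show_libcloud call.

import time

def wait_for(fetch, timeout=300, interval=1):
    # Keep calling fetch() until it succeeds or the timeout elapses,
    # mirroring the server_show_libcloud retry loop above.
    start = time.time()
    attempt = 0
    while True:
        attempt += 1
        try:
            return fetch()
        except Exception as exc:
            if (time.time() - start) > timeout:
                raise RuntimeError(
                    'gave up after {0} tries: {1}'.format(attempt, exc))
            time.sleep(interval)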
'Find a server by its name (libcloud)'
def show_instance(self, name):
return self.server_by_name(name)
'Change the root password of the server identified by server_id'
def root_password(self, server_id, password):
nt_ks = self.compute_conn nt_ks.servers.change_password(server_id, password)
'Find a server by its name'
def server_by_name(self, name):
return self.server_show_libcloud(self.server_list().get(name, {}).get('id', ''))
'Organize information about a volume from the volume_id'
def _volume_get(self, volume_id):
if (self.volume_conn is None): raise SaltCloudSystemExit('No cinder endpoint available') nt_ks = self.volume_conn volume = nt_ks.volumes.get(volume_id) response = {'name': volume.display_name, 'size': volume.size, 'id': volume.id, 'description': volume.display_description, 'attachments': volume.attachments, 'status': volume.status} return response
'List all block volumes'
def volume_list(self, search_opts=None):
if (self.volume_conn is None): raise SaltCloudSystemExit('No cinder endpoint available') nt_ks = self.volume_conn volumes = nt_ks.volumes.list(search_opts=search_opts) response = {} for volume in volumes: response[volume.display_name] = {'name': volume.display_name, 'size': volume.size, 'id': volume.id, 'description': volume.display_description, 'attachments': volume.attachments, 'status': volume.status} return response
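An invented example of the mapping volume_list() builds, keyed by display name; the values are placeholders, not real API output.

# Placeholder illustration of the volume_list() return shape.
example_volumes = {
    'data1': {
        'name': 'data1',
        'size': 100,
        'id': '6edbc2f4-0000-0000-0000-000000000000',
        'description': 'scratch space',
        'attachments': [],
        'status': 'available',
    },
}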
'Show one volume'
def volume_show(self, name):
if (self.volume_conn is None): raise SaltCloudSystemExit('No cinder endpoint available') nt_ks = self.volume_conn volumes = self.volume_list(search_opts={'display_name': name}) volume = volumes[name] return volume
'Create a block storage volume'
def volume_create(self, name, size=100, snapshot=None, voltype=None, availability_zone=None):
if (self.volume_conn is None): raise SaltCloudSystemExit('No cinder endpoint available') nt_ks = self.volume_conn response = nt_ks.volumes.create(size=size, display_name=name, volume_type=voltype, snapshot_id=snapshot, availability_zone=availability_zone) return self._volume_get(response.id)
'Delete a block storage volume'
def volume_delete(self, name):
if (self.volume_conn is None): raise SaltCloudSystemExit('No cinder endpoint available') nt_ks = self.volume_conn try: volume = self.volume_show(name) except KeyError as exc: raise SaltCloudSystemExit('Unable to find {0} volume: {1}'.format(name, exc)) if (volume['status'] == 'deleted'): return volume response = nt_ks.volumes.delete(volume['id']) return volume