desc
stringlengths 3
26.7k
| decl
stringlengths 11
7.89k
| bodies
stringlengths 8
553k
|
---|---|---|
def wheel_sync(self, **kwargs):
    '''
    Wrap Wheel to enable executing wheel modules synchronously.

    One of the kwargs must be the key 'fun' whose value is the
    namestring of the wheel function to call.
    '''
    master_call = self.wheelClient.master_call
    return master_call(**kwargs)
|
def signature(self, cmd):
    '''
    Convenience function that returns a dict of function signature(s)
    specified by ``cmd``.

    ``cmd`` must contain 'module' (a module or module.function name) and
    either 'token' or the 'username'/'password'/'eauth' trio; 'tgt' and
    'tgt_type' are optional and apply when the client is 'minion'.

    Routes to the 'master' client when the module path has more than two
    dotted components and is rooted at 'runner' or 'wheel'; otherwise
    routes to the 'minion' client. Adds the chosen client to ``cmd``.
    '''
    parts = cmd[u'module'].split(u'.')
    client = u'minion'
    if len(parts) > 2 and parts[0] in (u'runner', u'wheel'):
        client = u'master'
    cmd[u'client'] = client
    return self._signature(cmd)
|
'Expects everything that signature does and also a client type string.
client can either be master or minion.'
| def _signature(self, cmd):
| result = {}
client = cmd.get(u'client', u'minion')
if (client == u'minion'):
cmd[u'fun'] = u'sys.argspec'
cmd[u'kwarg'] = dict(module=cmd[u'module'])
result = self.run(cmd)
elif (client == u'master'):
parts = cmd[u'module'].split(u'.')
client = parts[0]
module = u'.'.join(parts[1:])
if (client == u'wheel'):
functions = self.wheelClient.functions
elif (client == u'runner'):
functions = self.runnerClient.functions
result = {u'master': salt.utils.argspec_report(functions, module)}
return result
|
def create_token(self, creds):
    '''
    Create a token with creds.

    The token authorizes salt access if authentication with the
    credentials succeeds. ``creds`` contains 'username', 'password' and
    'eauth' (e.g. 'pam' or 'ldap').

    Returns the token dict with 'token', 'start', 'expire', 'name',
    'user', 'username', 'eauth' and 'perms' keys; 'perms' lists the
    parts of salt the user is authorised to execute.

    Raises EauthAuthenticationError on any failure.
    '''
    try:
        token = self.resolver.mk_token(creds)
    except Exception as ex:
        raise EauthAuthenticationError(u'Authentication failed with {0}.'.format(repr(ex)))
    if u'token' not in token:
        raise EauthAuthenticationError(u'Authentication failed with provided credentials.')
    # Resolve perms: per-user entry first, '*' wildcard otherwise.
    eauth_entry = self.opts[u'external_auth'][token[u'eauth']]
    name = token[u'name']
    token[u'perms'] = eauth_entry[name] if name in eauth_entry else eauth_entry[u'*']
    token[u'user'] = name
    token[u'username'] = name
    return token
|
def verify_token(self, token):
    '''
    Look the token up with the resolver and return its token data
    (which includes the associated user name) if valid.

    Raises EauthAuthenticationError when the lookup itself fails.
    '''
    try:
        return self.resolver.get_token(token)
    except Exception as ex:
        raise EauthAuthenticationError(u'Token validation failed with {0}.'.format(repr(ex)))
|
def get_event(self, wait=0.25, tag=u'', full=False):
    '''
    Get a single salt event.

    Blocks up to ``wait`` seconds when no event is available (``wait=0``
    blocks until the next event). Returns the event if it matches ``tag``
    (or ``tag`` is empty), otherwise None.
    '''
    bus = self.event
    return bus.get_event(wait=wait, tag=tag, full=full, auto_reconnect=True)
|
def fire_event(self, data, tag):
    '''
    Fire an event carrying ``data`` on the event bus, with ``tag``
    namespaced under the 'wui' prefix via ``tagify``.

    NOTE(review): per the original note, this only works when the API
    runs with the same user permissions as the master; it should become
    a master call with appropriate authentication.
    '''
    return self.event.fire_event(data, tagify(tag, u'wui'))
|
def get_pubkey(self):
    '''
    Return the key string for the SSH public key.

    Prefers ~/.ssh/id_rsa when the master opts enable ``ssh_use_home_key``
    and the file exists; otherwise falls back to the configured
    ``ssh_priv`` (default: <pki_dir>/ssh/salt-ssh.rsa).
    '''
    home_key = os.path.expanduser(u'~/.ssh/id_rsa')
    use_home = (u'__master_opts__' in self.opts
                and self.opts[u'__master_opts__'].get(u'ssh_use_home_key')
                and os.path.isfile(home_key))
    if use_home:
        priv = home_key
    else:
        priv = self.opts.get(u'ssh_priv', os.path.join(self.opts[u'pki_dir'], u'ssh', u'salt-ssh.rsa'))
    pub = u'{0}.pub'.format(priv)
    # Second whitespace-separated field of the .pub file is the key body.
    with salt.utils.files.fopen(pub, u'r') as fp_:
        return u'{0} rsa root@master'.format(fp_.read().split()[1])
|
def key_deploy(self, host, ret):
    '''
    Deploy the SSH key if the minions don't auth.

    host : the target host name
    ret  : the {host: result} dict from the attempted run

    Returns ``ret`` unchanged, or the result of a key-deploy run when a
    deployment was performed. May prompt interactively for a password.
    '''
    # Non-dict result (raw error string) or forced deploy via opts.
    if ((not isinstance(ret[host], dict)) or self.opts.get(u'ssh_key_deploy')):
        target = self.targets[host]
        if (target.get(u'passwd', False) or self.opts[u'ssh_passwd']):
            # Password available: deploy without re-running the command.
            self._key_deploy_run(host, target, False)
        return ret
    if ret[host].get(u'stderr', u'').count(u'Permission denied'):
        target = self.targets[host]
        # Interactive prompt: ask whether to deploy the salt-ssh key.
        print(u'Permission denied for host {0}, do you want to deploy the salt-ssh key? (password required):'.format(host))
        deploy = input(u'[Y/n] ')
        if deploy.startswith((u'n', u'N')):
            return ret
        target[u'passwd'] = getpass.getpass(u'Password for {0}@{1}: '.format(target[u'user'], host))
        # Deploy the key and re-run the original command.
        return self._key_deploy_run(host, target, True)
    return ret
|
def _key_deploy_run(self, host, target, re_run=True):
    '''
    The ssh-copy-id routine: plant this master's public key on ``host``
    and optionally re-run the original command afterwards.

    Returns a {host: result} dict.
    '''
    argv = [u'ssh.set_auth_key', target.get(u'user', u'root'), self.get_pubkey()]
    single = Single(self.opts, argv, host, mods=self.mods, fsclient=self.fsclient, thin=self.thin, **target)
    # Prefer the native ssh-copy-id binary when present.
    if salt.utils.path.which(u'ssh-copy-id'):
        (stdout, stderr, retcode) = single.shell.copy_id()
    else:
        (stdout, stderr, retcode) = single.run()
    if re_run:
        # Key is planted; drop the password and retry the real command.
        target.pop(u'passwd')
        single = Single(self.opts, self.opts[u'argv'], host, mods=self.mods, fsclient=self.fsclient, thin=self.thin, **target)
        (stdout, stderr, retcode) = single.cmd_block()
        try:
            data = salt.utils.find_json(stdout)
            return {host: data.get(u'local', data)}
        except Exception:
            # Output was not JSON -- surface stderr if there is any.
            if stderr:
                return {host: stderr}
            return {host: u'Bad Return'}
    if (salt.defaults.exitcodes.EX_OK != retcode):
        return {host: stderr}
    return {host: stdout}
|
def handle_routine(self, que, opts, host, target, mine=False):
    '''
    Run the routine in a "Thread" (worker process), put a result dict
    of the shape {'id': <minion id>, 'ret': <return data>} on ``que``.
    '''
    # Deep copy so this worker cannot mutate the shared opts.
    opts = copy.deepcopy(opts)
    single = Single(opts, opts[u'argv'], host, mods=self.mods, fsclient=self.fsclient, thin=self.thin, mine=mine, **target)
    ret = {u'id': single.id}
    (stdout, stderr, retcode) = single.run()
    try:
        data = salt.utils.find_json(stdout)
        # A bare {'local': ...} wrapper means a clean wrapped return.
        if ((len(data) < 2) and (u'local' in data)):
            ret[u'ret'] = data[u'local']
        else:
            ret[u'ret'] = {u'stdout': stdout, u'stderr': stderr, u'retcode': retcode}
    except Exception:
        # No parseable JSON -- fall back to raw stream data.
        ret[u'ret'] = {u'stdout': stdout, u'stderr': stderr, u'retcode': retcode}
    que.put(ret)
|
def handle_ssh(self, mine=False):
    '''
    Spin up the needed threads or processes and execute the subsequent
    routines, yielding {host: result} dicts as targets finish.

    Worker processes post results onto a multiprocessing queue; this
    generator drains it while throttling concurrency to
    ``ssh_max_procs`` (default 25).
    '''
    que = multiprocessing.Queue()
    running = {}
    target_iter = self.targets.__iter__()
    returned = set()   # minion ids whose results have been yielded
    rets = set()       # hosts fully finished (joined)
    init = False       # True once every target has been spawned
    while True:
        if (not self.targets):
            log.error(u'No matching targets found in roster.')
            break
        # Spawn workers until the process cap is hit or targets run out.
        if ((len(running) < self.opts.get(u'ssh_max_procs', 25)) and (not init)):
            try:
                host = next(target_iter)
            except StopIteration:
                init = True
                continue
            # Fill in roster defaults for this target.
            for default in self.defaults:
                if (default not in self.targets[host]):
                    self.targets[host][default] = self.defaults[default]
            if ('host' not in self.targets[host]):
                self.targets[host]['host'] = host
            args = (que, self.opts, host, self.targets[host], mine)
            routine = MultiprocessingProcess(target=self.handle_routine, args=args)
            routine.start()
            running[host] = {u'thread': routine}
            continue
        # Drain at most one queued result (non-blocking).
        ret = {}
        try:
            ret = que.get(False)
            if (u'id' in ret):
                returned.add(ret[u'id'])
                (yield {ret[u'id']: ret[u'ret']})
        except Exception:
            # queue.Empty -- nothing ready yet.
            pass
        # Reap finished workers; drain any results still in the queue.
        for host in running:
            if (not running[host][u'thread'].is_alive()):
                if (host not in returned):
                    try:
                        while True:
                            ret = que.get(False)
                            if (u'id' in ret):
                                returned.add(ret[u'id'])
                                (yield {ret[u'id']: ret[u'ret']})
                    except Exception:
                        pass
                    if (host not in returned):
                        # Worker died without posting anything.
                        error = u"Target '{0}' did not return any data, probably due to an error.".format(host)
                        ret = {u'id': host, u'ret': error}
                        log.error(error)
                        (yield {ret[u'id']: ret[u'ret']})
                running[host][u'thread'].join()
                rets.add(host)
        for host in rets:
            if (host in running):
                running.pop(host)
        if (len(rets) >= len(self.targets)):
            break
        # Throttle the polling loop when saturated or idle.
        if ((len(running) >= self.opts.get(u'ssh_max_procs', 25)) or (len(self.targets) >= len(running))):
            time.sleep(0.1)
|
def run_iter(self, mine=False, jid=None):
    '''
    Execute and yield returns as they come in; do not print to the display.

    mine : bool
        When True the Single objects will use mine_functions defined in
        the roster, pillar, or master config (checked in that order) and
        will modify the argv with the arguments from mine_functions.
    jid : optional job id; generated via the master job cache otherwise.
    '''
    fstr = u'{0}.prep_jid'.format(self.opts[u'master_job_cache'])
    jid = self.returners[fstr](passed_jid=(jid or self.opts.get(u'jid', None)))
    argv = self.opts[u'argv']
    if self.opts.get(u'raw_shell', False):
        fun = u'ssh._raw'
        args = argv
    else:
        fun = (argv[0] if argv else u'')
        args = argv[1:]
    job_load = {u'jid': jid, u'tgt_type': self.tgt_type, u'tgt': self.opts[u'tgt'], u'user': self.opts[u'user'], u'fun': fun, u'arg': args}
    # local_cache needs the minion list to lay out the job cache dirs.
    if (self.opts[u'master_job_cache'] == u'local_cache'):
        self.returners[u'{0}.save_load'.format(self.opts[u'master_job_cache'])](jid, job_load, minions=self.targets.keys())
    else:
        self.returners[u'{0}.save_load'.format(self.opts[u'master_job_cache'])](jid, job_load)
    for ret in self.handle_ssh(mine=mine):
        host = next(six.iterkeys(ret))
        self.cache_job(jid, host, ret[host], fun)
        if self.event:
            self.event.fire_event(ret, salt.utils.event.tagify([jid, u'ret', host], u'job'))
        (yield ret)
|
def cache_job(self, jid, id_, ret, fun):
    '''
    Cache the job information via the configured master job cache
    returner.
    '''
    returner_key = u'{0}.returner'.format(self.opts[u'master_job_cache'])
    load = {u'jid': jid, u'id': id_, u'return': ret, u'fun': fun}
    self.returners[returner_key](load)
|
def run(self, jid=None):
    '''
    Execute the overall routine, print results via outputters.

    Saves the job load to the master job cache, streams results from
    handle_ssh, prints (or collects, with ``static``) each host's output,
    and exits with EX_AGGREGATE if any host failed.
    '''
    fstr = u'{0}.prep_jid'.format(self.opts[u'master_job_cache'])
    jid = self.returners[fstr](passed_jid=(jid or self.opts.get(u'jid', None)))
    argv = self.opts[u'argv']
    if self.opts.get(u'raw_shell', False):
        fun = u'ssh._raw'
        args = argv
    else:
        fun = (argv[0] if argv else u'')
        args = argv[1:]
    job_load = {u'jid': jid, u'tgt_type': self.tgt_type, u'tgt': self.opts[u'tgt'], u'user': self.opts[u'user'], u'fun': fun, u'arg': args}
    try:
        if isinstance(jid, bytes):
            jid = jid.decode(u'utf-8')
        # local_cache needs the minion list to lay out the cache dirs.
        if (self.opts[u'master_job_cache'] == u'local_cache'):
            self.returners[u'{0}.save_load'.format(self.opts[u'master_job_cache'])](jid, job_load, minions=self.targets.keys())
        else:
            self.returners[u'{0}.save_load'.format(self.opts[u'master_job_cache'])](jid, job_load)
    except Exception as exc:
        # Best-effort: a broken job cache must not abort the run.
        log.exception(exc)
        log.error(u'Could not save load with returner %s: %s', self.opts[u'master_job_cache'], exc)
    if self.opts.get(u'verbose'):
        msg = u'Executing job with jid {0}'.format(jid)
        print(msg)
        print(((u'-' * len(msg)) + u'\n'))
        print(u'')
    sret = {}
    outputter = self.opts.get(u'output', u'nested')
    final_exit = 0
    for ret in self.handle_ssh():
        host = next(six.iterkeys(ret))
        # Any non-dict return or nonzero retcode marks the run failed.
        if isinstance(ret[host], dict):
            host_ret = ret[host].get(u'retcode', 0)
            if (host_ret != 0):
                final_exit = 1
        else:
            final_exit = 1
        self.cache_job(jid, host, ret[host], fun)
        ret = self.key_deploy(host, ret)
        # Collapse pure ssh transport errors down to the stderr string.
        if (isinstance(ret[host], dict) and ret[host].get(u'stderr', u'').startswith(u'ssh:')):
            ret[host] = ret[host][u'stderr']
        if (not isinstance(ret[host], dict)):
            p_data = {host: ret[host]}
        elif (u'return' not in ret[host]):
            p_data = ret
        else:
            outputter = ret[host].get(u'out', self.opts.get(u'output', u'nested'))
            p_data = {host: ret[host].get(u'return', {})}
        if self.opts.get(u'static'):
            # static: buffer everything and print once at the end.
            sret.update(p_data)
        else:
            salt.output.display_output(p_data, outputter, self.opts)
        if self.event:
            self.event.fire_event(ret, salt.utils.event.tagify([jid, u'ret', host], u'job'))
    if self.opts.get(u'static'):
        salt.output.display_output(sret, outputter, self.opts)
    if final_exit:
        sys.exit(salt.defaults.exitcodes.EX_AGGREGATE)
|
def __arg_comps(self):
    '''
    Return the function name and the arg list as ``(fun, args, kwargs)``
    parsed from ``self.argv``.
    '''
    fun = self.argv[0] if self.argv else u''
    parsed = salt.utils.args.parse_input(
        self.argv[1:], condition=False, no_parse=self.opts.get(u'no_parse', []))
    return (fun, parsed[0], parsed[1])
|
'Properly escape argument to protect special characters from shell
interpretation. This avoids having to do tricky argument quoting.
Effectively just escape all characters in the argument that are not
alphanumeric!'
| def _escape_arg(self, arg):
| if self.winrm:
return arg
return u''.join([((u'\\' + char) if re.match(sdecode('\\W'), char) else char) for char in arg])
|
def deploy(self):
    '''Deploy the salt-thin tarball (and any ext_mods) to the target.'''
    remote_path = os.path.join(self.thin_dir, u'salt-thin.tgz')
    self.shell.send(self.thin, remote_path)
    self.deploy_ext()
    return True
|
def deploy_ext(self):
    '''Deploy the ext_mods tarball when one was generated.'''
    mod_file = self.mods.get(u'file')
    if mod_file:
        self.shell.send(mod_file, os.path.join(self.thin_dir, u'salt-ext_mods.tgz'))
    return True
|
def run(self, deploy_attempted=False):
    '''
    Execute the routine, which can be one of:
      1. a raw shell command,
      2. a wrapper func (or mine call),
      3. a remote Salt command (shimmed, blocking).

    Returns a tuple of (stdout, stderr, retcode); stderr is None for the
    wrapper-func path.
    '''
    out = err = code = None
    if self.opts.get(u'raw_shell', False):
        escaped = [self._escape_arg(arg) for arg in self.argv]
        out, err, code = self.shell.exec_cmd(u' '.join(escaped))
    elif self.fun in self.wfuncs or self.mine:
        out, code = self.run_wfunc()
    else:
        out, err, code = self.cmd_block()
    return (out, err, code)
|
def run_wfunc(self):
    '''
    Execute a wrapper function (or a mine function when ``self.mine``).

    Builds the remote minion's opts/grains/pillar package (optionally
    cached on disk), constructs a FunctionWrapper against it, then calls
    the requested wrapper function.

    Returns a tuple of (json_data, retcode).
    '''
    # NOTE: data_cache is hard-coded off, which forces refresh below.
    data_cache = False
    data = None
    cdir = os.path.join(self.opts[u'cachedir'], u'minions', self.id)
    if (not os.path.isdir(cdir)):
        os.makedirs(cdir)
    datap = os.path.join(cdir, u'ssh_data.p')
    refresh = False
    if (not os.path.isfile(datap)):
        refresh = True
    else:
        # Cache age in minutes vs. cache_life (default 60).
        passed_time = ((time.time() - os.stat(datap).st_mtime) / 60)
        if (passed_time > self.opts.get(u'cache_life', 60)):
            refresh = True
    if self.opts.get(u'refresh_cache'):
        refresh = True
    conf_grains = {}
    # Grains defined in the roster/config override remote-collected ones.
    if (u'ssh_grains' in self.opts):
        conf_grains = self.opts[u'ssh_grains']
    if (not data_cache):
        refresh = True
    if refresh:
        # Pull the remote side's opts package via test.opts_pkg.
        pre_wrapper = salt.client.ssh.wrapper.FunctionWrapper(self.opts, self.id, fsclient=self.fsclient, minion_opts=self.minion_opts, **self.target)
        opts_pkg = pre_wrapper[u'test.opts_pkg']()
        if (u'_error' in opts_pkg):
            # Refresh failed -- propagate the error payload as-is.
            retcode = opts_pkg[u'retcode']
            ret = json.dumps({u'local': opts_pkg})
            return (ret, retcode)
        # Overlay master-side settings onto the remote opts package.
        opts_pkg[u'file_roots'] = self.opts[u'file_roots']
        opts_pkg[u'pillar_roots'] = self.opts[u'pillar_roots']
        opts_pkg[u'ext_pillar'] = self.opts[u'ext_pillar']
        opts_pkg[u'extension_modules'] = self.opts[u'extension_modules']
        opts_pkg[u'_ssh_version'] = self.opts[u'_ssh_version']
        opts_pkg[u'__master_opts__'] = self.context[u'master_opts']
        if (u'_caller_cachedir' in self.opts):
            opts_pkg[u'_caller_cachedir'] = self.opts[u'_caller_cachedir']
        else:
            opts_pkg[u'_caller_cachedir'] = self.opts[u'cachedir']
        opts_pkg[u'id'] = self.id
        retcode = 0
        # Apply config- and target-level grain overrides.
        for grain in conf_grains:
            opts_pkg[u'grains'][grain] = conf_grains[grain]
        if (u'grains' in self.target):
            for grain in self.target[u'grains']:
                opts_pkg[u'grains'][grain] = self.target[u'grains'][grain]
        popts = {}
        popts.update(opts_pkg[u'__master_opts__'])
        popts.update(opts_pkg)
        pillar = salt.pillar.Pillar(popts, opts_pkg[u'grains'], opts_pkg[u'id'], opts_pkg.get(u'environment', u'base'))
        pillar_dirs = {}
        pillar_data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
        data = {u'opts': opts_pkg, u'grains': opts_pkg[u'grains'], u'pillar': pillar_data}
        if data_cache:
            with salt.utils.files.fopen(datap, u'w+b') as fp_:
                fp_.write(self.serial.dumps(data))
    if ((not data) and data_cache):
        # Load the previously cached package from disk.
        with salt.utils.files.fopen(datap, u'rb') as fp_:
            data = self.serial.load(fp_)
    opts = data.get(u'opts', {})
    opts[u'grains'] = data.get(u'grains')
    # Re-apply grain overrides on top of the (possibly cached) data.
    for grain in conf_grains:
        opts[u'grains'][grain] = conf_grains[grain]
    if (u'grains' in self.target):
        for grain in self.target[u'grains']:
            opts[u'grains'][grain] = self.target[u'grains'][grain]
    opts[u'pillar'] = data.get(u'pillar')
    wrapper = salt.client.ssh.wrapper.FunctionWrapper(opts, self.id, fsclient=self.fsclient, minion_opts=self.minion_opts, **self.target)
    self.wfuncs = salt.loader.ssh_wrapper(opts, wrapper, self.context)
    wrapper.wfuncs = self.wfuncs
    if self.mine:
        # Resolve mine function/args: roster mine_functions first, then
        # pillar, then master config (in that order).
        mine_args = None
        mine_fun_data = None
        mine_fun = self.fun
        if (self.mine_functions and (self.fun in self.mine_functions)):
            mine_fun_data = self.mine_functions[self.fun]
        elif (opts[u'pillar'] and (self.fun in opts[u'pillar'].get(u'mine_functions', {}))):
            mine_fun_data = opts[u'pillar'][u'mine_functions'][self.fun]
        elif (self.fun in self.context[u'master_opts'].get(u'mine_functions', {})):
            mine_fun_data = self.context[u'master_opts'][u'mine_functions'][self.fun]
        if isinstance(mine_fun_data, dict):
            mine_fun = mine_fun_data.pop(u'mine_function', mine_fun)
            mine_args = mine_fun_data
        elif isinstance(mine_fun_data, list):
            # A list may embed a {'mine_function': ...} item naming the
            # actual function; the rest of the list are the args.
            for item in mine_fun_data[:]:
                if (isinstance(item, dict) and (u'mine_function' in item)):
                    mine_fun = item[u'mine_function']
                    mine_fun_data.pop(mine_fun_data.index(item))
            mine_args = mine_fun_data
        else:
            mine_args = mine_fun_data
        if isinstance(mine_args, dict):
            self.args = []
            self.kwargs = mine_args
        elif isinstance(mine_args, list):
            self.args = mine_args
            self.kwargs = {}
    try:
        if self.mine:
            result = wrapper[mine_fun](*self.args, **self.kwargs)
        else:
            result = self.wfuncs[self.fun](*self.args, **self.kwargs)
    except TypeError as exc:
        result = u'TypeError encountered executing {0}: {1}'.format(self.fun, exc)
        log.error(result, exc_info_on_loglevel=logging.DEBUG)
        retcode = 1
    except Exception as exc:
        result = u'An Exception occurred while executing {0}: {1}'.format(self.fun, exc)
        log.error(result, exc_info_on_loglevel=logging.DEBUG)
        retcode = 1
    # Normalize the result into a {'local': ...} JSON envelope.
    if (isinstance(result, dict) and (u'local' in result)):
        ret = json.dumps({u'local': result[u'local']})
    else:
        ret = json.dumps({u'local': {u'return': result}})
    return (ret, retcode)
|
def _cmd_str(self):
    '''
    Prepare the command string: embed the thin options into the Python
    shim, base64-encode it, and wrap it in the sh (or winrm) shim.
    '''
    sudo = (u'sudo' if self.target[u'sudo'] else u'')
    sudo_user = self.target[u'sudo_user']
    if (u'_caller_cachedir' in self.opts):
        cachedir = self.opts[u'_caller_cachedir']
    else:
        cachedir = self.opts[u'cachedir']
    thin_sum = salt.utils.thin.thin_sum(cachedir, u'sha1')
    debug = u''
    if (not self.opts.get(u'log_level')):
        self.opts[u'log_level'] = u'info'
    # Forward shim debug output only when running at debug level or lower.
    if (salt.log.LOG_LEVELS[u'debug'] >= salt.log.LOG_LEVELS[self.opts.get(u'log_level', u'info')]):
        debug = u'1'
    # Substitute the runtime options into the Python shim source.
    arg_str = u'\nOPTIONS = OBJ()\nOPTIONS.config = """\n{0}\n"""\nOPTIONS.delimiter = u\'{1}\'\nOPTIONS.saltdir = u\'{2}\'\nOPTIONS.checksum = u\'{3}\'\nOPTIONS.hashfunc = u\'{4}\'\nOPTIONS.version = u\'{5}\'\nOPTIONS.ext_mods = u\'{6}\'\nOPTIONS.wipe = {7}\nOPTIONS.tty = {8}\nOPTIONS.cmd_umask = {9}\nARGS = {10}\n'.format(self.minion_config, RSTR, self.thin_dir, thin_sum, u'sha1', salt.version.__version__, self.mods.get(u'version', u''), self.wipe, self.tty, self.cmd_umask, self.argv)
    py_code = SSH_PY_SHIM.replace(u'#%%OPTS', arg_str)
    if six.PY2:
        py_code_enc = py_code.encode(u'base64')
    else:
        py_code_enc = base64.encodebytes(py_code.encode(u'utf-8')).decode(u'utf-8')
    if (not self.winrm):
        cmd = SSH_SH_SHIM.format(DEBUG=debug, SUDO=sudo, SUDO_USER=sudo_user, SSH_PY_CODE=py_code_enc, HOST_PY_MAJOR=sys.version_info[0])
    else:
        cmd = saltwinshell.gen_shim(py_code_enc)
    return cmd
|
def shim_cmd(self, cmd_str, extension=u'py'):
    '''
    Run a shim command.

    If tty (or winrm) is enabled, the shim must be copied to the target
    system and executed there; otherwise it is piped over ssh directly.
    '''
    if ((not self.tty) and (not self.winrm)):
        return self.shell.exec_cmd(cmd_str)
    # Write the shim locally, push it over, then delete the local copy.
    with tempfile.NamedTemporaryFile(mode=u'w+b', prefix=u'shim_', delete=False) as shim_tmp_file:
        shim_tmp_file.write(salt.utils.stringutils.to_bytes(cmd_str))
    # Random remote name avoids collisions between concurrent runs.
    target_shim_file = u'.{0}.{1}'.format(binascii.hexlify(os.urandom(6)), extension)
    if self.winrm:
        target_shim_file = saltwinshell.get_target_shim_file(self, target_shim_file)
    self.shell.send(shim_tmp_file.name, target_shim_file, makedirs=True)
    try:
        os.remove(shim_tmp_file.name)
    except IOError:
        pass
    # Execute with the interpreter matching the shim's extension/transport.
    if (extension == u'ps1'):
        ret = self.shell.exec_cmd(u'"powershell {0}"'.format(target_shim_file))
    elif (not self.winrm):
        ret = self.shell.exec_cmd(u"/bin/sh '$HOME/{0}'".format(target_shim_file))
    else:
        ret = saltwinshell.call_python(self, target_shim_file)
    # Clean up the remote shim file.
    if (not self.winrm):
        self.shell.exec_cmd(u"rm '$HOME/{0}'".format(target_shim_file))
    else:
        self.shell.exec_cmd(u'del {0}'.format(target_shim_file))
    return ret
|
def cmd_block(self, is_retry=False):
    '''
    Prepare the pre-check command to send to the subsystem:
      1. execute SHIM + command
      2. check if SHIM returns a master request or if it completed
      3. handle any master request (thin / ext_mods deploy)
      4. re-execute SHIM + command
      5. split SHIM results from command results
      6. return command results as (stdout, stderr, retcode)
    '''
    self.argv = _convert_args(self.argv)
    log.debug(u'Performing shimmed, blocking command as follows:\n%s', u' '.join(self.argv))
    cmd_str = self._cmd_str()
    (stdout, stderr, retcode) = self.shim_cmd(cmd_str)
    log.trace(u'STDOUT %s\n%s', self.target[u'host'], stdout)
    log.trace(u'STDERR %s\n%s', self.target[u'host'], stderr)
    log.debug(u'RETCODE %s: %s', self.target[u'host'], retcode)
    error = self.categorize_shim_errors(stdout, stderr, retcode)
    if error:
        if (error == u'Python environment not found on Windows system'):
            # Bootstrap Python on the Windows target and retry once.
            saltwinshell.deploy_python(self)
            (stdout, stderr, retcode) = self.shim_cmd(cmd_str)
            while re.search(RSTR_RE, stdout):
                stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
            while re.search(RSTR_RE, stderr):
                stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
        elif (error == u'Undefined SHIM state'):
            # Redeploy thin and retry once.
            self.deploy()
            (stdout, stderr, retcode) = self.shim_cmd(cmd_str)
            if ((not re.search(RSTR_RE, stdout)) or (not re.search(RSTR_RE, stderr))):
                return (u'ERROR: Failure deploying thin, undefined state: {0}'.format(stdout), stderr, retcode)
            while re.search(RSTR_RE, stdout):
                stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
            while re.search(RSTR_RE, stderr):
                stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
        else:
            return (u'ERROR: {0}'.format(error), stderr, retcode)
    # Strip the RSTR delimiter(s) off stdout so only payload remains.
    while re.search(RSTR_RE, stdout):
        stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
    if re.search(RSTR_RE, stderr):
        # Shim completed: strip the delimiter(s) off stderr too.
        while re.search(RSTR_RE, stderr):
            stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
    else:
        # No delimiter on stderr -- the shim is asking the master for
        # something (thin or ext_mods deployment).
        shim_command = re.split(sdecode('\\r?\\n'), stdout, 1)[0].strip()
        log.debug(u'SHIM retcode(%s) and command: %s', retcode, shim_command)
        if ((u'deploy' == shim_command) and (retcode == salt.defaults.exitcodes.EX_THIN_DEPLOY)):
            self.deploy()
            (stdout, stderr, retcode) = self.shim_cmd(cmd_str)
            if ((not re.search(RSTR_RE, stdout)) or (not re.search(RSTR_RE, stderr))):
                if (not self.tty):
                    # Retry the whole block once more after deploying.
                    log.error(u'ERROR: Failure deploying thin, retrying: %s\n%s', stdout, stderr)
                    return self.cmd_block()
                elif (not re.search(RSTR_RE, stdout)):
                    log.error(u'ERROR: Failure deploying thin, retrying: %s\n%s', stdout, stderr)
            while re.search(RSTR_RE, stdout):
                stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
            if self.tty:
                # tty merges streams; stderr content is unreliable here.
                stderr = u''
            else:
                while re.search(RSTR_RE, stderr):
                    stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
        elif (u'ext_mods' == shim_command):
            self.deploy_ext()
            (stdout, stderr, retcode) = self.shim_cmd(cmd_str)
            if ((not re.search(RSTR_RE, stdout)) or (not re.search(RSTR_RE, stderr))):
                return (u'ERROR: Failure deploying ext_mods: {0}'.format(stdout), stderr, retcode)
            while re.search(RSTR_RE, stdout):
                stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
            while re.search(RSTR_RE, stderr):
                stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
    return (stdout, stderr, retcode)
|
def check_refresh(self, data, ret):
    '''Stub out check_refresh: salt-ssh needs no refresh handling.'''
    return None
|
def module_refresh(self):
    '''Module refresh is not needed for salt-ssh; stubbed out.'''
    return None
|
def _prep_ssh(self, tgt, fun, arg=(), timeout=None, tgt_type=u'glob', kwarg=None, **kwargs):
    '''
    Prepare the arguments and return a configured salt.client.ssh.SSH
    object ready to run.
    '''
    if u'expr_form' in kwargs:
        salt.utils.warn_until(u'Fluorine', u"The target type should be passed using the 'tgt_type' argument instead of 'expr_form'. Support for using 'expr_form' will be removed in Salt Fluorine.")
        tgt_type = kwargs.pop(u'expr_form')
    # Never mutate the client's own opts.
    opts = copy.deepcopy(self.opts)
    opts.update(kwargs)
    if timeout:
        opts[u'timeout'] = timeout
    arg = salt.utils.args.condition_input(arg, kwarg)
    opts.update({
        u'argv': [fun] + arg,
        u'selected_target_option': tgt_type,
        u'tgt': tgt,
        u'arg': arg,
    })
    return salt.client.ssh.SSH(opts)
|
def cmd_iter(self, tgt, fun, arg=(), timeout=None, tgt_type=u'glob', ret=u'', kwarg=None, **kwargs):
    '''
    Execute a single command via the salt-ssh subsystem and return a
    generator yielding {host: result} dicts.

    .. versionadded:: 2015.5.0
    '''
    if u'expr_form' in kwargs:
        salt.utils.warn_until(u'Fluorine', u"The target type should be passed using the 'tgt_type' argument instead of 'expr_form'. Support for using 'expr_form' will be removed in Salt Fluorine.")
        tgt_type = kwargs.pop(u'expr_form')
    ssh = self._prep_ssh(tgt, fun, arg, timeout, tgt_type, kwarg, **kwargs)
    for chunk in ssh.run_iter(jid=kwargs.get(u'jid', None)):
        yield chunk
|
def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type=u'glob', kwarg=None, **kwargs):
    '''
    Execute a single command via the salt-ssh subsystem and return all
    routines at once as one merged {host: result} dict.

    .. versionadded:: 2015.5.0
    '''
    if u'expr_form' in kwargs:
        salt.utils.warn_until(u'Fluorine', u"The target type should be passed using the 'tgt_type' argument instead of 'expr_form'. Support for using 'expr_form' will be removed in Salt Fluorine.")
        tgt_type = kwargs.pop(u'expr_form')
    ssh = self._prep_ssh(tgt, fun, arg, timeout, tgt_type, kwarg, **kwargs)
    final = {}
    for chunk in ssh.run_iter(jid=kwargs.get(u'jid', None)):
        final.update(chunk)
    return final
|
def cmd_sync(self, low):
    '''
    Execute a salt-ssh call synchronously from a low-data dict with
    'tgt', 'fun' and optional 'arg', 'timeout', 'tgt_type', 'kwarg'
    keys; any other keys are forwarded as extra kwargs.

    .. versionadded:: 2015.5.0
    WARNING: Eauth is **NOT** respected.
    '''
    kwargs = copy.deepcopy(low)
    for reserved in (u'tgt', u'fun', u'arg', u'timeout', u'tgt_type', u'kwarg'):
        kwargs.pop(reserved, None)
    return self.cmd(low[u'tgt'], low[u'fun'], low.get(u'arg', []), low.get(u'timeout'), low.get(u'tgt_type'), low.get(u'kwarg'), **kwargs)
|
def cmd_async(self, low, timeout=None):
    '''
    Execute a salt-ssh call asynchronously.

    Not implemented: always raises ``SaltClientError``.
    WARNING: Eauth is **NOT** respected.
    '''
    raise SaltClientError
|
def cmd_subset(self, tgt, fun, arg=(), timeout=None, tgt_type=u'glob', ret=u'', kwarg=None, sub=3, **kwargs):
    '''
    Execute a command on a random subset of the targeted systems.

    Same signature as :py:meth:`cmd`, plus:

    :param sub: The number of systems to execute on

    .. versionadded:: 2017.7.0
    '''
    if u'expr_form' in kwargs:
        salt.utils.warn_until(u'Fluorine', u"The target type should be passed using the 'tgt_type' argument instead of 'expr_form'. Support for using 'expr_form' will be removed in Salt Fluorine.")
        tgt_type = kwargs.pop(u'expr_form')
    # Find which targets actually expose the requested function.
    minion_ret = self.cmd(tgt, u'sys.list_functions', tgt_type=tgt_type, **kwargs)
    minions = list(minion_ret)
    random.shuffle(minions)
    f_tgt = []
    for minion in minions:
        if len(f_tgt) >= sub:
            break
        if fun in minion_ret[minion][u'return']:
            f_tgt.append(minion)
    return self.cmd_iter(f_tgt, fun, arg, timeout, tgt_type=u'list', ret=ret, kwarg=kwarg, **kwargs)
|
def get_error(self, errstr):
    '''
    Parse out an error and return a targeted error string: the first
    line that is neither a pseudo-terminal warning nor a known-hosts
    notice. Falls back to the whole ``errstr`` when every line is noise.
    '''
    for line in errstr.split(u'\n'):
        if line.startswith(u'ssh:'):
            return line
        # Skip noise lines ssh prints on every connection.
        if line.startswith(u'Pseudo-terminal') or u'to the list of known hosts.' in line:
            continue
        return line
    return errstr
|
'Return options for the ssh command base for Salt to call'
| def _key_opts(self):
| options = [u'KbdInteractiveAuthentication=no']
if self.passwd:
options.append(u'PasswordAuthentication=yes')
else:
options.append(u'PasswordAuthentication=no')
if (self.opts.get(u'_ssh_version', (0,)) > (4, 9)):
options.append(u'GSSAPIAuthentication=no')
options.append(u'ConnectTimeout={0}'.format(self.timeout))
if self.opts.get(u'ignore_host_keys'):
options.append(u'StrictHostKeyChecking=no')
if self.opts.get(u'no_host_keys'):
options.extend([u'StrictHostKeyChecking=no', u'UserKnownHostsFile=/dev/null'])
known_hosts = self.opts.get(u'known_hosts_file')
if (known_hosts and os.path.isfile(known_hosts)):
options.append(u'UserKnownHostsFile={0}'.format(known_hosts))
if self.port:
options.append(u'Port={0}'.format(self.port))
if self.priv:
options.append(u'IdentityFile={0}'.format(self.priv))
if self.user:
options.append(u'User={0}'.format(self.user))
if self.identities_only:
options.append(u'IdentitiesOnly=yes')
ret = []
for option in options:
ret.append(u'-o {0} '.format(option))
return u''.join(ret)
|
'Return options to pass to ssh'
| def _passwd_opts(self):
| options = [u'ControlMaster=auto', u'StrictHostKeyChecking=no']
if (self.opts[u'_ssh_version'] > (4, 9)):
options.append(u'GSSAPIAuthentication=no')
options.append(u'ConnectTimeout={0}'.format(self.timeout))
if self.opts.get(u'ignore_host_keys'):
options.append(u'StrictHostKeyChecking=no')
if self.opts.get(u'no_host_keys'):
options.extend([u'StrictHostKeyChecking=no', u'UserKnownHostsFile=/dev/null'])
if self.passwd:
options.extend([u'PasswordAuthentication=yes', u'PubkeyAuthentication=yes'])
else:
options.extend([u'PasswordAuthentication=no', u'PubkeyAuthentication=yes', u'KbdInteractiveAuthentication=no', u'ChallengeResponseAuthentication=no', u'BatchMode=yes'])
if self.port:
options.append(u'Port={0}'.format(self.port))
if self.user:
options.append(u'User={0}'.format(self.user))
if self.identities_only:
options.append(u'IdentitiesOnly=yes')
ret = []
for option in options:
ret.append(u'-o {0} '.format(option))
return u''.join(ret)
|
'Return the string to execute ssh-copy-id'
| def _copy_id_str_old(self):
| if self.passwd:
return u"{0} {1} '{2} -p {3} {4} {5}@{6}'".format(u'ssh-copy-id', u'-i {0}.pub'.format(self.priv), self._passwd_opts(), self.port, self._ssh_opts(), self.user, self.host)
return None
|
'Since newer ssh-copy-id commands ingest option differently we need to
have two commands'
| def _copy_id_str_new(self):
| if self.passwd:
return u'{0} {1} {2} -p {3} {4} {5}@{6}'.format(u'ssh-copy-id', u'-i {0}.pub'.format(self.priv), self._passwd_opts(), self.port, self._ssh_opts(), self.user, self.host)
return None
|
def copy_id(self):
    '''
    Execute ssh-copy-id to plant the id file on the target, retrying
    with the new-style argument format when the old style is rejected.
    '''
    stdout, stderr, retcode = self._run_cmd(self._copy_id_str_old())
    # 'Usage' in stderr means this ssh-copy-id rejects the old options.
    if salt.defaults.exitcodes.EX_OK != retcode and u'Usage' in stderr:
        stdout, stderr, retcode = self._run_cmd(self._copy_id_str_new())
    return (stdout, stderr, retcode)
|
def _cmd_str(self, cmd, ssh=u'ssh'):
    '''
    Assemble the full ssh/scp command line used to reach the target and
    run *cmd*; returns it as a single string.
    '''
    parts = [ssh]
    is_scp = ssh == u'scp'
    if not is_scp:
        parts.append(self.host)
    if self.tty and ssh == u'ssh':
        # Force pseudo-terminal allocation.
        parts.append(u'-t -t')
    if self.passwd or self.priv:
        # Key-based options win when a private key is configured.
        parts.append((self.priv and self._key_opts()) or self._passwd_opts())
    if not is_scp and self.remote_port_forwards:
        forwards = self.remote_port_forwards.split(u',')
        parts.append(u' '.join(u'-R {0}'.format(fwd) for fwd in forwards))
    if self.ssh_options:
        parts.append(self._ssh_opts())
    parts.append(cmd)
    return u' '.join(parts)
def _old_run_cmd(self, cmd):
    '''
    Run *cmd* through the shell, blocking until it exits; returns
    (stdout, stderr, retcode), or ('local', 'Unknown Error', None) when
    the process cannot be spawned at all.
    '''
    try:
        proc = subprocess.Popen(
            cmd,
            shell=True,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )
        out, err = proc.communicate()
        return (out, err, proc.returncode)
    except Exception:
        # Historical contract: spawn failures are reported, not raised.
        return (u'local', u'Unknown Error', None)
def _run_nb_cmd(self, cmd):
    '''
    Generator: run *cmd* non-blocking through the shell and yield
    (stdout, stderr, retcode) chunks as output arrives; yields a single
    ('', 'Unknown Error', None) tuple if anything goes wrong.
    '''
    try:
        proc = salt.utils.nb_popen.NonBlockingPopen(cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        while True:
            # Small poll interval to avoid busy-waiting on the child.
            time.sleep(0.1)
            out = proc.recv()
            err = proc.recv_err()
            rcode = proc.returncode
            # Both streams exhausted: the child has finished.
            if ((out is None) and (err is None)):
                break
            if err:
                err = self.get_error(err)
            (yield (out, err, rcode))
    except Exception:
        (yield (u'', u'Unknown Error', None))
def exec_nb_cmd(self, cmd):
    '''
    Generator: run the ssh command non-blocking, yielding
    (None, None, None) while it runs and finally one
    (stdout, stderr, retcode) tuple with the accumulated output.
    '''
    r_out = []
    r_err = []
    rcode = None
    cmd = self._cmd_str(cmd)
    logmsg = u'Executing non-blocking command: {0}'.format(cmd)
    if self.passwd:
        # Never leak the login password into the logs.
        logmsg = logmsg.replace(self.passwd, (u'*' * 6))
    log.debug(logmsg)
    for (out, err, rcode) in self._run_nb_cmd(cmd):
        if (out is not None):
            r_out.append(out)
        if (err is not None):
            r_err.append(err)
        (yield (None, None, None))
    (yield (u''.join(r_out), u''.join(r_err), rcode))
def exec_cmd(self, cmd):
    '''
    Execute *cmd* on the remote host over ssh, blocking until it exits;
    returns (stdout, stderr, retcode).
    '''
    cmd = self._cmd_str(cmd)
    logmsg = u'Executing command: {0}'.format(cmd)
    if self.passwd:
        # Mask the password before logging the command line.
        logmsg = logmsg.replace(self.passwd, (u'*' * 6))
    if ((u'decode("base64")' in logmsg) or (u'base64.b64decode(' in logmsg)):
        # Shim payloads are huge base64 blobs; keep them out of DEBUG.
        log.debug(u'Executed SHIM command. Command logged to TRACE')
        log.trace(logmsg)
    else:
        log.debug(logmsg)
    ret = self._run_cmd(cmd)
    return ret
def send(self, local, remote, makedirs=False):
    '''
    scp *local* to *remote* on the target host; optionally create the
    remote parent directory first. Returns (stdout, stderr, retcode).
    '''
    if makedirs:
        self.exec_cmd(u'mkdir -p {0}'.format(os.path.dirname(remote)))
    host = self.host
    if (u':' in host):
        # IPv6 addresses must be bracketed for scp's host:path syntax.
        host = u'[{0}]'.format(host)
    cmd = u'{0} {1}:{2}'.format(local, host, remote)
    cmd = self._cmd_str(cmd, ssh=u'scp')
    logmsg = u'Executing command: {0}'.format(cmd)
    if self.passwd:
        logmsg = logmsg.replace(self.passwd, (u'*' * 6))
    log.debug(logmsg)
    return self._run_cmd(cmd)
def _run_cmd(self, cmd, key_accept=False, passwd_retries=3):
    '''
    Execute a shell command via a VT, blocking; assumes the command is an
    ssh invocation. Drives the interactive prompts (password, host-key
    confirmation, ext_mods handshake) and returns
    (stdout, stderr, retcode). Auth failures return retcode 254.
    '''
    term = salt.utils.vt.Terminal(cmd, shell=True, log_stdout=True, log_stdout_level=u'trace', log_stderr=True, log_stderr_level=u'trace', stream_stdout=False, stream_stderr=False)
    sent_passwd = 0
    send_password = True
    ret_stdout = u''
    ret_stderr = u''
    old_stdout = u''
    try:
        while term.has_unread_data:
            (stdout, stderr) = term.recv()
            if stdout:
                ret_stdout += stdout
                # Join with the previous chunk so prompts split across
                # reads are still matched by the regexes below.
                buff = (old_stdout + stdout)
            else:
                buff = stdout
            if stderr:
                ret_stderr += stderr
            if (buff and RSTR_RE.search(buff)):
                # The shim delimiter appeared: the remote side is running,
                # so any later password-looking text is payload, not a prompt.
                send_password = False
            if (buff and SSH_PASSWORD_PROMPT_RE.search(buff) and send_password):
                if (not self.passwd):
                    return (u'', u'Permission denied, no authentication information', 254)
                if (sent_passwd < passwd_retries):
                    term.sendline(self.passwd)
                    sent_passwd += 1
                    continue
                else:
                    return (u'', u'Password authentication failed', 254)
            elif (buff and KEY_VALID_RE.search(buff)):
                # Host-key confirmation prompt.
                if key_accept:
                    term.sendline(u'yes')
                    continue
                else:
                    term.sendline(u'no')
                    ret_stdout = u'The host key needs to be accepted, to auto accept run salt-ssh with the -i flag:\n{0}'.format(stdout)
                    return (ret_stdout, u'', 254)
            elif (buff and buff.endswith(u'_||ext_mods||_')):
                # Remote shim is asking for the external modules payload.
                mods_raw = (json.dumps(self.mods, separators=(u',', u':')) + u'|_E|0|')
                term.sendline(mods_raw)
            if stdout:
                old_stdout = stdout
            time.sleep(0.01)
        return (ret_stdout, ret_stderr, term.exitstatus)
    finally:
        term.close(terminate=True, kill=True)
def __contains__(self, key):
    '''
    Membership test via key lookup. Without this, Python would fall back
    to sequence iteration (calling __getitem__ with 0, 1, ... until
    IndexError) for ``in`` checks.
    '''
    try:
        self[key]
    except KeyError:
        return False
    return True
def __getitem__(self, cmd):
    '''
    Return a callable emulating the salt local function lookup: a bare
    module name returns a prefixed FunctionWrapper, known wrapper
    functions and aliases are returned directly, and anything else gets a
    closure that executes the function on the remote host over ssh.
    '''
    if ((u'.' not in cmd) and (not self.cmd_prefix)):
        # Bare module name: return a nested wrapper so attribute-style
        # chained lookups (wrapper['mod']['fun']) work.
        kwargs = copy.deepcopy(self.kwargs)
        id_ = kwargs.pop(u'id_')
        host = kwargs.pop(u'host')
        return FunctionWrapper(self.opts, id_, host, wfuncs=self.wfuncs, mods=self.mods, fsclient=self.fsclient, cmd_prefix=cmd, aliases=self.aliases, minion_opts=self.minion_opts, **kwargs)
    if self.cmd_prefix:
        # We are a module-level wrapper: qualify the function name.
        cmd = u'{0}.{1}'.format(self.cmd_prefix, cmd)
    if (cmd in self.wfuncs):
        return self.wfuncs[cmd]
    if (cmd in self.aliases):
        return self.aliases[cmd]
    def caller(*args, **kwargs):
        '''
        The remote execution function
        '''
        argv = [cmd]
        argv.extend([json.dumps(arg) for arg in args])
        argv.extend([u'{0}={1}'.format(key, json.dumps(val)) for (key, val) in six.iteritems(kwargs)])
        single = salt.client.ssh.Single(self.opts, argv, mods=self.mods, wipe=True, fsclient=self.fsclient, minion_opts=self.minion_opts, **self.kwargs)
        (stdout, stderr, retcode) = single.cmd_block()
        if stderr.count(u'Permission Denied'):
            return {u'_error': u'Permission Denied', u'stdout': stdout, u'stderr': stderr, u'retcode': retcode}
        try:
            ret = json.loads(stdout, object_hook=salt.utils.decode_dict)
            # Unwrap the single-minion 'local' envelope when present.
            if ((len(ret) < 2) and (u'local' in ret)):
                ret = ret[u'local']
            ret = ret.get(u'return', {})
        except ValueError:
            ret = {u'_error': u'Failed to return clean data', u'stderr': stderr, u'stdout': stdout, u'retcode': retcode}
        return ret
    return caller
def __setitem__(self, cmd, value):
    '''
    Register an alias for *cmd*, also overriding the wrapped function of
    the same name when one exists. Bare module keys cannot be assigned.
    '''
    has_dot = u'.' in cmd
    if not (has_dot or self.cmd_prefix):
        raise KeyError(u'Cannot assign to module key {0} in the FunctionWrapper'.format(cmd))
    full_key = u'{0}.{1}'.format(self.cmd_prefix, cmd) if self.cmd_prefix else cmd
    if full_key in self.wfuncs:
        # Keep the wrapped-function table in sync with the alias table.
        self.wfuncs[full_key] = value
    self.aliases[full_key] = value
def get(self, cmd, default=None):
    '''
    Mirror the behavior of dict.get: return ``self[cmd]`` when the key
    resolves, otherwise *default*.

    *default* now defaults to None so the signature actually matches the
    dict.get contract the docstring promises; existing two-argument
    callers are unaffected.
    '''
    if cmd in self:
        return self[cmd]
    return default
def load_modules(self, data=None, proxy=None):
    '''
    Load the module sets needed to compile states locally for remote
    execution over ssh: utils, serializers, states and renderers.
    Execution functions are served by the ssh wrapper.
    '''
    self.functions = self.wrapper
    self.utils = salt.loader.utils(self.opts)
    self.serializers = salt.loader.serializers(self.opts)
    # States get real local minion mods; rendering uses the ssh wrapper.
    locals_ = salt.loader.minion_mods(self.opts, utils=self.utils)
    self.states = salt.loader.states(self.opts, locals_, self.utils, self.serializers)
    self.rend = salt.loader.render(self.opts, self.functions)
def check_refresh(self, data, ret):
    '''
    No-op stub: refresh checks are not needed in this context.
    '''
    return
def module_refresh(self):
    '''
    No-op stub: module refresh is not needed in this context.
    '''
    return
def load_dynamic(self, matches):
    '''
    No-op stub: dynamic module loading is not needed in this context.
    '''
    return
def _master_tops(self):
    '''
    Evaluate the configured master_tops modules locally and return their
    merged results as a dict; returns {} when the minion id is missing or
    invalid.
    '''
    if (u'id' not in self.opts):
        log.error(u'Received call for external nodes without an id')
        return {}
    if (not salt.utils.verify.valid_id(self.opts, self.opts[u'id'])):
        return {}
    grains = {}
    ret = {}
    if (u'grains' in self.opts):
        grains = self.opts[u'grains']
    for fun in self.tops:
        # Only run tops modules explicitly enabled in master_tops config.
        if (fun not in self.opts.get(u'master_tops', {})):
            continue
        try:
            ret.update(self.tops[fun](opts=self.opts, grains=grains))
        except Exception as exc:
            # A broken tops module must not take the whole evaluation down.
            log.error(u'Top function %s failed with error %s for minion %s', fun, exc, self.opts[u'id'])
    return ret
def loads(self, msg, encoding=None, raw=False):
    '''
    Deserialize a msgpack message.

    :param encoding: PY3 support. If the data was packed with
        use_bin_type=True this distinguishes 'bytes' from 'str' by
        decoding str-typed contents as *encoding* ('utf-8' recommended).
        Otherwise all bytes/str data is decoded as *encoding* and the
        call fails if any content cannot be converted.
    :param raw: when True (with encoding=None on PY3), skip the embedded
        string decoding pass.
    '''
    try:
        # GC is paused during the unpack; presumably a deserialization
        # performance measure -- TODO confirm the original motivation.
        gc.disable()
        if (msgpack.version >= (0, 4, 0)):
            # encoding kwarg is only supported on msgpack >= 0.4.0.
            ret = msgpack.loads(msg, use_list=True, encoding=encoding)
        else:
            ret = msgpack.loads(msg, use_list=True)
        if (six.PY3 and (encoding is None) and (not raw)):
            ret = salt.transport.frame.decode_embedded_strs(ret)
    except Exception as exc:
        log.critical(u'Could not deserialize msgpack message. This often happens when trying to read a file not in binary mode. To see message payload, enable debug logging and retry. Exception: %s', exc)
        log.debug(u'Msgpack deserialization failure on message: %s', msg)
        gc.collect()
        raise
    finally:
        # NOTE(review): unconditionally re-enables GC even if a caller had
        # it disabled beforehand.
        gc.enable()
    return ret
def load(self, fn_):
    '''
    Read an open file object to the end, close it, and deserialize its
    msgpack payload; returns None for an empty file.
    '''
    payload = fn_.read()
    fn_.close()
    if not payload:
        # Empty file: nothing to deserialize.
        return None
    if six.PY3:
        return self.loads(payload, encoding=u'utf-8')
    return self.loads(payload)
def dumps(self, msg, use_bin_type=False):
    '''
    Serialize *msg* with msgpack, working around msgpack's inability to
    handle very long integers, datetime objects, immutable types and (on
    very old msgpack) ordered dicts.

    :param use_bin_type: PY3 support. Tells msgpack to differentiate
        between 'str' and 'bytes' by encoding them differently. Since
        this changes the wire protocol, it should not be used outside
        of IPC.
    '''
    try:
        if (msgpack.version >= (0, 4, 0)):
            return msgpack.dumps(msg, use_bin_type=use_bin_type)
        else:
            return msgpack.dumps(msg)
    except (OverflowError, msgpack.exceptions.PackValueError):
        # Integers wider than 64 bits cannot be packed; recursively
        # stringify them and retry.
        def verylong_encoder(obj):
            if isinstance(obj, dict):
                for (key, value) in six.iteritems(obj.copy()):
                    obj[key] = verylong_encoder(value)
                return dict(obj)
            elif isinstance(obj, (list, tuple)):
                obj = list(obj)
                for (idx, entry) in enumerate(obj):
                    obj[idx] = verylong_encoder(entry)
                return obj
            # BUGFIX: compare the *value* against 2**64, not the type
            # object itself -- 'long > pow(2, 64)' was effectively always
            # true on PY2 and 'int > pow(2, 64)' is a TypeError on PY3.
            if (six.PY2 and isinstance(obj, long) and (obj > pow(2, 64))):
                return str(obj)
            elif (six.PY3 and isinstance(obj, int) and (obj > pow(2, 64))):
                return str(obj)
            else:
                return obj
        if (msgpack.version >= (0, 4, 0)):
            return msgpack.dumps(verylong_encoder(msg), use_bin_type=use_bin_type)
        else:
            return msgpack.dumps(verylong_encoder(msg))
    except TypeError as e:
        # msgpack cannot serialize datetimes or salt's Immutable* types
        # natively; dispatch on the error message to pick the workaround.
        def default(obj):
            return msgpack.ExtType(78, obj)
        def dt_encode(obj):
            # Encode a datetime as its strftime string, pre-packed.
            datetime_str = obj.strftime(u'%Y%m%dT%H:%M:%S.%f')
            if (msgpack.version >= (0, 4, 0)):
                return msgpack.packb(datetime_str, default=default, use_bin_type=use_bin_type)
            else:
                return msgpack.packb(datetime_str, default=default)
        def datetime_encoder(obj):
            if isinstance(obj, dict):
                for (key, value) in six.iteritems(obj.copy()):
                    encodedkey = datetime_encoder(key)
                    if (key != encodedkey):
                        del obj[key]
                        key = encodedkey
                    obj[key] = datetime_encoder(value)
                return dict(obj)
            elif isinstance(obj, (list, tuple)):
                obj = list(obj)
                for (idx, entry) in enumerate(obj):
                    obj[idx] = datetime_encoder(entry)
                return obj
            if isinstance(obj, datetime.datetime):
                return dt_encode(obj)
            else:
                return obj
        def immutable_encoder(obj):
            log.debug(u'IMMUTABLE OBJ: %s', obj)
            if isinstance(obj, immutabletypes.ImmutableDict):
                return dict(obj)
            if isinstance(obj, immutabletypes.ImmutableList):
                return list(obj)
            if isinstance(obj, immutabletypes.ImmutableSet):
                return set(obj)
        if (u'datetime.datetime' in str(e)):
            if (msgpack.version >= (0, 4, 0)):
                return msgpack.dumps(datetime_encoder(msg), use_bin_type=use_bin_type)
            else:
                return msgpack.dumps(datetime_encoder(msg))
        elif (u'Immutable' in str(e)):
            if (msgpack.version >= (0, 4, 0)):
                return msgpack.dumps(msg, default=immutable_encoder, use_bin_type=use_bin_type)
            else:
                return msgpack.dumps(msg, default=immutable_encoder)
        if (msgpack.version >= (0, 2, 0)):
            # Modern msgpack: nothing left to work around, re-raise.
            raise
        # msgpack < 0.2.0 cannot handle OrderedDict: downgrade to plain
        # dicts recursively and retry.
        def odict_encoder(obj):
            if isinstance(obj, dict):
                for (key, value) in six.iteritems(obj.copy()):
                    obj[key] = odict_encoder(value)
                return dict(obj)
            elif isinstance(obj, (list, tuple)):
                obj = list(obj)
                for (idx, entry) in enumerate(obj):
                    obj[idx] = odict_encoder(entry)
                return obj
            return obj
        if (msgpack.version >= (0, 4, 0)):
            return msgpack.dumps(odict_encoder(msg), use_bin_type=use_bin_type)
        else:
            return msgpack.dumps(odict_encoder(msg))
    except (SystemError, TypeError) as exc:
        log.critical(u'Unable to serialize message! Consider upgrading msgpack. Message which failed was %s, with exception %s', msg, exc)
def dump(self, msg, fn_):
    '''
    Serialize *msg* into the given open file object and close it.
    '''
    if six.PY2:
        fn_.write(self.dumps(msg))
    else:
        # use_bin_type=True preserves the str/bytes distinction on PY3;
        # acceptable here since our own loads() reads these payloads back.
        fn_.write(self.dumps(msg, use_bin_type=True))
    fn_.close()
@property
def socket(self):
    '''
    Lazily create, configure and cache the ZeroMQ REQ socket connected to
    the master; subsequent accesses return the cached socket.
    '''
    if (not hasattr(self, u'_socket')):
        self._socket = self.context.socket(zmq.REQ)
        # hasattr guards keep compatibility across pyzmq versions that
        # may lack these socket options.
        if hasattr(zmq, u'RECONNECT_IVL_MAX'):
            self._socket.setsockopt(zmq.RECONNECT_IVL_MAX, 5000)
        self._set_tcp_keepalive()
        if self.master.startswith(u'tcp://['):
            # Master URI contains a bracketed IPv6 address.
            if hasattr(zmq, u'IPV6'):
                self._socket.setsockopt(zmq.IPV6, 1)
            elif hasattr(zmq, u'IPV4ONLY'):
                self._socket.setsockopt(zmq.IPV4ONLY, 0)
        self._socket.linger = self.linger
        if self.id_:
            self._socket.setsockopt(zmq.IDENTITY, self.id_)
        self._socket.connect(self.master)
    return self._socket
def clear_socket(self):
    '''
    Destroy the cached REQ socket, if any, after unregistering every
    socket from the poller.
    '''
    if hasattr(self, u'_socket'):
        if isinstance(self.poller.sockets, dict):
            # NOTE(review): dict-shaped poller.sockets presumably covers
            # older pyzmq releases -- confirm against the supported range.
            sockets = list(self.poller.sockets.keys())
            for socket in sockets:
                log.trace(u'Unregistering socket: %s', socket)
                self.poller.unregister(socket)
        else:
            # List of (socket, flags) pairs.
            for socket in self.poller.sockets:
                log.trace(u'Unregistering socket: %s', socket)
                self.poller.unregister(socket[0])
        del self._socket
def send(self, enc, load, tries=1, timeout=60):
    '''
    Send an {'enc': enc, 'load': load} payload to the master and return
    the deserialized reply. Polls up to *tries* times, *timeout* seconds
    each; raises SaltReqTimeoutError when every attempt times out.
    '''
    payload = {u'enc': enc}
    payload[u'load'] = load
    pkg = self.serial.dumps(payload)
    self.socket.send(pkg)
    self.poller.register(self.socket, zmq.POLLIN)
    tried = 0
    while True:
        # poll() takes milliseconds.
        polled = self.poller.poll((timeout * 1000))
        tried += 1
        if polled:
            break
        if (tries > 1):
            log.info(u'SaltReqTimeoutError: after %s seconds. (Try %s of %s)', timeout, tried, tries)
        if (tried >= tries):
            # Drop the socket so the next send starts from a clean state.
            self.clear_socket()
            raise SaltReqTimeoutError(u'SaltReqTimeoutError: after {0} seconds, ran {1} tries'.format((timeout * tried), tried))
    return self.serial.loads(self.socket.recv())
def send_auto(self, payload, tries=1, timeout=60):
    '''
    Dispatch *payload* via send(), taking the encryption type and load
    from the payload itself (defaulting to 'clear' and {}).
    '''
    return self.send(
        payload.get(u'enc', u'clear'),
        payload.get(u'load', {}),
        tries,
        timeout,
    )
def process_beacons(self, functions):
    '''
    Evaluate all configured beacons, re-merging the beacon config first in
    case pillar or grains changed; returns the beacon results (a list).
    '''
    if (u'config.merge' not in functions):
        return []
    b_conf = functions[u'config.merge'](u'beacons', self.opts[u'beacons'], omit_opts=True)
    if not b_conf:
        return []
    return self.beacons.process(b_conf, self.opts[u'grains'])
@tornado.gen.coroutine
def eval_master(self, opts, timeout=60, safe=True, failed=False, failback=False):
    '''
    Evaluate and return (master_address, pub_channel).

    In standard mode, just creates a pub_channel to the configured master.
    With master_type=func the address is computed by the configured module
    first. With master_type=failover the list of masters is looped over
    and the first one that accepts a pub_channel wins; when called after a
    connection loss, *failed* should be True so the failed master is
    pushed to the end of the list (*failback* restores the original
    master_list order instead).
    '''
    if (opts[u'master_type'] == u'disable'):
        log.warning(u'Master is set to disable, skipping connection')
        self.connected = False
        raise tornado.gen.Return((None, None))
    elif ((opts[u'master_type'] != u'str') and (opts[u'__role'] != u'syndic')):
        if (opts[u'master_type'] == u'func'):
            # Resolve the master address via the configured module.
            eval_master_func(opts)
        elif (opts[u'master_type'] == u'failover'):
            if isinstance(opts[u'master'], list):
                log.info(u'Got list of available master addresses: %s', opts[u'master'])
                if opts[u'master_shuffle']:
                    if opts[u'master_failback']:
                        # Keep the primary first; shuffle only the rest.
                        secondary_masters = opts[u'master'][1:]
                        shuffle(secondary_masters)
                        opts[u'master'][1:] = secondary_masters
                    else:
                        shuffle(opts[u'master'])
                opts[u'auth_tries'] = 0
                if (opts[u'master_failback'] and (opts[u'master_failback_interval'] == 0)):
                    opts[u'master_failback_interval'] = opts[u'master_alive_interval']
            elif (isinstance(opts[u'master'], six.string_types) and (u'master_list' not in opts)):
                # Single master configured for failover: wrap in a list.
                opts[u'master'] = [opts[u'master']]
            elif (opts[u'__role'] == u'syndic'):
                log.info(u"Syndic setting master_syndic to '%s'", opts[u'master'])
            elif failed:
                if failback:
                    # Restore the original configured ordering.
                    opts[u'master'] = opts[u'master_list']
                else:
                    log.info(u'Moving possibly failed master %s to the end of the list of masters', opts[u'master'])
                    if (opts[u'master'] in opts[u'local_masters']):
                        # Demote the failed master to the end of the list.
                        failed_master = opts[u'master']
                        opts[u'master'] = [x for x in opts[u'local_masters'] if (opts[u'master'] != x)]
                        opts[u'master'].append(failed_master)
                    else:
                        opts[u'master'] = opts[u'master_list']
            else:
                msg = u"master_type set to 'failover' but 'master' is not of type list but of type {0}".format(type(opts[u'master']))
                log.error(msg)
                sys.exit(salt.defaults.exitcodes.EX_GENERIC)
            if opts[u'retry_dns']:
                # retry_dns would block failover on DNS errors; force it off.
                msg = u"'master_type' set to 'failover' but 'retry_dns' is not 0. Setting 'retry_dns' to 0 to failover to the next master on DNS errors."
                log.critical(msg)
                opts[u'retry_dns'] = 0
        else:
            msg = u"Invalid keyword '{0}' for variable 'master_type'".format(opts[u'master_type'])
            log.error(msg)
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
    factory_kwargs = {u'timeout': timeout, u'safe': safe}
    if getattr(self, u'io_loop', None):
        factory_kwargs[u'io_loop'] = self.io_loop
    tries = opts.get(u'master_tries', 1)
    attempts = 0
    if isinstance(opts[u'master'], list):
        # Multi-master path: try each master in turn until one connects.
        conn = False
        opts[u'local_masters'] = copy.copy(opts[u'master'])
        if opts[u'random_master']:
            shuffle(opts[u'local_masters'])
        last_exc = None
        opts[u'master_uri_list'] = list()
        # Pre-resolve every master so the URI list is complete up front.
        for master in opts[u'local_masters']:
            opts[u'master'] = master
            opts.update(prep_ip_port(opts))
            opts[u'master_uri_list'].append(resolve_dns(opts)[u'master_uri'])
        while True:
            if (attempts != 0):
                # Back off between full passes over the master list.
                (yield tornado.gen.sleep(opts[u'acceptance_wait_time']))
            attempts += 1
            if (tries > 0):
                log.debug(u'Connecting to master. Attempt %s of %s', attempts, tries)
            else:
                log.debug(u'Connecting to master. Attempt %s (infinite attempts)', attempts)
            for master in opts[u'local_masters']:
                opts[u'master'] = master
                opts.update(prep_ip_port(opts))
                opts.update(resolve_dns(opts))
                if (u'master_list' not in opts):
                    opts[u'master_list'] = copy.copy(opts[u'local_masters'])
                self.opts = opts
                try:
                    pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
                    (yield pub_channel.connect())
                    conn = True
                    break
                except SaltClientError as exc:
                    last_exc = exc
                    log.info(u'Master %s could not be reached, trying next next master (if any)', opts[u'master'])
                    continue
            if (not conn):
                if (attempts == tries):
                    # Out of attempts: give up and surface the last error.
                    self.connected = False
                    self.opts[u'master'] = copy.copy(self.opts[u'local_masters'])
                    log.error(u"No master could be reached or all masters denied the minion's connection attempt.")
                    raise last_exc
            else:
                self.tok = pub_channel.auth.gen_token(u'salt')
                self.connected = True
                raise tornado.gen.Return((opts[u'master'], pub_channel))
    else:
        # Single-master path.
        if opts[u'random_master']:
            log.warning(u'random_master is True but there is only one master specified. Ignoring.')
        while True:
            if (attempts != 0):
                (yield tornado.gen.sleep(opts[u'acceptance_wait_time']))
            attempts += 1
            if (tries > 0):
                log.debug(u'Connecting to master. Attempt %s of %s', attempts, tries)
            else:
                log.debug(u'Connecting to master. Attempt %s (infinite attempts)', attempts)
            opts.update(prep_ip_port(opts))
            opts.update(resolve_dns(opts))
            try:
                if (self.opts[u'transport'] == u'detect'):
                    # Probe transports in order until one authenticates.
                    self.opts[u'detect_mode'] = True
                    for trans in (u'zeromq', u'tcp'):
                        if ((trans == u'zeromq') and (not HAS_ZMQ)):
                            continue
                        self.opts[u'transport'] = trans
                        pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
                        (yield pub_channel.connect())
                        if (not pub_channel.auth.authenticated):
                            continue
                        del self.opts[u'detect_mode']
                        break
                else:
                    pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
                    (yield pub_channel.connect())
                self.tok = pub_channel.auth.gen_token(u'salt')
                self.connected = True
                raise tornado.gen.Return((opts[u'master'], pub_channel))
            except SaltClientError as exc:
                if (attempts == tries):
                    self.connected = False
                    raise exc
def gen_modules(self, initial_load=False):
    '''
    (Re)compile pillar and reload every module set (utils, execution
    functions, serializers, returners, proxy, states, renderers, matcher,
    executors). Exposed to the CLI as sys.reload_modules:

    .. code-block:: bash

        salt '*' sys.reload_modules
    '''
    self.opts[u'pillar'] = salt.pillar.get_pillar(self.opts, self.opts[u'grains'], self.opts[u'id'], self.opts[u'environment'], pillarenv=self.opts.get(u'pillarenv')).compile_pillar()
    self.utils = salt.loader.utils(self.opts)
    self.functions = salt.loader.minion_mods(self.opts, utils=self.utils)
    self.serializers = salt.loader.serializers(self.opts)
    self.returners = salt.loader.returners(self.opts, self.functions)
    self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
    self.function_errors = {}
    self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers)
    self.rend = salt.loader.render(self.opts, self.functions)
    self.matcher = Matcher(self.opts, self.functions)
    # Re-register so a later reload picks up this same entry point.
    self.functions[u'sys.reload_modules'] = self.gen_modules
    self.executors = salt.loader.executors(self.opts)
def gen_modules(self, initial_load=False):
    '''
    (Re)load the module sets selected by the mk_* flags. Exposed to the
    CLI as sys.reload_modules:

    .. code-block:: bash

        salt '*' sys.reload_modules
    '''
    self.utils = salt.loader.utils(self.opts)
    self.functions = salt.loader.minion_mods(self.opts, utils=self.utils, whitelist=self.whitelist, initial_load=initial_load)
    self.serializers = salt.loader.serializers(self.opts)
    # Optional module sets, gated by the constructor's mk_* switches.
    if self.mk_returners:
        self.returners = salt.loader.returners(self.opts, self.functions)
    if self.mk_states:
        self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers)
    if self.mk_rend:
        self.rend = salt.loader.render(self.opts, self.functions)
    if self.mk_matcher:
        self.matcher = Matcher(self.opts, self.functions)
    self.functions[u'sys.reload_modules'] = self.gen_modules
def _create_minion_object(self, opts, timeout, safe, io_loop=None, loaded_base_name=None, jid_queue=None):
    '''
    Factory hook returning the Minion object to manage; exists so that
    subclasses can supply a different minion type.
    '''
    return Minion(opts, timeout, safe, io_loop=io_loop, loaded_base_name=loaded_base_name, jid_queue=jid_queue)
def _spawn_minions(self):
    '''
    Spawn one sign-in coroutine (and one Minion) per configured master.
    '''
    masters = self.opts[u'master']
    if ((self.opts[u'master_type'] == u'failover') or (not isinstance(self.opts[u'master'], list))):
        # Failover mode (or a single master): one minion gets the whole
        # master config and handles switching itself.
        masters = [masters]
    for master in masters:
        s_opts = copy.deepcopy(self.opts)
        s_opts[u'master'] = master
        s_opts[u'multimaster'] = True
        # Distinct loaded_base_name keeps per-master loader namespaces apart.
        minion = self._create_minion_object(s_opts, s_opts[u'auth_timeout'], False, io_loop=self.io_loop, loaded_base_name=u'salt.loader.{0}'.format(s_opts[u'master']), jid_queue=self.jid_queue)
        self.minions.append(minion)
        self.io_loop.spawn_callback(self._connect_minion, minion)
@tornado.gen.coroutine
def _connect_minion(self, minion):
    '''
    Asynchronously connect *minion* to its master, retrying forever on
    connection errors with a growing (capped) back-off.
    '''
    last = 0
    auth_wait = minion.opts[u'acceptance_wait_time']
    failed = False
    while True:
        try:
            (yield minion.connect_master(failed=failed))
            # start=False: the shared IO loop is driven by the manager.
            minion.tune_in(start=False)
            break
        except SaltClientError as exc:
            failed = True
            log.error(u'Error while bringing up minion for multi-master. Is master at %s responding?', minion.opts[u'master'])
            last = time.time()
            # Grow the back-off until the configured ceiling is reached.
            if (auth_wait < self.max_auth_wait):
                auth_wait += self.auth_wait
            (yield tornado.gen.sleep(auth_wait))
        except Exception as e:
            # Unexpected failure: log with traceback and retry immediately.
            failed = True
            log.critical(u'Unexpected error while connecting to %s', minion.opts[u'master'], exc_info=True)
def tune_in(self):
    '''
    Bind, spawn the per-master connect coroutines, and run the IO loop.
    Only the initial connections are attempted here; after that ZMQ
    handles reconnects on its own.
    '''
    self._bind()
    self._spawn_minions()
    self.io_loop.start()
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None):
    '''
    Initialize the minion: store connection state, pick/install an IO
    loop, load grains, apply the optional random startup delay, start the
    process manager and engines, and install signal handlers.

    :param opts: the minion options dict
    :param timeout: master connection timeout (seconds)
    :param safe: passed through to eval_master
    :param loaded_base_name: loader namespace override (multi-master)
    :param io_loop: existing IO loop to reuse; a new one is installed
        when None
    :param jid_queue: shared list of recently seen JIDs (dedup)
    '''
    super(Minion, self).__init__(opts)
    self.timeout = timeout
    self.safe = safe
    self._running = None
    self.win_proc = []
    self.loaded_base_name = loaded_base_name
    self.connected = False
    self.restart = False
    self.ready = False
    self.jid_queue = jid_queue
    if (io_loop is None):
        if HAS_ZMQ:
            zmq.eventloop.ioloop.install()
        self.io_loop = LOOP_CLASS.current()
    else:
        self.io_loop = io_loop
    if HAS_ZMQ:
        try:
            zmq_version_info = zmq.zmq_version_info()
        except AttributeError:
            # Old pyzmq without zmq_version_info(): parse the version string.
            zmq_version_info = tuple([int(x) for x in zmq.zmq_version().split(u'.')])
        if (zmq_version_info < (3, 2)):
            log.warning(u'You have a version of ZMQ less than ZMQ 3.2! There are known connection keep-alive issues with ZMQ < 3.2 which may result in loss of contact with minions. Please upgrade your ZMQ!')
    if (not salt.utils.platform.is_proxy()):
        self.opts[u'grains'] = salt.loader.grains(opts)
    log.info(u'Creating minion process manager')
    if self.opts[u'random_startup_delay']:
        # Spread out minion starts to avoid thundering-herd auth storms.
        sleep_time = random.randint(0, self.opts[u'random_startup_delay'])
        log.info(u'Minion sleeping for %s seconds due to configured startup_delay between 0 and %s seconds', sleep_time, self.opts[u'random_startup_delay'])
        time.sleep(sleep_time)
    self.process_manager = ProcessManager(name=u'MinionProcessManager')
    # NOTE(review): 'async' is a reserved word on Python 3.7+; this
    # keyword argument only parses on older interpreters.
    self.io_loop.spawn_callback(self.process_manager.run, async=True)
    if (not salt.utils.platform.is_proxy()):
        self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager)
    # Only install handlers when nothing else (e.g. a parent process)
    # has already claimed these signals.
    if (signal.getsignal(signal.SIGINT) is signal.SIG_DFL):
        signal.signal(signal.SIGINT, self._handle_signals)
    if (signal.getsignal(signal.SIGTERM) is signal.SIG_DFL):
        signal.signal(signal.SIGTERM, self._handle_signals)
def sync_connect_master(self, timeout=None, failed=False):
    '''
    Block until the minion is connected to a master, or until *timeout*
    seconds elapse; raises SaltDaemonNotRunning on timeout and re-raises
    any failure from the connect coroutine.
    '''
    self._sync_connect_master_success = False
    log.debug(u'sync_connect_master')
    def on_connect_master_future_done(future):
        # Mark success and break out of the blocking IO loop below.
        self._sync_connect_master_success = True
        self.io_loop.stop()
    self._connect_master_future = self.connect_master(failed=failed)
    self._connect_master_future.add_done_callback(on_connect_master_future_done)
    if timeout:
        # A timer that stops the loop without setting the success flag.
        self.io_loop.call_later(timeout, self.io_loop.stop)
    try:
        self.io_loop.start()
    except KeyboardInterrupt:
        self.destroy()
    # Surface any exception raised inside the connect coroutine here,
    # in the caller's thread.
    future_exception = self._connect_master_future.exc_info()
    if future_exception:
        raise six.reraise(*future_exception)
    if (timeout and (self._sync_connect_master_success is False)):
        raise SaltDaemonNotRunning(u'Failed to connect to the salt-master')
@tornado.gen.coroutine
def connect_master(self, failed=False):
    '''
    Return a future which completes once the minion is connected to a
    master and the post-connection setup (_post_master_init) has run.
    '''
    (master, self.pub_channel) = (yield self.eval_master(self.opts, self.timeout, self.safe, failed))
    (yield self._post_master_init(master))
@tornado.gen.coroutine
def _post_master_init(self, master):
    '''
    Finish initialization after connecting to a master: compile pillar,
    load modules, and set up the scheduler jobs (mine updates,
    master-alive pings, failback checks).

    If this function is changed, check ProxyMinion._post_master_init to
    see if the changes need to be propagated there; the two setups are
    intentionally kept separate.
    '''
    if self.connected:
        self.opts[u'master'] = master
        self.opts[u'pillar'] = (yield salt.pillar.get_async_pillar(self.opts, self.opts[u'grains'], self.opts[u'id'], self.opts[u'environment'], pillarenv=self.opts.get(u'pillarenv')).compile_pillar())
    (self.functions, self.returners, self.function_errors, self.executors) = self._load_modules()
    self.serial = salt.payload.Serial(self.opts)
    self.mod_opts = self._prep_mod_opts()
    self.matcher = Matcher(self.opts, self.functions)
    self.beacons = salt.beacons.Beacon(self.opts, self.functions)
    uid = salt.utils.get_uid(user=self.opts.get(u'user', None))
    self.proc_dir = get_proc_dir(self.opts[u'cachedir'], uid=uid)
    self.schedule = salt.utils.schedule.Schedule(self.opts, self.functions, self.returners, cleanup=[master_event(type=u'alive')])
    if (self.opts[u'mine_enabled'] and (u'mine.update' in self.functions)):
        # Periodic mine refresh.
        self.schedule.add_job({u'__mine_interval': {u'function': u'mine.update', u'minutes': self.opts[u'mine_interval'], u'jid_include': True, u'maxrunning': 2, u'return_job': self.opts.get(u'mine_return_job', False)}}, persist=True)
        log.info(u'Added mine.update to scheduler')
    else:
        self.schedule.delete_job(u'__mine_interval', persist=True)
    if ((self.opts[u'transport'] != u'tcp') and (self.opts[u'master_alive_interval'] > 0) and self.connected):
        # Periodic master-alive check (not needed for the tcp transport).
        self.schedule.add_job({master_event(type=u'alive', master=self.opts[u'master']): {u'function': u'status.master', u'seconds': self.opts[u'master_alive_interval'], u'jid_include': True, u'maxrunning': 1, u'return_job': False, u'kwargs': {u'master': self.opts[u'master'], u'connected': True}}}, persist=True)
        if (self.opts[u'master_failback'] and (u'master_list' in self.opts) and (self.opts[u'master'] != self.opts[u'master_list'][0])):
            # Not connected to the primary: ping it for failback.
            self.schedule.add_job({master_event(type=u'failback'): {u'function': u'status.ping_master', u'seconds': self.opts[u'master_failback_interval'], u'jid_include': True, u'maxrunning': 1, u'return_job': False, u'kwargs': {u'master': self.opts[u'master_list'][0]}}}, persist=True)
        else:
            self.schedule.delete_job(master_event(type=u'failback'), persist=True)
    else:
        self.schedule.delete_job(master_event(type=u'alive', master=self.opts[u'master']), persist=True)
        self.schedule.delete_job(master_event(type=u'failback'), persist=True)
    self.grains_cache = self.opts[u'grains']
    self.ready = True
def _return_retry_timer(self):
    '''
    Return the configured return retry timer (seconds), randomized within
    [return_retry_timer, return_retry_timer_max] when a max is set;
    falls back to the default on invalid bounds.
    '''
    msg = u'Minion return retry timer set to {0} seconds'
    if self.opts.get(u'return_retry_timer_max'):
        try:
            random_retry = randint(self.opts[u'return_retry_timer'], self.opts[u'return_retry_timer_max'])
            log.debug((msg.format(random_retry) + u' (randomized)'))
            return random_retry
        except ValueError:
            # randint raises when the bounds are inverted or non-integer.
            log.error(u'Invalid value (return_retry_timer: %s or return_retry_timer_max: %s). Both must be positive integers.', self.opts[u'return_retry_timer'], self.opts[u'return_retry_timer_max'])
            log.debug(msg.format(DEFAULT_MINION_OPTS[u'return_retry_timer']))
            return DEFAULT_MINION_OPTS[u'return_retry_timer']
    else:
        log.debug(msg.format(self.opts.get(u'return_retry_timer')))
        return self.opts.get(u'return_retry_timer')
def _prep_mod_opts(self):
    '''
    Return a shallow copy of the opts dict with the 'logger' key stripped
    out.
    '''
    return {key: val for (key, val) in self.opts.items() if key != u'logger'}
def _load_modules(self, force_refresh=False, notify=False, grains=None):
    '''
    Load everything from the loader and return the tuple
    (functions, returners, errors, executors); optionally caps address
    space via modules_max_memory while loading.
    '''
    modules_max_memory = False
    if ((self.opts.get(u'modules_max_memory', (-1)) > 0) and HAS_PSUTIL and HAS_RESOURCE):
        log.debug(u'modules_max_memory set, enforcing a maximum of %s', self.opts[u'modules_max_memory'])
        modules_max_memory = True
        old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
        (rss, vms) = psutil.Process(os.getpid()).memory_info()
        # Cap = current usage + the configured headroom.
        mem_limit = ((rss + vms) + self.opts[u'modules_max_memory'])
        resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
    elif (self.opts.get(u'modules_max_memory', (-1)) > 0):
        if (not HAS_PSUTIL):
            log.error(u'Unable to enforce modules_max_memory because psutil is missing')
        if (not HAS_RESOURCE):
            log.error(u'Unable to enforce modules_max_memory because resource is missing')
    if hasattr(self, u'proxy'):
        proxy = self.proxy
    else:
        proxy = None
    if (grains is None):
        self.opts[u'grains'] = salt.loader.grains(self.opts, force_refresh, proxy=proxy)
    self.utils = salt.loader.utils(self.opts, proxy=proxy)
    if self.opts.get(u'multimaster', False):
        # Multi-master: load against a copy so per-master loaders stay
        # independent.
        s_opts = copy.deepcopy(self.opts)
        functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify)
    else:
        functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
    returners = salt.loader.returners(self.opts, functions, proxy=proxy)
    errors = {}
    if (u'_errors' in functions):
        errors = functions[u'_errors']
        functions.pop(u'_errors')
    if (modules_max_memory is True):
        # Restore the previous address-space limit.
        resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
    executors = salt.loader.executors(self.opts, functions, proxy=proxy)
    return (functions, returners, errors, executors)
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True):
    '''
    Fire an event on the master, dropping the message when the master
    cannot be contacted. Returns True on success, False (sync mode) on
    failure, and None when there is nothing to send.
    '''
    load = {u'id': self.opts[u'id'], u'cmd': u'_minion_event', u'pretag': pretag, u'tok': self.tok}
    if events:
        load[u'events'] = events
    elif (data and tag):
        load[u'data'] = data
        load[u'tag'] = tag
    elif ((not data) and tag):
        load[u'data'] = {}
        load[u'tag'] = tag
    else:
        # Neither events nor a tag: nothing usable to send.
        return
    def timeout_handler(*_):
        # Async-path timeout callback; returning True swallows the error.
        log.info(u'fire_master failed: master could not be contacted. Request timed out.')
        return True
    if sync:
        try:
            self._send_req_sync(load, timeout)
        except salt.exceptions.SaltReqTimeoutError:
            log.info(u'fire_master failed: master could not be contacted. Request timed out.')
            return False
        except Exception:
            log.info(u'fire_master failed: %s', traceback.format_exc())
            return False
    else:
        with tornado.stack_context.ExceptionStackContext(timeout_handler):
            self._send_req_async(load, timeout, callback=(lambda f: None))
    return True
def _handle_decoded_payload(self, data):
    '''
    Override this method if you wish to handle the decoded data
    differently.

    Deduplicates jobs by jid, reloads modules on ``sys.reload_modules``,
    then spawns a process (or a thread when multiprocessing is disabled)
    running the job target for the payload in ``data``.
    '''
    if (u'user' in data):
        log.info(u'User %s Executing command %s with jid %s', data[u'user'], data[u'fun'], data[u'jid'])
    else:
        log.info(u'Executing command %s with jid %s', data[u'fun'], data[u'jid'])
    log.debug(u'Command details %s', data)
    log.trace(u'Started JIDs: %s', self.jid_queue)
    if (self.jid_queue is not None):
        # Drop jobs we have already started; otherwise remember this jid,
        # trimming the queue to the configured high-water mark.
        if (data[u'jid'] in self.jid_queue):
            return
        else:
            self.jid_queue.append(data[u'jid'])
            if (len(self.jid_queue) > self.opts[u'minion_jid_queue_hwm']):
                self.jid_queue.pop(0)
    if isinstance(data[u'fun'], six.string_types):
        if (data[u'fun'] == u'sys.reload_modules'):
            (self.functions, self.returners, self.function_errors, self.executors) = self._load_modules()
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners
    instance = self
    multiprocessing_enabled = self.opts.get(u'multiprocessing', True)
    if multiprocessing_enabled:
        if sys.platform.startswith(u'win'):
            # On Windows the target must re-instantiate the minion in the
            # child process; do not pickle this instance across.
            instance = None
        with default_signals(signal.SIGINT, signal.SIGTERM):
            process = SignalHandlingMultiprocessingProcess(target=self._target, args=(instance, self.opts, data, self.connected))
    else:
        process = threading.Thread(target=self._target, args=(instance, self.opts, data, self.connected), name=data[u'jid'])
    if multiprocessing_enabled:
        # Start with default signal handlers so the child does not inherit
        # the parent's custom handlers.
        with default_signals(signal.SIGINT, signal.SIGTERM):
            process.start()
    else:
        process.start()
    if (multiprocessing_enabled and (not salt.utils.platform.is_windows())):
        process.join()
    else:
        # Windows / threaded jobs are reaped later by _fallback_cleanups.
        self.win_proc.append(process)
|
def ctx(self):
    '''
    Return a single context manager covering the minion's function,
    returner and executor context dictionaries.
    '''
    clones = (
        self.functions.context_dict.clone(),
        self.returners.context_dict.clone(),
        self.executors.context_dict.clone(),
    )
    if six.PY2:
        # Python 2 has no ExitStack; contextlib.nested combines them.
        return contextlib.nested(*clones)
    stack = contextlib.ExitStack()
    for manager in clones:
        stack.enter_context(manager)
    return stack
|
@classmethod
def _thread_return(cls, minion_instance, opts, data):
    '''
    This method should be used as a threading target, start the actual
    minion side execution.

    Executes the single function named in ``data['fun']``, records the
    running job under the minion proc dir, fires progress events for
    generator returns, publishes the result back to the master and hands
    the return to any configured returners.

    :param minion_instance: minion object providing loaded functions,
        returners and executors
    :param opts: minion configuration dict for this job
    :param data: decoded publish payload (jid, fun, arg, ret, ...)
    '''
    fn_ = os.path.join(minion_instance.proc_dir, data[u'jid'])
    if (opts[u'multiprocessing'] and (not salt.utils.platform.is_windows())):
        # Forked child: restart multiprocessing logging and daemonize if
        # the configuration asks for it.
        salt.log.setup.shutdown_multiprocessing_logging()
        salt.utils.daemonize_if(opts)
        salt.log.setup.setup_multiprocessing_logging()
    salt.utils.appendproctitle(u'{0}._thread_return {1}'.format(cls.__name__, data[u'jid']))
    # Persist job data plus our pid so the job can be inspected/killed.
    sdata = {u'pid': os.getpid()}
    sdata.update(data)
    log.info(u'Starting a new job with PID %s', sdata[u'pid'])
    with salt.utils.files.fopen(fn_, u'w+b') as fp_:
        fp_.write(minion_instance.serial.dumps(sdata))
    ret = {u'success': False}
    function_name = data[u'fun']
    if (function_name in minion_instance.functions):
        try:
            if (minion_instance.connected and minion_instance.opts[u'pillar'].get(u'minion_blackout', False)):
                # Blackout mode: only saltutil.refresh_pillar and
                # whitelisted functions may run.
                if ((function_name != u'saltutil.refresh_pillar') and (function_name not in minion_instance.opts[u'pillar'].get(u'minion_blackout_whitelist', []))):
                    raise SaltInvocationError(u"Minion in blackout mode. Set 'minion_blackout' to False in pillar to resume operations. Only saltutil.refresh_pillar allowed in blackout mode.")
            func = minion_instance.functions[function_name]
            (args, kwargs) = load_args_and_kwargs(func, data[u'arg'], data)
            minion_instance.functions.pack[u'__context__'][u'retcode'] = 0
            executors = (data.get(u'module_executors') or opts.get(u'module_executors', [u'direct_call']))
            if isinstance(executors, six.string_types):
                executors = [executors]
            elif ((not isinstance(executors, list)) or (not executors)):
                raise SaltInvocationError(u'Wrong executors specification: {0}. String or non-empty list expected'.format(executors))
            if (opts.get(u'sudo_user', u'') and (executors[(-1)] != u'sudo')):
                # sudo_user forces the sudo executor to terminate the chain.
                executors[(-1)] = u'sudo'
            log.trace(u'Executors list %s', executors)
            # Walk the executor chain; the first executor that returns a
            # non-None value ends the chain.
            for name in executors:
                fname = u'{0}.execute'.format(name)
                if (fname not in minion_instance.executors):
                    raise SaltInvocationError(u"Executor '{0}' is not available".format(name))
                return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
                if (return_data is not None):
                    break
            if isinstance(return_data, types.GeneratorType):
                # Stream generator output: fire one progress event per
                # chunk and accumulate the full return (dict merge when
                # every chunk is a dict, otherwise a list).
                ind = 0
                iret = {}
                for single in return_data:
                    if (isinstance(single, dict) and isinstance(iret, dict)):
                        iret.update(single)
                    else:
                        if (not iret):
                            iret = []
                        iret.append(single)
                    tag = tagify([data[u'jid'], u'prog', opts[u'id'], str(ind)], u'job')
                    event_data = {u'return': single}
                    minion_instance._fire_master(event_data, tag)
                    ind += 1
                ret[u'return'] = iret
            else:
                ret[u'return'] = return_data
            ret[u'retcode'] = minion_instance.functions.pack[u'__context__'].get(u'retcode', 0)
            ret[u'success'] = True
        except CommandNotFoundError as exc:
            msg = u"Command required for '{0}' not found".format(function_name)
            log.debug(msg, exc_info=True)
            ret[u'return'] = u'{0}: {1}'.format(msg, exc)
            ret[u'out'] = u'nested'
        except CommandExecutionError as exc:
            log.error(u"A command in '%s' had a problem: %s", function_name, exc, exc_info_on_loglevel=logging.DEBUG)
            ret[u'return'] = u'ERROR: {0}'.format(exc)
            ret[u'out'] = u'nested'
        except SaltInvocationError as exc:
            log.error(u"Problem executing '%s': %s", function_name, exc, exc_info_on_loglevel=logging.DEBUG)
            ret[u'return'] = u"ERROR executing '{0}': {1}".format(function_name, exc)
            ret[u'out'] = u'nested'
        except TypeError as exc:
            msg = u'Passed invalid arguments to {0}: {1}\n{2}'.format(function_name, exc, func.__doc__)
            log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
            ret[u'return'] = msg
            ret[u'out'] = u'nested'
        except Exception:
            msg = u'The minion function caused an exception'
            log.warning(msg, exc_info_on_loglevel=True)
            salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
            ret[u'return'] = u'{0}: {1}'.format(msg, traceback.format_exc())
            ret[u'out'] = u'nested'
    else:
        # Unknown function: return sys.doc matches (if any) or the
        # missing-function message, including loader errors for the module.
        docs = minion_instance.functions[u'sys.doc'](u'{0}*'.format(function_name))
        if docs:
            docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
            ret[u'return'] = docs
        else:
            ret[u'return'] = minion_instance.functions.missing_fun_string(function_name)
            mod_name = function_name.split('.')[0]
            if (mod_name in minion_instance.function_errors):
                ret[u'return'] += u" Possible reasons: '{0}'".format(minion_instance.function_errors[mod_name])
        ret[u'success'] = False
        ret[u'retcode'] = 254
        ret[u'out'] = u'nested'
    ret[u'jid'] = data[u'jid']
    ret[u'fun'] = data[u'fun']
    ret[u'fun_args'] = data[u'arg']
    if (u'master_id' in data):
        ret[u'master_id'] = data[u'master_id']
    if (u'metadata' in data):
        if isinstance(data[u'metadata'], dict):
            ret[u'metadata'] = data[u'metadata']
        else:
            log.warning(u'The metadata parameter must be a dictionary. Ignoring.')
    if minion_instance.connected:
        minion_instance._return_pub(ret, timeout=minion_instance._return_retry_timer())
    # Merge any statically configured returner with the job's returners.
    if isinstance(opts.get(u'return'), six.string_types):
        if data[u'ret']:
            data[u'ret'] = u','.join((data[u'ret'], opts[u'return']))
        else:
            data[u'ret'] = opts[u'return']
    log.debug(u'minion return: %s', ret)
    if (data[u'ret'] and isinstance(data[u'ret'], six.string_types)):
        if (u'ret_config' in data):
            ret[u'ret_config'] = data[u'ret_config']
        if (u'ret_kwargs' in data):
            ret[u'ret_kwargs'] = data[u'ret_kwargs']
        ret[u'id'] = opts[u'id']
        for returner in set(data[u'ret'].split(u',')):
            try:
                returner_str = u'{0}.returner'.format(returner)
                if (returner_str in minion_instance.returners):
                    minion_instance.returners[returner_str](ret)
                else:
                    returner_err = minion_instance.returners.missing_fun_string(returner_str)
                    log.error(u'Returner %s could not be loaded: %s', returner_str, returner_err)
            except Exception as exc:
                log.exception(u'The return failed for job %s: %s', data[u'jid'], exc)
|
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
    '''
    This method should be used as a threading target, start the actual
    minion side execution.

    Multi-function variant of :func:`_thread_return`: ``data['fun']`` and
    ``data['arg']`` are parallel lists; each function is executed in order
    and its return, retcode and success are recorded per function name.
    '''
    salt.utils.appendproctitle(u'{0}._thread_multi_return {1}'.format(cls.__name__, data[u'jid']))
    ret = {u'return': {}, u'retcode': {}, u'success': {}}
    for ind in range(0, len(data[u'fun'])):
        ret[u'success'][data[u'fun'][ind]] = False
        try:
            if (minion_instance.connected and minion_instance.opts[u'pillar'].get(u'minion_blackout', False)):
                # Blackout mode: only saltutil.refresh_pillar and
                # whitelisted functions may run.
                if ((data[u'fun'][ind] != u'saltutil.refresh_pillar') and (data[u'fun'][ind] not in minion_instance.opts[u'pillar'].get(u'minion_blackout_whitelist', []))):
                    raise SaltInvocationError(u"Minion in blackout mode. Set 'minion_blackout' to False in pillar to resume operations. Only saltutil.refresh_pillar allowed in blackout mode.")
            func = minion_instance.functions[data[u'fun'][ind]]
            (args, kwargs) = load_args_and_kwargs(func, data[u'arg'][ind], data)
            minion_instance.functions.pack[u'__context__'][u'retcode'] = 0
            ret[u'return'][data[u'fun'][ind]] = func(*args, **kwargs)
            ret[u'retcode'][data[u'fun'][ind]] = minion_instance.functions.pack[u'__context__'].get(u'retcode', 0)
            ret[u'success'][data[u'fun'][ind]] = True
        except Exception as exc:
            # One failing function does not abort the remaining ones; the
            # traceback becomes that function's return value.
            trb = traceback.format_exc()
            log.warning(u'The minion function caused an exception: %s', exc)
            ret[u'return'][data[u'fun'][ind]] = trb
        ret[u'jid'] = data[u'jid']
        ret[u'fun'] = data[u'fun']
        ret[u'fun_args'] = data[u'arg']
    if (u'metadata' in data):
        ret[u'metadata'] = data[u'metadata']
    if minion_instance.connected:
        minion_instance._return_pub(ret, timeout=minion_instance._return_retry_timer())
    if data[u'ret']:
        if (u'ret_config' in data):
            ret[u'ret_config'] = data[u'ret_config']
        if (u'ret_kwargs' in data):
            ret[u'ret_kwargs'] = data[u'ret_kwargs']
        for returner in set(data[u'ret'].split(u',')):
            ret[u'id'] = opts[u'id']
            try:
                minion_instance.returners[u'{0}.returner'.format(returner)](ret)
            except Exception as exc:
                log.error(u'The return failed for job %s: %s', data[u'jid'], exc)
|
def _return_pub(self, ret, ret_cmd=u'_return', timeout=60, sync=True):
    '''
    Return the data from the executed command to the master server.

    :param ret: the return dict produced by the job
    :param ret_cmd: master-side command, '_return' or '_syndic_return'
    :param timeout: seconds to wait for the master to acknowledge
    :param sync: send synchronously when True, otherwise asynchronously
    :return: the master's reply, or '' when the send timed out or
        ``pub_ret`` is disabled
    '''
    jid = ret.get(u'jid', ret.get(u'__jid__'))
    fun = ret.get(u'fun', ret.get(u'__fun__'))
    if self.opts[u'multiprocessing']:
        # The job is finished: remove its proc-dir marker file.
        fn_ = os.path.join(self.proc_dir, jid)
        if os.path.isfile(fn_):
            try:
                os.remove(fn_)
            except (OSError, IOError):
                pass
    log.info(u'Returning information for job: %s', jid)
    if (ret_cmd == u'_syndic_return'):
        # Syndic returns carry targeting info and strip the dunder keys.
        load = {u'cmd': ret_cmd, u'id': self.opts[u'uid'], u'jid': jid, u'fun': fun, u'arg': ret.get(u'arg'), u'tgt': ret.get(u'tgt'), u'tgt_type': ret.get(u'tgt_type'), u'load': ret.get(u'__load__')}
        if (u'__master_id__' in ret):
            load[u'master_id'] = ret[u'__master_id__']
        load[u'return'] = {}
        for (key, value) in six.iteritems(ret):
            if key.startswith(u'__'):
                continue
            load[u'return'][key] = value
    else:
        load = {u'cmd': ret_cmd, u'id': self.opts[u'id']}
        for (key, value) in six.iteritems(ret):
            load[key] = value
    if (u'out' in ret):
        if isinstance(ret[u'out'], six.string_types):
            load[u'out'] = ret[u'out']
        else:
            log.error(u'Invalid outputter %s. This is likely a bug.', ret[u'out'])
    else:
        # Fall back to the outputter declared by the executed function.
        try:
            oput = self.functions[fun].__outputter__
        except (KeyError, AttributeError, TypeError):
            pass
        else:
            if isinstance(oput, six.string_types):
                load[u'out'] = oput
    if self.opts[u'cache_jobs']:
        salt.utils.minion.cache_jobs(self.opts, load[u'jid'], ret)
    if (not self.opts[u'pub_ret']):
        return u''

    def timeout_handler(*_):
        log.warning(u'The minion failed to return the job information for job %s. This is often due to the master being shut down or overloaded. If the master is running, consider increasing the worker_threads value.', jid)
        return True
    if sync:
        try:
            ret_val = self._send_req_sync(load, timeout=timeout)
        except SaltReqTimeoutError:
            timeout_handler()
            return u''
    else:
        with tornado.stack_context.ExceptionStackContext(timeout_handler):
            ret_val = self._send_req_async(load, timeout=timeout, callback=(lambda f: None))
    log.trace(u'ret_val = %s', ret_val)
    return ret_val
|
'Execute a state run based on information set in the minion config file'
| def _state_run(self):
| if self.opts[u'startup_states']:
if ((self.opts.get(u'master_type', u'str') == u'disable') and (self.opts.get(u'file_client', u'remote') == u'remote')):
log.warning(u"Cannot run startup_states when 'master_type' is set to 'disable' and 'file_client' is set to 'remote'. Skipping.")
else:
data = {u'jid': u'req', u'ret': self.opts.get(u'ext_job_cache', u'')}
if (self.opts[u'startup_states'] == u'sls'):
data[u'fun'] = u'state.sls'
data[u'arg'] = [self.opts[u'sls_list']]
elif (self.opts[u'startup_states'] == u'top'):
data[u'fun'] = u'state.top'
data[u'arg'] = [self.opts[u'top_file']]
else:
data[u'fun'] = u'state.highstate'
data[u'arg'] = []
self._handle_decoded_payload(data)
|
'Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None'
| def _refresh_grains_watcher(self, refresh_interval_in_minutes):
| if (u'__update_grains' not in self.opts.get(u'schedule', {})):
if (u'schedule' not in self.opts):
self.opts[u'schedule'] = {}
self.opts[u'schedule'].update({u'__update_grains': {u'function': u'event.fire', u'args': [{}, u'grains_refresh'], u'minutes': refresh_interval_in_minutes}})
|
def module_refresh(self, force_refresh=False, notify=False):
    '''
    Refresh the functions and returners.

    Reloads the execution modules and returners, then pushes the fresh
    function maps into the scheduler so scheduled jobs use them too.
    '''
    log.debug(u'Refreshing modules. Notify=%s', notify)
    modules = self._load_modules(force_refresh, notify=notify)
    # _load_modules also yields loader errors; they are not kept here.
    (self.functions, self.returners, _, self.executors) = modules
    self.schedule.functions = self.functions
    self.schedule.returners = self.returners
|
def beacons_refresh(self):
    '''
    Refresh the beacons by re-instantiating the Beacon manager with the
    current opts and loaded functions.
    '''
    log.debug(u'Refreshing beacons.')
    self.beacons = salt.beacons.Beacon(self.opts, self.functions)
|
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
    '''
    Refresh the pillar.

    Recompiles pillar data from the master (only while connected), then
    refreshes the loaded modules, since pillar changes can affect module
    availability and behavior.

    :param force_refresh: passed through to :func:`module_refresh`
    '''
    if self.connected:
        log.debug(u'Refreshing pillar')
        try:
            self.opts[u'pillar'] = (yield salt.pillar.get_async_pillar(self.opts, self.opts[u'grains'], self.opts[u'id'], self.opts[u'environment'], pillarenv=self.opts.get(u'pillarenv')).compile_pillar())
        except SaltClientError:
            # Best effort: keep the previous pillar if no master responds.
            log.error(u'Pillar data could not be refreshed. One or more masters may be down!')
    self.module_refresh(force_refresh)
|
def manage_schedule(self, tag, data):
    '''
    Handle a schedule-management event from the event bus.

    Dispatches on the ``func`` field of the event data to the matching
    Schedule operation; unknown ``func`` values are silently ignored.
    '''
    func = data.get(u'func', None)
    name = data.get(u'name', None)
    schedule = data.get(u'schedule', None)
    where = data.get(u'where', None)
    persist = data.get(u'persist', None)
    # Map each supported operation onto the scheduler call it performs.
    actions = {
        u'delete': lambda: self.schedule.delete_job(name, persist),
        u'add': lambda: self.schedule.add_job(schedule, persist),
        u'modify': lambda: self.schedule.modify_job(name, schedule, persist),
        u'enable': lambda: self.schedule.enable_schedule(),
        u'disable': lambda: self.schedule.disable_schedule(),
        u'enable_job': lambda: self.schedule.enable_job(name, persist),
        u'run_job': lambda: self.schedule.run_job(name),
        u'disable_job': lambda: self.schedule.disable_job(name, persist),
        u'reload': lambda: self.schedule.reload(schedule),
        u'list': lambda: self.schedule.list(where),
        u'save_schedule': lambda: self.schedule.save_schedule(),
    }
    action = actions.get(func)
    if action is not None:
        action()
|
def manage_beacons(self, tag, data):
    '''
    Manage Beacons.

    Dispatches on the ``func`` field of the event data to the matching
    Beacon operation; unknown ``func`` values are silently ignored.
    '''
    func = data.get(u'func', None)
    name = data.get(u'name', None)
    beacon_data = data.get(u'beacon_data', None)
    # Map each supported operation onto the beacon call it performs.
    actions = {
        u'add': lambda: self.beacons.add_beacon(name, beacon_data),
        u'modify': lambda: self.beacons.modify_beacon(name, beacon_data),
        u'delete': lambda: self.beacons.delete_beacon(name),
        u'enable': lambda: self.beacons.enable_beacons(),
        u'disable': lambda: self.beacons.disable_beacons(),
        u'enable_beacon': lambda: self.beacons.enable_beacon(name),
        u'disable_beacon': lambda: self.beacons.disable_beacon(name),
        u'list': lambda: self.beacons.list_beacons(),
    }
    action = actions.get(func)
    if action is not None:
        action()
|
'Set the salt-minion main process environment according to
the data contained in the minion event data'
| def environ_setenv(self, tag, data):
| environ = data.get(u'environ', None)
if (environ is None):
return False
false_unsets = data.get(u'false_unsets', False)
clear_all = data.get(u'clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
|
def _pre_tune(self):
    '''
    Set the minion running flag and issue the appropriate warnings if
    the minion cannot be started or is already running.

    ``self._running`` acts as a tri-state: None (never started), False
    (scheduled to stop) and True (running).
    '''
    if (self._running is None):
        self._running = True
    elif (self._running is False):
        log.error(u'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__)
        return
    elif (self._running is True):
        log.error(u'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__)
        return
    try:
        log.info(u"%s is starting as user '%s'", self.__class__.__name__, salt.utils.get_user())
    except Exception as err:
        # Looking up the user can fail (notably on Windows); log at DEBUG
        # there, ERROR elsewhere, and carry on.
        log.log(((salt.utils.platform.is_windows() and logging.DEBUG) or logging.ERROR), u'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err)
|
def _mine_send(self, tag, data):
    '''
    Send mine data to the master over a freshly created request channel.

    :return: the master's reply, or None when the request timed out
    '''
    data[u'tok'] = self.tok
    channel = salt.transport.Channel.factory(self.opts)
    try:
        return channel.send(data)
    except SaltReqTimeoutError:
        # Best effort: the mine update is dropped on timeout.
        log.warning(u'Unable to send mine data to master.')
        return None
|
@tornado.gen.coroutine
def handle_event(self, package):
    '''
    Handle an event from the epull_sock (all local minion events).

    Unpacks the event and dispatches on the tag: module/pillar/beacon
    refreshes, schedule and beacon management, mine sends, master event
    forwarding, master disconnect/failback/reconnect handling, schedule
    returns, salt errors and auth credential updates.
    '''
    if (not self.ready):
        raise tornado.gen.Return()
    (tag, data) = salt.utils.event.SaltEvent.unpack(package)
    log.debug(u"Minion of '%s' is handling event tag '%s'", self.opts[u'master'], tag)
    if tag.startswith(u'module_refresh'):
        self.module_refresh(force_refresh=data.get(u'force_refresh', False), notify=data.get(u'notify', False))
    elif tag.startswith(u'pillar_refresh'):
        (yield self.pillar_refresh(force_refresh=data.get(u'force_refresh', False)))
    elif tag.startswith(u'beacons_refresh'):
        self.beacons_refresh()
    elif tag.startswith(u'manage_schedule'):
        self.manage_schedule(tag, data)
    elif tag.startswith(u'manage_beacons'):
        self.manage_beacons(tag, data)
    elif tag.startswith(u'grains_refresh'):
        # Only refresh pillar when the grains actually changed (or when
        # forced), then remember the new grains snapshot.
        if (data.get(u'force_refresh', False) or (self.grains_cache != self.opts[u'grains'])):
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts[u'grains']
    elif tag.startswith(u'environ_setenv'):
        self.environ_setenv(tag, data)
    elif tag.startswith(u'_minion_mine'):
        self._mine_send(tag, data)
    elif tag.startswith(u'fire_master'):
        log.debug(u'Forwarding master event tag=%s', data[u'tag'])
        self._fire_master(data[u'data'], data[u'tag'], data[u'events'], data[u'pretag'])
    elif (tag.startswith(master_event(type=u'disconnected')) or tag.startswith(master_event(type=u'failback'))):
        # Master connectivity changed: either the current master dropped
        # or a failback to the primary master was requested.
        if (tag.startswith(master_event(type=u'disconnected')) and (data[u'master'] != self.opts[u'master'])):
            # Disconnect notice for a master we are no longer using.
            return
        if tag.startswith(master_event(type=u'failback')):
            if (data[u'master'] != self.opts[u'master_list'][0]):
                raise SaltException(u"Bad master '{0}' when mine failback is '{1}'".format(data[u'master'], self.opts[u'master']))
            # NOTE(review): this compares against self.opts[u'master'][0],
            # the first *character* of the master string - looks like it
            # should be master_list[0]; confirm before relying on it.
            elif (data[u'master'] == self.opts[u'master'][0]):
                raise SaltException(u"Already connected to '{0}'".format(data[u'master']))
        if self.connected:
            self.connected = False
            log.info(u'Connection to master %s lost', self.opts[u'master'])
            if (self.opts[u'master_type'] != u'failover'):
                # Keep pinging the same master until it comes back.
                if (self.opts[u'transport'] != u'tcp'):
                    schedule = {u'function': u'status.master', u'seconds': self.opts[u'master_alive_interval'], u'jid_include': True, u'maxrunning': 1, u'return_job': False, u'kwargs': {u'master': self.opts[u'master'], u'connected': False}}
                    self.schedule.modify_job(name=master_event(type=u'alive', master=self.opts[u'master']), schedule=schedule)
            else:
                # Failover: tear down the pub channel and try the next
                # master from the configured list.
                if (self.opts[u'transport'] != u'tcp'):
                    self.schedule.delete_job(name=master_event(type=u'alive'))
                log.info(u'Trying to tune in to next master from master-list')
                if hasattr(self, u'pub_channel'):
                    self.pub_channel.on_recv(None)
                    if hasattr(self.pub_channel, u'auth'):
                        self.pub_channel.auth.invalidate()
                    if hasattr(self.pub_channel, u'close'):
                        self.pub_channel.close()
                    del self.pub_channel
                try:
                    (master, self.pub_channel) = (yield self.eval_master(opts=self.opts, failed=True, failback=tag.startswith(master_event(type=u'failback'))))
                except SaltClientError:
                    pass
                if self.connected:
                    # Re-initialise against the new master and restore the
                    # alive/failback schedule jobs.
                    self.opts[u'master'] = master
                    log.info(u'Re-initialising subsystems for new master %s', self.opts[u'master'])
                    self.opts[u'schedule'] = self.schedule.option(u'schedule')
                    (self.functions, self.returners, self.function_errors, self.executors) = self._load_modules()
                    self.schedule.functions = self.functions
                    self.pub_channel.on_recv(self._handle_payload)
                    self._fire_master_minion_start()
                    log.info(u'Minion is ready to receive requests!')
                    if (self.opts[u'transport'] != u'tcp'):
                        schedule = {u'function': u'status.master', u'seconds': self.opts[u'master_alive_interval'], u'jid_include': True, u'maxrunning': 1, u'return_job': False, u'kwargs': {u'master': self.opts[u'master'], u'connected': True}}
                        self.schedule.modify_job(name=master_event(type=u'alive', master=self.opts[u'master']), schedule=schedule)
                        if (self.opts[u'master_failback'] and (u'master_list' in self.opts)):
                            if (self.opts[u'master'] != self.opts[u'master_list'][0]):
                                schedule = {u'function': u'status.ping_master', u'seconds': self.opts[u'master_failback_interval'], u'jid_include': True, u'maxrunning': 1, u'return_job': False, u'kwargs': {u'master': self.opts[u'master_list'][0]}}
                                self.schedule.modify_job(name=master_event(type=u'failback'), schedule=schedule)
                            else:
                                self.schedule.delete_job(name=master_event(type=u'failback'), persist=True)
                else:
                    # No master could be reached: restart the minion.
                    self.restart = True
                    self.io_loop.stop()
    elif tag.startswith(master_event(type=u'connected')):
        if ((not self.connected) and (self.opts[u'master_type'] != u'failover')):
            log.info(u'Connection to master %s re-established', self.opts[u'master'])
            self.connected = True
            if (self.opts[u'transport'] != u'tcp'):
                schedule = {u'function': u'status.master', u'seconds': self.opts[u'master_alive_interval'], u'jid_include': True, u'maxrunning': 1, u'return_job': False, u'kwargs': {u'master': self.opts[u'master'], u'connected': True}}
                self.schedule.modify_job(name=master_event(type=u'alive', master=self.opts[u'master']), schedule=schedule)
    elif tag.startswith(u'__schedule_return'):
        if data[u'schedule'].startswith(master_event(type=u'alive', master=u'')):
            if data[u'return']:
                log.debug(u'Connected to master %s', data[u'schedule'].split(master_event(type=u'alive', master=u''))[1])
        self._return_pub(data, ret_cmd=u'_return', sync=False)
    elif tag.startswith(u'_salt_error'):
        if self.connected:
            log.debug(u'Forwarding salt error event tag=%s', tag)
            self._fire_master(data, tag)
    elif tag.startswith(u'salt/auth/creds'):
        key = tuple(data[u'key'])
        log.debug(u'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data[u'creds'])
        salt.crypt.AsyncAuth.creds_map[tuple(data[u'key'])] = data[u'creds']
|
def _fallback_cleanups(self):
    '''
    Fallback cleanup routines, attempting to fix leaked processes,
    threads, etc.
    '''
    # Reap any finished multiprocessing children (active_children joins
    # terminated processes as a side effect).
    multiprocessing.active_children()
    # The win_proc bookkeeping below only applies on Windows, where job
    # processes/threads are tracked instead of joined inline.
    if (not salt.utils.platform.is_windows()):
        return
    # Iterate over a snapshot: removing from self.win_proc while iterating
    # the list directly would skip the element following each removal.
    for thread in self.win_proc[:]:
        if (not thread.is_alive()):
            thread.join()
            try:
                self.win_proc.remove(thread)
                del thread
            except (ValueError, NameError):
                pass
|
def tune_in(self, start=True):
    '''
    Lock onto the publisher. This is the main event loop for the minion.

    Connects to the master (when ``start`` is True), fires the start
    event, runs startup states, installs the periodic callbacks (master
    ping, cleanup, beacons, schedule) and then spins the IO loop.

    :param start: connect to the master and run the IO loop when True
    :rtype: None
    '''
    self._pre_tune()
    log.debug(u"Minion '%s' trying to tune in", self.opts[u'id'])
    if start:
        self.sync_connect_master()
    if self.connected:
        self._fire_master_minion_start()
        log.info(u'Minion is ready to receive requests!')
    enable_sigusr1_handler()
    salt.utils.enable_ctrl_logoff_handler()
    self._state_run()
    loop_interval = self.opts[u'loop_interval']
    try:
        if self.opts[u'grains_refresh_every']:
            log.debug(u'Enabling the grains refresher. Will run every %s minute%s.', self.opts[u'grains_refresh_every'], (u's' if (self.opts[u'grains_refresh_every'] > 1) else u''))
            self._refresh_grains_watcher(abs(self.opts[u'grains_refresh_every']))
    except Exception as exc:
        log.error(u'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: %s', exc)
    self.periodic_callbacks = {}
    # ping_interval is configured in minutes; PeriodicCallback wants ms.
    ping_interval = (self.opts.get(u'ping_interval', 0) * 60)
    if ((ping_interval > 0) and self.connected):
        def ping_master():
            try:
                if (not self._fire_master(u'ping', u'minion_ping')):
                    if (not self.opts.get(u'auth_safemode', True)):
                        log.error(u'** Master Ping failed. Attempting to restart minion**')
                        delay = self.opts.get(u'random_reauth_delay', 5)
                        log.info(u'delaying random_reauth_delay %ss', delay)
                        # Exit so the parent process manager restarts us.
                        os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
            except Exception:
                # BUGFIX: was exc_on_loglevel=..., which is not a keyword
                # Salt's logger accepts; the correct one is
                # exc_info_on_loglevel.
                log.warning(u'Attempt to ping master failed.', exc_info_on_loglevel=logging.DEBUG)
        self.periodic_callbacks[u'ping'] = tornado.ioloop.PeriodicCallback(ping_master, (ping_interval * 1000), io_loop=self.io_loop)
    self.periodic_callbacks[u'cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, (loop_interval * 1000), io_loop=self.io_loop)

    def handle_beacons():
        # Process beacons and forward any resulting events to the master.
        beacons = None
        try:
            beacons = self.process_beacons(self.functions)
        except Exception:
            log.critical(u'The beacon errored: ', exc_info=True)
        if (beacons and self.connected):
            self._fire_master(events=beacons)
    self.periodic_callbacks[u'beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, (loop_interval * 1000), io_loop=self.io_loop)

    def handle_schedule():
        self.process_schedule(self, loop_interval)
    if hasattr(self, u'schedule'):
        self.periodic_callbacks[u'schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
    for periodic_cb in six.itervalues(self.periodic_callbacks):
        periodic_cb.start()
    if (hasattr(self, u'pub_channel') and (self.pub_channel is not None)):
        self.pub_channel.on_recv(self._handle_payload)
    elif (self.opts.get(u'master_type') != u'disable'):
        log.error(u'No connection to master found. Scheduled jobs will not run.')
    if start:
        try:
            self.io_loop.start()
            if self.restart:
                self.destroy()
        except (KeyboardInterrupt, RuntimeError):
            self.destroy()
|
def destroy(self):
    '''
    Tear down the minion: mark it stopped, drop the scheduler, close the
    publish channel and stop all periodic callbacks.
    '''
    self._running = False
    if hasattr(self, u'schedule'):
        del self.schedule
    channel = getattr(self, u'pub_channel', None)
    if channel is not None:
        channel.on_recv(None)
        if hasattr(channel, u'close'):
            channel.close()
        del self.pub_channel
    if hasattr(self, u'periodic_callbacks'):
        for cb in six.itervalues(self.periodic_callbacks):
            cb.stop()
|
'Override this method if you wish to handle the decoded data
differently.'
| def _handle_decoded_payload(self, data):
| data[u'to'] = (int(data.get(u'to', self.opts[u'timeout'])) - 1)
if (data.get(u'master_id', 0) != self.opts.get(u'master_id', 1)):
self.syndic_cmd(data)
|
def syndic_cmd(self, data):
    '''
    Take the now clear load and forward it on to the client cmd.

    Republishes the job to this syndic's local minions via the async
    local client; publish failures are logged, not raised.
    '''
    if (u'tgt_type' not in data):
        data[u'tgt_type'] = u'glob'
    kwargs = {}
    # Only forward the optional fields that are actually present.
    for field in (u'master_id', u'user'):
        if (field in data):
            kwargs[field] = data[field]

    def timeout_handler(*args):
        log.warning(u'Unable to forward pub data: %s', args[1])
        return True
    # Errors from the async publish surface via timeout_handler.
    with tornado.stack_context.ExceptionStackContext(timeout_handler):
        self.local.pub_async(data[u'tgt'], data[u'fun'], data[u'arg'], data[u'tgt_type'], data[u'ret'], data[u'jid'], data[u'to'], io_loop=self.io_loop, callback=(lambda _: None), **kwargs)
|
def tune_in_no_block(self):
    '''
    Executes the tune_in sequence but omits extra logging and the
    management of the event bus assuming that these are handled outside
    the tune_in sequence.
    '''
    self.local = salt.client.get_local_client(self.opts[u'_minion_conf_file'], io_loop=self.io_loop)
    self.pub_channel.on_recv(self._process_cmd_socket)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.