desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def test_run_creates_exists(self):
    '''
    A cmd.run state whose ``creates`` target already exists must succeed
    without reporting any changes.
    '''
    state_key = 'cmd_|-echo >> {0}_|-echo >> {0}_|-run'.format(self.test_file)
    with salt.utils.files.fopen(self.state_file, 'w') as sls_fh:
        sls_fh.write(textwrap.dedent('\n echo >> {0}:\n cmd.run:\n - creates: {0}\n '.format(self.test_file)))
    result = self.run_function('state.sls', [self.state_name])
    self.assertTrue(result[state_key]['result'])
    self.assertEqual(len(result[state_key]['changes']), 0)
def test_run_creates_new(self):
    '''
    A cmd.run state whose ``creates`` target is missing must run and
    report changes.
    '''
    os.remove(self.test_file)
    state_key = 'cmd_|-echo >> {0}_|-echo >> {0}_|-run'.format(self.test_file)
    with salt.utils.files.fopen(self.state_file, 'w') as sls_fh:
        sls_fh.write(textwrap.dedent('\n echo >> {0}:\n cmd.run:\n - creates: {0}\n '.format(self.test_file)))
    result = self.run_function('state.sls', [self.state_name])
    self.assertTrue(result[state_key]['result'])
    self.assertEqual(len(result[state_key]['changes']), 4)
def test_run_redirect(self):
    '''
    A cmd.run state whose name contains a shell redirect executes
    successfully.
    '''
    state_key = 'cmd_|-echo test > {0}_|-echo test > {0}_|-run'.format(self.test_file)
    with salt.utils.files.fopen(self.state_file, 'w') as sls_fh:
        sls_fh.write(textwrap.dedent('\n echo test > {0}:\n cmd.run\n '.format(self.test_file)))
    result = self.run_function('state.sls', [self.state_name])
    self.assertTrue(result[state_key]['result'])
def test_run_watch(self):
    '''
    cmd.wait fires when the cmd.run state it watches reports changes
    (stateful "changed=true" output).
    '''
    saltines_key = 'cmd_|-saltines_|-echo changed=true_|-run'
    biscuits_key = 'cmd_|-biscuits_|-echo biscuits_|-wait'
    with salt.utils.files.fopen(self.state_file, 'w') as sls_fh:
        sls_fh.write(textwrap.dedent('\n saltines:\n cmd.run:\n - name: echo changed=true\n - cwd: /\n - stateful: True\n\n biscuits:\n cmd.wait:\n - name: echo biscuits\n - cwd: /\n - watch:\n - cmd: saltines\n '))
    result = self.run_function('state.sls', [self.state_name])
    self.assertTrue(result[saltines_key]['result'])
    self.assertTrue(result[biscuits_key]['result'])
def _check_extracted(self, path):
    '''Assert that *path* exists as a regular file after extraction.'''
    log.debug('Checking for extracted file: %s', path)
    self.assertTrue(os.path.isfile(path))
def test_archive_extracted_skip_verify(self):
    '''archive.extracted succeeds when skip_verify is set.'''
    ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=self.archive_tar_source, archive_format='tar', skip_verify=True)
    log.debug('ret = %s', ret)
    if 'Timeout' in ret:
        self.skipTest('Timeout talking to local tornado server.')
    self.assertSaltTrueReturn(ret)
    self._check_extracted(UNTAR_FILE)
def test_archive_extracted_with_source_hash(self):
    '''
    archive.extracted without skip_verify: source_hash must be verified
    correctly (requires the external resource to be reachable).
    '''
    ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=self.archive_tar_source, archive_format='tar', source_hash=ARCHIVE_TAR_HASH)
    log.debug('ret = %s', ret)
    if 'Timeout' in ret:
        self.skipTest('Timeout talking to local tornado server.')
    self.assertSaltTrueReturn(ret)
    self._check_extracted(UNTAR_FILE)
@skip_if_not_root
def test_archive_extracted_with_root_user_and_group(self):
    '''archive.extracted with user and group set to "root" (wheel on macOS).'''
    r_group = 'root'
    if salt.utils.platform.is_darwin():
        r_group = 'wheel'
    ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=self.archive_tar_source, archive_format='tar', source_hash=ARCHIVE_TAR_HASH, user='root', group=r_group)
    log.debug('ret = %s', ret)
    if 'Timeout' in ret:
        self.skipTest('Timeout talking to local tornado server.')
    self.assertSaltTrueReturn(ret)
    self._check_extracted(UNTAR_FILE)
def test_archive_extracted_with_strip_in_options(self):
    '''archive.extracted honours ``--strip`` passed via options.'''
    ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=self.archive_tar_source, source_hash=ARCHIVE_TAR_HASH, options='--strip=1', enforce_toplevel=False)
    log.debug('ret = %s', ret)
    if 'Timeout' in ret:
        self.skipTest('Timeout talking to local tornado server.')
    self.assertSaltTrueReturn(ret)
    self._check_extracted(os.path.join(ARCHIVE_DIR, 'README'))
def test_archive_extracted_with_strip_components_in_options(self):
    '''archive.extracted honours ``--strip-components`` passed via options.'''
    ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=self.archive_tar_source, source_hash=ARCHIVE_TAR_HASH, options='--strip-components=1', enforce_toplevel=False)
    log.debug('ret = %s', ret)
    if 'Timeout' in ret:
        self.skipTest('Timeout talking to local tornado server.')
    self.assertSaltTrueReturn(ret)
    self._check_extracted(os.path.join(ARCHIVE_DIR, 'README'))
def test_archive_extracted_without_archive_format(self):
    '''archive.extracted works without an explicit archive_format option.'''
    ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=self.archive_tar_source, source_hash=ARCHIVE_TAR_HASH)
    log.debug('ret = %s', ret)
    if 'Timeout' in ret:
        self.skipTest('Timeout talking to local tornado server.')
    self.assertSaltTrueReturn(ret)
    self._check_extracted(UNTAR_FILE)
def test_archive_extracted_with_cmd_unzip_false(self):
    '''archive.extracted works with use_cmd_unzip=False.'''
    ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=self.archive_tar_source, source_hash=ARCHIVE_TAR_HASH, use_cmd_unzip=False, archive_format='tar')
    log.debug('ret = %s', ret)
    if 'Timeout' in ret:
        self.skipTest('Timeout talking to local tornado server.')
    self.assertSaltTrueReturn(ret)
    self._check_extracted(UNTAR_FILE)
def test_local_archive_extracted(self):
    '''archive.extracted works with a local tar file.'''
    ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar')
    log.debug('ret = %s', ret)
    self.assertSaltTrueReturn(ret)
    self._check_extracted(UNTAR_FILE)
def test_local_archive_extracted_skip_verify(self):
    '''A local file with a bad hash still extracts when skip_verify is set.'''
    ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar', source_hash=ARCHIVE_TAR_BAD_HASH, skip_verify=True)
    log.debug('ret = %s', ret)
    self.assertSaltTrueReturn(ret)
    self._check_extracted(UNTAR_FILE)
def test_local_archive_extracted_with_source_hash(self):
    '''A local file with a valid source_hash extracts successfully.'''
    ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar', source_hash=ARCHIVE_TAR_HASH)
    log.debug('ret = %s', ret)
    self.assertSaltTrueReturn(ret)
    self._check_extracted(UNTAR_FILE)
def test_local_archive_extracted_with_bad_source_hash(self):
    '''A local file with a bad source_hash must fail the state.'''
    ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar', source_hash=ARCHIVE_TAR_BAD_HASH)
    log.debug('ret = %s', ret)
    self.assertSaltFalseReturn(ret)
@property
def test_dir(self):
    '''Directory holding all static and dynamic files for the daemon.'''
    return os.path.join(self.parent_dir, self._root_dir)
def config_file_get(self, config):
    '''
    Get the filename (path) of the configuration file for *config*.

    If the config entry declares an explicit ``path`` it is rendered with
    the substitution values; otherwise the file lives in ``config_dir``
    under the config's name.

    :param config: key into ``self.configs``
    :return str: the (possibly substituted) configuration file path
    '''
    cfgf = self.configs[config].get('path')
    if cfgf:
        # BUG FIX: str.format() returns a new string; the original call
        # discarded the result so '{…}' placeholders were never substituted.
        cfgf = cfgf.format(**self.config_subs())
    else:
        cfgf = os.path.join(self.config_dir, config)
    return cfgf
def config_dir_get(self, config):
    '''Return the parent directory of the configuration file for *config*.'''
    return os.path.dirname(self.config_file_get(config))
@property
def config_dirs(self):
    '''Return a list of configuration directories, one per config entry.'''
    return [self.config_dir_get(name) for name in self.configs]
def abs_path(self, path):
    '''Return *path* anchored inside the daemon's test_dir.'''
    return os.path.join(self.test_dir, path)
@property
def start_pid(self):
    '''PID of the called script prior to daemonizing, or None if not started.'''
    if self.process:
        return self.process.pid
    return None
@property
def parent_dir(self):
    '''
    Directory containing everything generated for running scripts —
    possibly shared by multiple scripts.

    Created lazily as a temp dir when unset; an explicitly provided path
    is normalized, created if missing, and rejected if it is not a
    directory.
    '''
    if self._parent_dir is None:
        self.created_parent_dir = True
        self._parent_dir = tempfile.mkdtemp(prefix='salt-testdaemon-')
        return self._parent_dir
    self._parent_dir = os.path.abspath(os.path.normpath(self._parent_dir))
    if not os.path.exists(self._parent_dir):
        self.created_parent_dir = True
        os.makedirs(self._parent_dir)
    elif not os.path.isdir(self._parent_dir):
        raise ValueError('Parent path "{0}" exists but is not a directory'.format(self._parent_dir))
    return self._parent_dir
def config_write(self, config):
    '''Render the configuration for *config* and write it to its file.'''
    if not config:
        return
    cpath = self.abs_path(self.config_file_get(config))
    with salt.utils.files.fopen(cpath, 'w') as cfg_fh:
        cfg = self.config_stringify(config)
        log.debug('Writing configuration for {0} to {1}:\n{2}'.format(self.name, cpath, cfg))
        cfg_fh.write(cfg)
        cfg_fh.flush()
def configs_write(self):
    '''Write every configuration file to disk.'''
    for name in self.configs:
        self.config_write(name)
def config_type(self, config):
    '''Return True when *config* is one of the acceptable types.'''
    return isinstance(config, self.config_types)
def config_cast(self, config):
    '''Coerce *config* to the internally expected type when needed.'''
    if self.config_type(config):
        return config
    return self.config_caster(config)
def config_subs(self):
    '''
    Build the substitution mapping used to render configurations:
    attribute values first, then config_vals formatted against them.
    '''
    subs = {attr: getattr(self, attr, None) for attr in self.config_attrs}
    for key, val in self.config_vals.items():
        subs[key] = val.format(**subs)
    return subs
def config_stringify(self, config):
    '''
    Get the configuration for *config* as a string with substitution
    values applied.

    :param config: key passed through to ``config_get``
    :return str: the rendered configuration text
    '''
    cfg = self.config_get(config)
    # BUG FIX: str.format() is not in-place — the original discarded the
    # formatted result and returned the raw template unchanged.
    return cfg.format(**self.config_subs())
def config_merge(self, base, overrides):
    '''Merge two configuration hunks by concatenating their cast forms.'''
    parts = [self.config_cast(base), self.config_cast(overrides)]
    return ''.join(parts)
def config_get(self, config):
    '''Return the stored configuration data for *config*.'''
    return self.configs[config]
def config_set(self, config, val):
    '''Store configuration data *val* under key *config*.'''
    self.configs[config] = val
def make_dirtree(self):
    '''
    Create the directory structure declared in ``self.dirtree``.

    Entries may be plain relative paths, or indirections of the form
    ``'&attr'`` which are resolved to ``self.attr`` (a string or an
    iterable of strings).  All resolved subdirectories are created under
    the test dir if missing.
    '''
    subdirs = []
    for branch in self.dirtree:
        log.debug('checking dirtree: {0}'.format(branch))
        # Skip empty/falsy entries entirely.
        if (not branch):
            continue
        if (isinstance(branch, six.string_types) and (branch[0] == '&')):
            # '&name' is an indirection: look up the attribute after '&'.
            log.debug('Looking up dirtree branch "{0}"'.format(branch))
            try:
                dirattr = getattr(self, branch[1:], None)
                log.debug('dirtree "{0}" => "{1}"'.format(branch, dirattr))
            except AttributeError:
                raise ValueError('Unable to find dirtree attribute "{0}" on object "{1}.name = {2}: {3}"'.format(branch, self.__class__.__name__, self.name, dir(self)))
            if (not dirattr):
                continue
            if isinstance(dirattr, six.string_types):
                subdirs.append(dirattr)
            elif hasattr(dirattr, '__iter__'):
                # Iterable attribute contributes several subdirectories.
                subdirs.extend(dirattr)
            else:
                raise TypeError('Branch type of {0} in dirtree is unhandled'.format(branch))
        elif isinstance(branch, six.string_types):
            subdirs.append(branch)
        else:
            raise TypeError('Branch type of {0} in dirtree is unhandled'.format(branch))
    # Materialize each collected subdirectory (relative to test_dir).
    for subdir in subdirs:
        path = self.abs_path(subdir)
        if (not os.path.exists(path)):
            log.debug('make_dirtree: {0}'.format(path))
            os.makedirs(path)
def setup(self, *args, **kwargs):
    '''Create run-time scaffolding (dir tree + config files), only once.'''
    _ = (args, kwargs)
    if self._setup_done:
        return
    self.make_dirtree()
    self.configs_write()
    self._setup_done = True
def cleanup(self, *args, **kwargs):
    '''Tear down setup() scaffolding and any run-time generated files.'''
    _ = (args, kwargs)
    if self.process:
        try:
            self.process.kill()
            self.process.wait()
        except OSError:
            # Process already gone — nothing to reap.
            pass
    if os.path.exists(self.test_dir):
        shutil.rmtree(self.test_dir)
    if self.created_parent_dir and os.path.exists(self.parent_dir):
        shutil.rmtree(self.parent_dir)
def run(self, args=None, catch_stderr=False, with_retcode=False, timeout=None, raw=False, env=None, verbatim_args=False, verbatim_env=False):
    '''
    Execute the program, possibly using a supplied environment.

    :param args: A command string or a command sequence of arguments for the program.
    :param catch_stderr: A boolean whether to capture and return stderr.
    :param with_retcode: A boolean whether to return the exit code.
    :param timeout: A float of how long to wait for the process to complete before it is killed.
    :param raw: A boolean whether to return buffer strings for stdout and stderr or sequences of output lines.
    :param env: A dictionary of environment key/value settings for the command.
    :param verbatim_args: A boolean whether to automatically add inferred arguments.
    :param verbatim_env: A boolean whether to automatically add inferred environment values.
    :return list: (stdout [,stderr] [,retcode])
    '''
    _ = verbatim_args
    self.setup()
    if (args is None):
        args = []
    if (env is None):
        env = {}
    # Build the environment overlay: instance env first, call env wins.
    env_delta = {}
    env_delta.update(self.env)
    env_delta.update(env)
    if (not verbatim_env):
        # Ensure the child sees a PYTHONPATH that covers sys.path and
        # puts CODE_DIR first so the in-tree salt is imported.
        env_pypath = env_delta.get('PYTHONPATH', os.environ.get('PYTHONPATH'))
        if (not env_pypath):
            env_pypath = sys.path
        else:
            env_pypath = env_pypath.split(':')
            for path in sys.path:
                if (path not in env_pypath):
                    env_pypath.append(path)
        if (CODE_DIR != env_pypath[0]):
            env_pypath.insert(0, CODE_DIR)
        env_delta['PYTHONPATH'] = ':'.join(env_pypath)
    cmd_env = dict(os.environ)
    cmd_env.update(env_delta)
    popen_kwargs = {'shell': self.shell, 'stdout': subprocess.PIPE, 'env': cmd_env}
    if (catch_stderr is True):
        popen_kwargs['stderr'] = subprocess.PIPE
    if (not sys.platform.lower().startswith('win')):
        popen_kwargs['close_fds'] = True

        def detach_from_parent_group():
            '''
            A utility function that prevents child process from getting parent signals.
            '''
            os.setpgrp()
        popen_kwargs['preexec_fn'] = detach_from_parent_group
    elif (sys.platform.lower().startswith('win') and (timeout is not None)):
        raise RuntimeError('Timeout is not supported under windows')
    self.argv = [self.program]
    self.argv.extend(args)
    log.debug('TestProgram.run: %s Environment %s', self.argv, env_delta)
    process = subprocess.Popen(self.argv, **popen_kwargs)
    self.process = process
    if (timeout is not None):
        # Poll until the deadline: first overrun sends SIGINT to the
        # process group, a second overrun sends SIGKILL and returns the
        # partial output plus a timeout marker line.
        stop_at = (datetime.now() + timedelta(seconds=timeout))
        term_sent = False
        while True:
            process.poll()
            if (datetime.now() > stop_at):
                if (term_sent is False):
                    os.killpg(os.getpgid(process.pid), signal.SIGINT)
                    term_sent = True
                    continue
                try:
                    os.killpg(os.getpgid(process.pid), signal.SIGKILL)
                    process.wait()
                except OSError as exc:
                    # ESRCH: the group is already gone; anything else is real.
                    if (exc.errno != errno.ESRCH):
                        raise
                out = process.stdout.read().splitlines()
                out.extend(['Process took more than {0} seconds to complete. \nProcess Killed!'.format(timeout)])
                if catch_stderr:
                    err = process.stderr.read().splitlines()
                    if with_retcode:
                        return (out, err, process.returncode)
                    else:
                        return (out, err)
                if with_retcode:
                    return (out, process.returncode)
                else:
                    return out
            if (process.returncode is not None):
                break
    if catch_stderr:
        if (sys.version_info < (2, 7)):
            # Python < 2.7 lacks communicate() reliability here.
            process.wait()
            out = process.stdout.read()
            err = process.stderr.read()
        else:
            (out, err) = process.communicate()
        if (process.stdout is not None):
            process.stdout.close()
        if (process.stderr is not None):
            process.stderr.close()
        try:
            if with_retcode:
                if ((out is not None) and (err is not None)):
                    if (not raw):
                        return (out.splitlines(), err.splitlines(), process.returncode)
                    else:
                        return (out, err, process.returncode)
                return (out.splitlines(), [], process.returncode)
            else:
                if ((out is not None) and (err is not None)):
                    if (not raw):
                        return (out.splitlines(), err.splitlines())
                    else:
                        return (out, err)
                if (not raw):
                    return (out.splitlines(), [])
                else:
                    return (out, [])
        finally:
            # Best-effort terminate so no child outlives the call.
            try:
                process.terminate()
            except OSError as err:
                pass
    data = process.communicate()
    process.stdout.close()
    try:
        if with_retcode:
            if (not raw):
                return (data[0].splitlines(), process.returncode)
            else:
                return (data[0], process.returncode)
        elif (not raw):
            return data[0].splitlines()
        else:
            return data[0]
    finally:
        try:
            process.terminate()
        except OSError as err:
            pass
def config_stringify(self, config):
    '''
    Transform the configuration dict for *config* into a YAML string
    suitable to write to a file, substituting string values.
    '''
    subs = self.config_subs()
    rendered = {}
    for key, val in self.config_get(config).items():
        if isinstance(val, six.string_types):
            rendered[key] = val.format(**subs)
        else:
            rendered[key] = val
    return yaml.safe_dump(rendered, default_flow_style=False)
def install_script(self):
    '''
    Install the called script into the test area, forcing its shebang
    to the current interpreter and making it executable.
    '''
    script_source = os.path.join(CODE_DIR, 'scripts', self.script)
    with salt.utils.files.fopen(script_source, 'r') as src_fh:
        lines = src_fh.readlines()
    if lines[0].startswith('#!'):
        lines.pop(0)
    lines.insert(0, '#!{0}\n'.format(sys.executable))
    script_path = self.abs_path(os.path.join(self.script_dir, self.script))
    log.debug('Installing "{0}" to "{1}"'.format(script_source, script_path))
    with salt.utils.files.fopen(script_path, 'w') as dst_fh:
        dst_fh.write(''.join(lines))
        dst_fh.flush()
    os.chmod(script_path, 493)  # 493 == 0o755 (rwxr-xr-x)
@property
def pid_path(self):
    '''
    Path to the pid file created by the daemon; an absolute/with-separator
    pid_file is used as-is, otherwise it lives under pid_dir.
    '''
    if os.path.sep in self.pid_file:
        return self.pid_file
    return os.path.join(self.pid_dir, self.pid_file)
@property
def daemon_pid(self):
    '''Return the daemon PID from its pid file, or None when absent.'''
    pid_path = self.abs_path(self.pid_path)
    if not salt.utils.process.check_pidfile(pid_path):
        return None
    return salt.utils.process.get_pidfile(pid_path)
def wait_for_daemon_pid(self, timeout=10):
    '''
    Poll for up to *timeout* seconds until the pid file appears and
    return the PID; raise TimeoutError otherwise.
    '''
    deadline = time.time() + timeout
    while True:
        pid = self.daemon_pid
        if pid:
            return pid
        if deadline < time.time():
            raise TimeoutError('Timeout waiting for "{0}" pid in "{1}"'.format(self.name, self.abs_path(self.pid_path)))
        time.sleep(0.2)
def is_running(self):
    '''Return True when the daemon appears to be alive.'''
    if self._shutdown:
        return False
    try:
        pid = self.wait_for_daemon_pid()
    except TimeoutError:
        return False
    return psutils.pid_exists(pid)
def find_orphans(self, cmdline):
    '''
    Find orphaned processes matching the specified cmdline.

    On Python 3 the argv sequence is joined into one string and matched
    as a substring of any argv item; on Python 2 a sliding-window exact
    match of the argv sequence is used instead.
    '''
    ret = []
    if six.PY3:
        cmdline = ' '.join(cmdline)
        for proc in psutils.process_iter():
            try:
                for item in proc.cmdline():
                    if (cmdline in item):
                        ret.append(proc)
            except psutils.NoSuchProcess:
                # Process exited while we were iterating — skip it.
                continue
    else:
        cmd_len = len(cmdline)
        for proc in psutils.process_iter():
            try:
                proc_cmdline = proc.cmdline()
            except psutils.NoSuchProcess:
                continue
            # Match cmdline as a contiguous slice of the process argv.
            if any(((cmdline == proc_cmdline[n:(n + cmd_len)]) for n in range(((len(proc_cmdline) - cmd_len) + 1)))):
                ret.append(proc)
    return ret
def shutdown(self, signum=signal.SIGTERM, timeout=10, wait_for_orphans=0):
    '''
    Shutdown a running daemon.

    :param signum: signal intended for the daemon (kept for interface
        compatibility; termination goes through terminate_process).
    :param timeout: seconds to wait for the daemon pid file.
    :param wait_for_orphans: seconds of "quiet time" to keep sweeping
        for orphaned child processes before declaring shutdown done.
    '''
    if (not self._shutdown):
        try:
            pid = self.wait_for_daemon_pid(timeout)
            terminate_process(pid=pid, kill_children=True)
        except TimeoutError:
            # No pid file ever appeared — nothing to signal by pid.
            pass
    if self.process:
        terminate_process(pid=self.process.pid, kill_children=True)
        self.process.wait()
        if wait_for_orphans:
            orphans = self.find_orphans(self.argv)
            last = time.time()
            while True:
                if orphans:
                    log.debug('Terminating orphaned child processes: %s', orphans)
                    terminate_process_list(orphans)
                    # Reset the quiet-time clock each time we find more.
                    last = time.time()
                if ((time.time() - last) >= wait_for_orphans):
                    break
                time.sleep(0.25)
                orphans = self.find_orphans(self.argv)
        self.process = None
    self._shutdown = True
def cleanup(self, *args, **kwargs):
    '''Shut the daemon down, then remove scaffolding via the parent class.'''
    self.shutdown()
    super(TestDaemon, self).cleanup(*args, **kwargs)
def assert_exit_status(self, status, ex_status, message=None, stdout=None, stderr=None):
    '''
    Verify *status* equals the named exit code from salt's exitcodes,
    emitting rich failure information (message, stdout, stderr).
    '''
    expected = getattr(exitcodes, ex_status)
    note = ' ({0})'.format(message) if message else ''
    out_part = '\nstdout: {0}'.format(stdout) if stdout else ''
    err_part = '\nstderr: {0}'.format(stderr) if stderr else ''
    self.assertEqual(status, expected, 'Exit status was {0}, must be {1} (salt.default.exitcodes.{2}){3}{4}{5}'.format(status, expected, ex_status, note, out_part, err_part))
def _pki_dir(self):
    '''Create (once) and return the shared pki directory for the swarm.'''
    path = os.path.join(self.swarm_root, 'pki')
    if not os.path.exists(path):
        os.makedirs(path)
        print('Creating shared pki keys for the swarm on: {0}'.format(path))
        subprocess.call('salt-key -c {0} --gen-keys minion --gen-keys-dir {0} --log-file {1} --user {2}'.format(path, os.path.join(path, 'keys.log'), self.opts['user']), shell=True)
        print('Keys generated')
    return path
def start(self):
    '''
    Start the swarm (optionally a master too) and block until CTRL-C,
    then clean up the minion configs.
    '''
    if self.opts['master_too']:
        master_swarm = MasterSwarm(self.opts)
        master_swarm.start()
    minions = MinionSwarm(self.opts)
    minions.start_minions()
    print('Starting minions...')
    print('All {0} minions have started.'.format(self.opts['minions']))
    print('Waiting for CTRL-C to properly shutdown minions...')
    while True:
        try:
            time.sleep(5)
        except KeyboardInterrupt:
            print('\nShutting down minions')
            self.clean_configs()
            break
def shutdown(self):
    '''Kill every remaining minion (and master), then remove temp files.'''
    print('Killing any remaining running minions')
    subprocess.call('pkill -KILL -f "python.*salt-minion"', shell=True)
    if self.opts['master_too']:
        print('Killing any remaining masters')
        subprocess.call('pkill -KILL -f "python.*salt-master"', shell=True)
    if not self.opts['no_clean']:
        print('Remove ALL related temp files/directories')
        shutil.rmtree(self.swarm_root)
    print('Done')
def clean_configs(self):
    '''Terminate each minion via its pid file and remove its config dir.'''
    for path in self.confs:
        pidfile = '{0}.pid'.format(path)
        try:
            try:
                pid = int(open(pidfile).read().strip())
                os.kill(pid, signal.SIGTERM)
            except ValueError:
                # Unparsable pid file — nothing to signal.
                pass
            if os.path.exists(pidfile):
                os.remove(pidfile)
            if not self.opts['no_clean']:
                shutil.rmtree(path)
        except (OSError, IOError):
            pass
def start_minions(self):
    '''Generate the configs, then launch each minion staggered by start_delay.'''
    self.prep_configs()
    for path in self.confs:
        cmd = 'salt-minion -c {0} --pid-file {1}'.format(path, '{0}.pid'.format(path))
        if self.opts['foreground']:
            cmd += ' -l debug &'
        else:
            cmd += ' -d &'
        subprocess.call(cmd, shell=True)
        time.sleep(self.opts['start_delay'])
def mkconf(self, idx):
    '''
    Create a config file for a single minion (index *idx*), seeding it
    from the optional base config, applying transport-specific settings,
    and registering the config dir in ``self.confs``.
    '''
    data = {}
    if self.opts['config_dir']:
        # Seed from the base minion config; empty file yields {} via `or {}`.
        spath = os.path.join(self.opts['config_dir'], 'minion')
        with open(spath) as conf:
            data = (yaml.load(conf) or {})
    minion_id = '{0}-{1}'.format(self.opts['name'], str(idx).zfill(self.zfill))
    dpath = os.path.join(self.swarm_root, minion_id)
    if (not os.path.exists(dpath)):
        os.makedirs(dpath)
    data.update({'id': minion_id, 'user': self.opts['user'], 'cachedir': os.path.join(dpath, 'cache'), 'master': self.opts['master'], 'log_file': os.path.join(dpath, 'minion.log'), 'grains': {}})
    if (self.opts['transport'] == 'zeromq'):
        # zeromq minions share the swarm's pre-generated key pair.
        minion_pkidir = os.path.join(dpath, 'pki')
        if (not os.path.exists(minion_pkidir)):
            os.makedirs(minion_pkidir)
        minion_pem = os.path.join(self.pki, 'minion.pem')
        minion_pub = os.path.join(self.pki, 'minion.pub')
        shutil.copy(minion_pem, minion_pkidir)
        shutil.copy(minion_pub, minion_pkidir)
        data['pki_dir'] = minion_pkidir
    elif (self.opts['transport'] == 'raet'):
        data['transport'] = 'raet'
        data['sock_dir'] = os.path.join(dpath, 'sock')
        data['raet_port'] = self.raet_port
        data['pki_dir'] = os.path.join(dpath, 'pki')
        # Each raet minion needs its own port.
        self.raet_port += 1
    elif (self.opts['transport'] == 'tcp'):
        data['transport'] = 'tcp'
    if self.opts['root_dir']:
        data['root_dir'] = self.opts['root_dir']
    path = os.path.join(dpath, 'minion')
    if self.opts['keep']:
        # Disable every salt module whose name is not in the keep list.
        keep = self.opts['keep'].split(',')
        modpath = os.path.join(os.path.dirname(salt.__file__), 'modules')
        fn_prefixes = (fn_.partition('.')[0] for fn_ in os.listdir(modpath))
        ignore = [fn_prefix for fn_prefix in fn_prefixes if (fn_prefix not in keep)]
        data['disable_modules'] = ignore
    if self.opts['rand_os']:
        data['grains']['os'] = random.choice(OSES)
    if self.opts['rand_ver']:
        data['grains']['saltversion'] = random.choice(VERS)
    if self.opts['rand_machine_id']:
        # NOTE(review): hashlib.md5() requires bytes on Python 3 and
        # minion_id is a str here — confirm this script targets Python 2
        # or encode minion_id first.
        data['grains']['machine_id'] = hashlib.md5(minion_id).hexdigest()
    if self.opts['rand_uuid']:
        data['grains']['uuid'] = str(uuid.uuid4())
    with open(path, 'w+') as fp_:
        yaml.dump(data, fp_)
    self.confs.add(dpath)
def prep_configs(self):
    '''Create one config per requested minion, filling the confs set.'''
    for idx in range(self.opts['minions']):
        self.mkconf(idx)
def start(self):
    '''Generate the master config and fire the master off.'''
    sys.stdout.write('Generating master config...')
    self.mkconf()
    print('done')
    sys.stdout.write('Starting master...')
    self.start_master()
    print('done')
def start_master(self):
    '''Launch the salt-master process (foreground or daemonized).'''
    cmd = 'salt-master -c {0} --pid-file {1}'.format(self.conf, '{0}.pid'.format(self.conf))
    if self.opts['foreground']:
        cmd += ' -l debug &'
    else:
        cmd += ' -d &'
    subprocess.call(cmd, shell=True)
def mkconf(self):
    '''
    Make a master config and write it.

    Seeds the config from the optional base master config, forces the
    log file location and open_mode, and writes the result as YAML.
    '''
    data = {}
    if self.opts['config_dir']:
        spath = os.path.join(self.opts['config_dir'], 'master')
        with open(spath) as conf:
            # ROBUSTNESS/CONSISTENCY FIX: yaml.load returns None for an
            # empty file, which crashed data.update below; the minion
            # mkconf already guards with `or {}`.
            data = (yaml.load(conf) or {})
    data.update({'log_file': os.path.join(self.conf, 'master.log'), 'open_mode': True})
    # ROBUSTNESS FIX: os.makedirs raised OSError when the directory
    # already existed (e.g. on a re-run).
    if not os.path.exists(self.conf):
        os.makedirs(self.conf)
    path = os.path.join(self.conf, 'master')
    with open(path, 'w+') as fp_:
        yaml.dump(data, fp_)
def test_threads(self):
    '''
    Verify that ContextDict overrides properly within threads: each
    thread's clone sees its own override while the global value is
    unchanged outside the override.
    '''
    rets = []

    def tgt(x, s):
        inner_ret = []
        over = self.cd.clone()
        # Value before entering the override context (global view).
        inner_ret.append(self.cd.get('foo'))
        with over:
            # Clone starts with the same value...
            inner_ret.append(over.get('foo'))
            over['foo'] = x
            inner_ret.append(over.get('foo'))
            # Sleep so threads overlap and would clobber each other if
            # the override leaked across threads.
            time.sleep(s)
            inner_ret.append(over.get('foo'))
        rets.append(inner_ret)
    threads = []
    for x in range(0, self.num_concurrent_tasks):
        # Staggered sleep times force interleaving.
        s = (self.num_concurrent_tasks - x)
        t = threading.Thread(target=tgt, args=(x, s))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
    for r in rets:
        # Pre-override global == initial clone value; set value survives
        # the sleep (no cross-thread clobbering).
        self.assertEqual(r[0], r[1])
        self.assertEqual(r[2], r[3])
@gen_test
def test_coroutines(self):
    '''
    Verify that ContextDict overrides properly within coroutines: each
    coroutine runs under its own StackContext-scoped clone, and nested
    coroutines observe the same override.
    '''
    @tornado.gen.coroutine
    def secondary_coroutine(over):
        # Nested coroutine must still see the override value.
        raise tornado.gen.Return(over.get('foo'))

    @tornado.gen.coroutine
    def tgt(x, s, over):
        inner_ret = []
        inner_ret.append(self.cd.get('foo'))
        inner_ret.append(over.get('foo'))
        over['foo'] = x
        inner_ret.append(over.get('foo'))
        # Yield so coroutines interleave; the override must survive.
        (yield tornado.gen.sleep(s))
        inner_ret.append(over.get('foo'))
        r = (yield secondary_coroutine(over))
        inner_ret.append(r)
        raise tornado.gen.Return(inner_ret)
    futures = []
    for x in range(0, self.num_concurrent_tasks):
        s = (self.num_concurrent_tasks - x)
        over = self.cd.clone()
        # Run each coroutine with its clone installed as the StackContext.
        f = tornado.stack_context.run_with_stack_context(tornado.stack_context.StackContext((lambda: over)), (lambda: tgt(x, (s / 5.0), over)))
        futures.append(f)
    wait_iterator = tornado.gen.WaitIterator(*futures)
    while (not wait_iterator.done()):
        r = (yield wait_iterator.next())
        self.assertEqual(r[0], r[1])  # global view == clone's initial view
        self.assertEqual(r[2], r[3])  # override survives the sleep
        self.assertEqual(r[3], r[4])  # nested coroutine sees the override
def test_basic(self):
    '''The ContextDict converts to a plain dict holding the global data.'''
    self.assertEqual(dict(self.cd), {'foo': 'global'})
def _test_returner(self, mocked_smtplib):
    '''Verify the SMTP returner attempts to send a message.'''
    ret = {'id': '12345', 'fun': 'mytest.func', 'fun_args': 'myfunc args', 'jid': '54321', 'return': 'The room is on fire as shes fixing her hair'}
    options = {'username': '', 'tls': '', 'from': '', 'fields': 'id,fun,fun_args,jid,return', 'to': '', 'host': '', 'renderer': 'jinja|yaml', 'template': '', 'password': '', 'gpgowner': '', 'subject': ''}
    with patch('salt.returners.smtp_return._get_options', MagicMock(return_value=options)):
        smtp.returner(ret)
        self.assertTrue(mocked_smtplib.return_value.sendmail.called)
def tearDown(self):
    '''
    Remove the temporary cache dir after each test.  No setUp is used in
    this TestCase because _make_tmp_jid_dirs replaces it.
    '''
    if os.path.exists(TMP_CACHE_DIR):
        shutil.rmtree(TMP_CACHE_DIR)
def test_clean_old_jobs_no_jid_root(self):
    '''clean_old_jobs returns None when no jid_root is found.'''
    with patch('os.path.exists', MagicMock(return_value=False)):
        self.assertEqual(local_cache.clean_old_jobs(), None)
def test_clean_old_jobs_empty_jid_dir_removed(self):
    '''An empty JID dir old enough to expire is removed.'''
    jid_dir, jid_file = self._make_tmp_jid_dirs(create_files=False)
    self.assertEqual(jid_file, None)
    with patch.dict(local_cache.__opts__, {'keep_jobs': 1e-08}):
        local_cache.clean_old_jobs()
        self.assertEqual([], os.listdir(TMP_JID_DIR))
def test_clean_old_jobs_empty_jid_dir_remains(self):
    '''An empty JID dir created inside the keep_jobs window is kept.'''
    jid_dir, jid_file = self._make_tmp_jid_dirs(create_files=False)
    self.assertEqual(jid_file, None)
    local_cache.clean_old_jobs()
    jid_dir_name = jid_dir.rpartition('/')[2]
    self.assertEqual([jid_dir_name], os.listdir(TMP_JID_DIR))
def test_clean_old_jobs_jid_file_corrupted(self):
    '''
    A JID dir whose jid entry is not a file (a corrupted cache entry)
    is scrubbed entirely.
    '''
    jid_dir, jid_file = self._make_tmp_jid_dirs()
    self.assertEqual(jid_file.rpartition('/')[2], 'jid')
    with patch('os.path.isfile', MagicMock(return_value=False)):
        local_cache.clean_old_jobs()
        self.assertEqual([], os.listdir(TMP_JID_DIR))
def test_clean_old_jobs_jid_file_is_cleaned(self):
    '''A JID dir old enough to expire is removed entirely.'''
    jid_dir, jid_file = self._make_tmp_jid_dirs()
    self.assertEqual(jid_file.rpartition('/')[2], 'jid')
    with patch.dict(local_cache.__opts__, {'keep_jobs': 1e-08}):
        local_cache.clean_old_jobs()
        self.assertEqual([], os.listdir(TMP_JID_DIR))
def _make_tmp_jid_dirs(self, create_files=True):
    '''
    Set up temporary directories and files for the clean_old_jobs tests.

    Returns ``(temp_dir, jid_file_path)``; when *create_files* is False
    the jid_file_path is None.
    '''
    if not os.path.exists(TMP_JID_DIR):
        os.makedirs(TMP_JID_DIR)
    temp_dir = tempfile.mkdtemp(dir=TMP_JID_DIR)
    jid_file_path = None
    if create_files:
        dir_name = '/'.join([temp_dir, 'jid'])
        os.mkdir(dir_name)
        jid_file_path = '/'.join([dir_name, 'jid'])
        with salt.utils.files.fopen(jid_file_path, 'w') as jid_fh:
            jid_fh.write('this is a jid file')
    return (temp_dir, jid_file_path)
def _check_dir_files(self, msg, contents, status='None'):
    '''
    Helper: assert every path in *contents* is present or removed.

    :param msg: prefix for the assertion failure message
    :param contents: iterable of paths to check
    :param status: 'present' or 'removed'
    :raises ValueError: for any other *status* value
    '''
    for content in contents:
        log.debug('CONTENT {0}'.format(content))
        if status == 'present':
            check_job_dir = os.path.exists(content)
        elif status == 'removed':
            check_job_dir = not os.path.exists(content)
        else:
            # BUG FIX: any other status left check_job_dir unassigned and
            # raised UnboundLocalError at the assertTrue below.
            raise ValueError('Unknown status {0!r}; expected "present" or "removed"'.format(status))
        self.assertTrue(check_job_dir, msg=(msg + content))
def _add_job(self):
    '''Store one job in the job cache and verify its files exist.'''
    opts = self.get_temp_config('master')
    opts['cachedir'] = self.TMP_CACHE_DIR
    load = {'fun_args': [], 'jid': '20160603132323715452', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'minion'}
    self.assertEqual(salt.utils.job.store_job(opts, load), None)
    self._check_dir_files('Dir/file does not exist: ', self.JOB_CACHE_DIR_FILES, status='present')
def test_clean_old_jobs(self):
    '''Jobs are removed from the job cache once expired.'''
    self._add_job()
    self.assertEqual(local_cache.clean_old_jobs(), None)
    self._check_dir_files('job cache was not removed: ', self.JOB_CACHE_DIR_FILES, status='removed')
def test_not_clean_new_jobs(self):
    '''Jobs are kept while the jobs dir is newer than keep_jobs.'''
    self._add_job()
    with patch.dict(local_cache.__opts__, {'keep_jobs': 24}):
        self.assertEqual(local_cache.clean_old_jobs(), None)
        self._check_dir_files('job cache was removed: ', self.JOB_CACHE_DIR_FILES, status='present')
def test_empty_jid_dir(self):
    '''
    Ensure an empty jid dir is removed by clean_old_jobs.

    On Windows, a rename round-trip is retried to make sure no other
    process still holds a handle on the freshly created directory.
    '''
    new_jid_dir = os.path.join(self.JOBS_DIR, 'z0')
    self.EMPTY_JID_DIR.append(new_jid_dir)
    os.makedirs(new_jid_dir)
    if salt.utils.platform.is_windows():
        import time
        lock_dir = (new_jid_dir + '.lckchk')
        tries = 0
        while True:
            tries += 1
            if (tries > 10):
                # Give up after 10 attempts; proceed with the test anyway.
                break
            try:
                # Renaming succeeds only once no other handle is open.
                os.rename(new_jid_dir, lock_dir)
                time.sleep(1)
                os.rename(lock_dir, new_jid_dir)
                break
            except WindowsError:
                continue
    self._check_dir_files('new_jid_dir was not created', self.EMPTY_JID_DIR, status='present')
    self.assertEqual(local_cache.clean_old_jobs(), None)
    self._check_dir_files('new_jid_dir was not removed', self.EMPTY_JID_DIR, status='removed')
def test_master_publish_name(self):
    '''
    A simple name can auth against a given function: test_user may call
    test.ping but *not* sys.doc.
    '''
    with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value='some_minions')):
        self.clear.publish(self.valid_clear_load)
        self.assertEqual(self.fire_event_mock.call_args[0][0]['fun'], 'test.ping')
        sys_doc_load = self.valid_clear_load
        sys_doc_load['fun'] = 'sys.doc'
        self.clear.publish(sys_doc_load)
        self.assertNotEqual(self.fire_event_mock.call_args[0][0]['fun'], 'sys.doc')
def test_master_publish_group(self):
    '''test_group can access test.echo but *not* sys.doc.'''
    with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value='some_minions')):
        self.valid_clear_load['kwargs']['user'] = 'new_user'
        self.valid_clear_load['fun'] = 'test.echo'
        self.valid_clear_load['arg'] = 'hello'
        with patch('salt.auth.LoadAuth.get_groups', return_value=['test_group', 'second_test_group']):
            self.clear.publish(self.valid_clear_load)
            self.assertEqual(self.fire_event_mock.call_args[0][0]['fun'], 'test.echo')
            self.valid_clear_load['fun'] = 'sys.doc'
            self.assertNotEqual(self.fire_event_mock.call_args[0][0]['fun'], 'sys.doc')
def test_master_publish_some_minions(self):
    '''
    Ensure targeting is limited to minions permitted by the publisher ACL.

    NOTE: for these tests to run correctly, check_minions must NOT be
    patched.
    '''
    self.valid_clear_load['kwargs']['username'] = 'test_user_mminion'
    self.valid_clear_load['user'] = 'test_user_mminion'
    self.clear.publish(self.valid_clear_load)
    # No permitted minions -> nothing should have been fired.
    self.assertEqual(self.fire_event_mock.mock_calls, [])
def test_master_not_user_glob_all(self):
    '''
    Ensure a minion-only publisher ACL entry does NOT grant the function
    to all users. A bit of a no-op test, but it documents that this
    functionality is NOT currently supported.

    WARNING: do not patch check_minions in this test.
    '''
    self.valid_clear_load['kwargs']['username'] = 'NOT_A_VALID_USERNAME'
    self.valid_clear_load['user'] = 'NOT_A_VALID_USERNAME'
    self.valid_clear_load['fun'] = 'test.ping'
    self.clear.publish(self.valid_clear_load)
    # An unknown user must never reach the event bus.
    self.assertEqual(self.fire_event_mock.mock_calls, [])
def test_master_minion_glob(self):
    '''
    Ensure a user can be granted a function on minions selected by glob
    (e.g. ``minion_glob*``).

    The real selection logic lives in check_minions (patched inline
    here); this only exercises the ACL plumbing on the way there.
    '''
    wanted_fun = 'foo.bar'
    wanted_tgt = 'minion_glob1'
    self.valid_clear_load['tgt'] = wanted_tgt
    self.valid_clear_load['fun'] = wanted_fun
    with patch('salt.utils.minions.CkMinions.check_minions',
               MagicMock(return_value=['minion_glob1'])):
        self.clear.publish(self.valid_clear_load)
        self.assertTrue(
            self.fire_event_mock.called,
            'Did not fire {0} for minion tgt {1}'.format(wanted_fun, wanted_tgt))
        self.assertEqual(
            self.fire_event_mock.call_args[0][0]['fun'], wanted_fun,
            'Did not fire {0} for minion glob'.format(wanted_fun))
def test_master_function_glob(self):
    '''
    Ensure access can be granted to a set of execution-module functions
    selected by glob (e.g. ``test.*``).
    '''
    # TODO: not implemented yet.
    pass
def test_args_empty_spec(self):
    '''
    Ensure an empty arg spec (``- test.empty:``) allows a call with any
    args.
    '''
    with patch('salt.utils.minions.CkMinions.check_minions',
               MagicMock(return_value='minion1')):
        self.valid_clear_load['kwargs']['username'] = 'test_user_func'
        self.valid_clear_load.update({'user': 'test_user_func',
                                      'tgt': 'minion1',
                                      'fun': 'test.empty',
                                      'arg': ['TEST']})
        self.clear.publish(self.valid_clear_load)
        self.assertEqual(self.fire_event_mock.call_args[0][0]['fun'],
                         'test.empty')
def test_args_simple_match(self):
    '''
    Ensure args matching the spec (``'TEST'``, ``'TEST.*'``) are allowed.
    '''
    with patch('salt.utils.minions.CkMinions.check_minions',
               MagicMock(return_value='minion1')):
        self.valid_clear_load['kwargs']['username'] = 'test_user_func'
        self.valid_clear_load.update({'user': 'test_user_func',
                                      'tgt': 'minion1',
                                      'fun': 'test.echo',
                                      'arg': ['TEST', 'any', 'TEST ABC']})
        self.clear.publish(self.valid_clear_load)
        self.assertEqual(self.fire_event_mock.call_args[0][0]['fun'],
                         'test.echo')
def test_args_more_args(self):
    '''
    Ensure args beyond those listed in the spec are passed through.
    '''
    with patch('salt.utils.minions.CkMinions.check_minions',
               MagicMock(return_value='minion1')):
        self.valid_clear_load['kwargs']['username'] = 'test_user_func'
        self.valid_clear_load.update({'user': 'test_user_func',
                                      'tgt': 'minion1',
                                      'fun': 'test.echo',
                                      'arg': ['TEST',
                                              'any',
                                              'TEST ABC',
                                              'arg 3',
                                              {'kwarg1': 'val1',
                                               '__kwarg__': True}]})
        self.clear.publish(self.valid_clear_load)
        self.assertEqual(self.fire_event_mock.call_args[0][0]['fun'],
                         'test.echo')
def test_args_simple_forbidden(self):
    '''
    Ensure calls whose args do not satisfy the arg spec
    (``'TEST'``, ``'TEST.*'``) are rejected.
    '''
    with patch('salt.utils.minions.CkMinions.check_minions',
               MagicMock(return_value='minion1')):
        self.valid_clear_load['kwargs']['username'] = 'test_user_func'
        self.valid_clear_load.update({'user': 'test_user_func',
                                      'tgt': 'minion1',
                                      'fun': 'test.echo'})
        # Non-matching arg lists: wrong third value, wrong first value,
        # missing third arg, and no args at all.
        for bad_args in (['TEST', 'any', 'TESLA'],
                         ['TES', 'any', 'TEST1234'],
                         ['TEST', 'any'],
                         []):
            self.valid_clear_load['arg'] = bad_args
            self.clear.publish(self.valid_clear_load)
            self.assertEqual(self.fire_event_mock.mock_calls, [])
def test_args_kwargs_match(self):
    '''
    Ensure kwargs matching the spec (``text: 'KWMSG:.*'``) are allowed.
    '''
    with patch('salt.utils.minions.CkMinions.check_minions',
               MagicMock(return_value='some_minions')):
        self.valid_clear_load['kwargs']['username'] = 'test_user_func'
        self.valid_clear_load.update({'user': 'test_user_func',
                                      'tgt': '*',
                                      'fun': 'test.echo',
                                      'arg': [{'text': 'KWMSG: a message',
                                               'anything': 'hello all',
                                               'none': 'hello none',
                                               '__kwarg__': True}]})
        self.clear.publish(self.valid_clear_load)
        self.assertEqual(self.fire_event_mock.call_args[0][0]['fun'],
                         'test.echo')
def test_args_kwargs_mismatch(self):
    '''
    Ensure kwargs NOT matching the spec (``text: 'KWMSG:.*'``) are
    rejected.
    '''
    with patch('salt.utils.minions.CkMinions.check_minions',
               MagicMock(return_value='some_minions')):
        self.valid_clear_load['kwargs']['username'] = 'test_user_func'
        self.valid_clear_load.update({'user': 'test_user_func',
                                      'tgt': '*',
                                      'fun': 'test.echo'})
        # Non-matching payloads: wrong text value, missing text kwarg,
        # kwarg marker only, empty dict, empty list, and variants with
        # some-but-not-all of the expected kwargs.
        bad_payloads = (
            [{'text': 'KWMSG a message', 'anything': 'hello all',
              'none': 'hello none', '__kwarg__': True}],
            [{'anything': 'hello all', 'none': 'hello none',
              '__kwarg__': True}],
            [{'__kwarg__': True}],
            [{}],
            [],
            [{'text': 'KWMSG: a message', 'none': 'hello none',
              '__kwarg__': True}],
            [{'text': 'KWMSG: a message', 'anything': 'hello all',
              '__kwarg__': True}],
        )
        for payload in bad_payloads:
            self.valid_clear_load['arg'] = payload
            self.clear.publish(self.valid_clear_load)
            self.assertEqual(self.fire_event_mock.mock_calls, [])
def test_args_mixed_match(self):
    '''
    Ensure a call satisfying a mixed args+kwargs spec
    (args ``'a.*'``/``'b.*'``, kwargs ``kwa: 'kwa.*'``/``kwb: 'kwb'``)
    is allowed.
    '''
    with patch('salt.utils.minions.CkMinions.check_minions',
               MagicMock(return_value='some_minions')):
        self.valid_clear_load['kwargs']['username'] = 'test_user_func'
        self.valid_clear_load.update({'user': 'test_user_func',
                                      'tgt': '*',
                                      'fun': 'my_mod.some_func',
                                      'arg': ['alpha',
                                              'beta',
                                              'gamma',
                                              {'kwa': 'kwarg #1',
                                               'kwb': 'kwb',
                                               'one_more': 'just one more',
                                               '__kwarg__': True}]})
        self.clear.publish(self.valid_clear_load)
        self.assertEqual(self.fire_event_mock.call_args[0][0]['fun'],
                         'my_mod.some_func')
def test_args_mixed_mismatch(self):
    '''
    Ensure a call violating a mixed args+kwargs spec
    (args ``'a.*'``/``'b.*'``, kwargs ``kwa: 'kwa.*'``/``kwb: 'kwb'``)
    is rejected.
    '''
    with patch('salt.utils.minions.CkMinions.check_minions',
               MagicMock(return_value='some_minions')):
        self.valid_clear_load['kwargs']['username'] = 'test_user_func'
        self.valid_clear_load.update({'user': 'test_user_func',
                                      'tgt': '*',
                                      'fun': 'my_mod.some_func'})
        # Non-matching payloads: wrong second positional, wrong kwa
        # value, missing positional, and missing kwb kwarg.
        bad_payloads = (
            ['alpha', 'gamma',
             {'kwa': 'kwarg #1', 'kwb': 'kwb',
              'one_more': 'just one more', '__kwarg__': True}],
            ['alpha', 'beta', 'gamma',
             {'kwa': 'kkk', 'kwb': 'kwb',
              'one_more': 'just one more', '__kwarg__': True}],
            ['alpha',
             {'kwa': 'kwarg #1', 'kwb': 'kwb',
              'one_more': 'just one more', '__kwarg__': True}],
            ['alpha', 'beta', 'gamma',
             {'kwa': 'kwarg #1',
              'one_more': 'just one more', '__kwarg__': True}],
        )
        for payload in bad_payloads:
            self.valid_clear_load['arg'] = payload
            self.clear.publish(self.valid_clear_load)
            self.assertEqual(self.fire_event_mock.mock_calls, [])
def test_get_bnum(self):
    '''
    Ensure a plain numeric batch value is parsed as an int.
    '''
    self.batch.opts = {'batch': '2', 'timeout': 5}
    self.batch.minions = ['foo', 'bar']
    # '2' -> batch size of exactly 2 minions
    self.assertEqual(Batch.get_bnum(self.batch), 2)
def test_get_bnum_percentage(self):
    '''
    Ensure a percentage batch value is resolved against the minion count.
    '''
    self.batch.opts = {'batch': '50%', 'timeout': 5}
    self.batch.minions = ['foo']
    # 50% of one minion still yields a batch of 1
    self.assertEqual(Batch.get_bnum(self.batch), 1)
def test_get_bnum_high_percentage(self):
    '''
    Ensure a percentage batch value over 100% is accepted.
    '''
    self.batch.opts = {'batch': '160%', 'timeout': 5}
    self.batch.minions = ['foo', 'bar', 'baz']
    # 160% of 3 minions -> 4
    self.assertEqual(Batch.get_bnum(self.batch), 4)
def test_get_bnum_invalid_batch_data(self):
    '''
    Ensure an unparsable batch value yields None rather than raising.
    '''
    result = Batch.get_bnum(self.batch)
    self.assertEqual(result, None)
def test_conf_master_sample_is_commented(self):
    '''
    The sample config file at salt/conf/master must be completely
    commented out: parsing it should yield an empty dict.
    '''
    conf_path = SAMPLE_CONF_DIR + 'master'
    parsed = salt.config._read_conf_file(conf_path)
    self.assertEqual(
        parsed, {},
        "Sample config file '{0}' must be commented out.".format(conf_path))
def test_conf_minion_sample_is_commented(self):
    '''
    The sample config file at salt/conf/minion must be completely
    commented out: parsing it should yield an empty dict.
    '''
    conf_path = SAMPLE_CONF_DIR + 'minion'
    parsed = salt.config._read_conf_file(conf_path)
    self.assertEqual(
        parsed, {},
        "Sample config file '{0}' must be commented out.".format(conf_path))
def test_conf_cloud_sample_is_commented(self):
    '''
    The sample config file at salt/conf/cloud must be completely
    commented out: parsing it should yield an empty dict.
    '''
    conf_path = SAMPLE_CONF_DIR + 'cloud'
    parsed = salt.config._read_conf_file(conf_path)
    self.assertEqual(
        parsed, {},
        "Sample config file '{0}' must be commented out.".format(conf_path))
def test_conf_cloud_profiles_sample_is_commented(self):
    '''
    The sample config file at salt/conf/cloud.profiles must be completely
    commented out: parsing it should yield an empty dict.
    '''
    conf_path = SAMPLE_CONF_DIR + 'cloud.profiles'
    parsed = salt.config._read_conf_file(conf_path)
    self.assertEqual(
        parsed, {},
        "Sample config file '{0}' must be commented out.".format(conf_path))
def test_conf_cloud_providers_sample_is_commented(self):
    '''
    The sample config file at salt/conf/cloud.providers must be
    completely commented out: parsing it should yield an empty dict.
    '''
    conf_path = SAMPLE_CONF_DIR + 'cloud.providers'
    parsed = salt.config._read_conf_file(conf_path)
    self.assertEqual(
        parsed, {},
        "Sample config file '{0}' must be commented out.".format(conf_path))
def test_conf_proxy_sample_is_commented(self):
    '''
    The sample config file at salt/conf/proxy must be completely
    commented out: parsing it should yield an empty dict.
    '''
    conf_path = SAMPLE_CONF_DIR + 'proxy'
    parsed = salt.config._read_conf_file(conf_path)
    self.assertEqual(
        parsed, {},
        "Sample config file '{0}' must be commented out.".format(conf_path))
def test_conf_roster_sample_is_commented(self):
    '''
    The sample config file at salt/conf/roster must be completely
    commented out: parsing it should yield an empty dict.
    '''
    conf_path = SAMPLE_CONF_DIR + 'roster'
    parsed = salt.config._read_conf_file(conf_path)
    self.assertEqual(
        parsed, {},
        "Sample config file '{0}' must be commented out.".format(conf_path))
def test_conf_cloud_profiles_d_files_are_commented(self):
    '''
    Every sample config under salt/conf/cloud.profiles.d/ must be
    completely commented out: parsing each should yield an empty dict.
    '''
    sample_dir = SAMPLE_CONF_DIR + 'cloud.profiles.d/'
    for fname in os.listdir(sample_dir):
        parsed = salt.config._read_conf_file(sample_dir + fname)
        self.assertEqual(
            parsed, {},
            "Sample config file '{0}' must be commented out.".format(fname))
def test_conf_cloud_providers_d_files_are_commented(self):
    '''
    Every sample config under salt/conf/cloud.providers.d/ must be
    completely commented out: parsing each should yield an empty dict.
    '''
    sample_dir = SAMPLE_CONF_DIR + 'cloud.providers.d/'
    for fname in os.listdir(sample_dir):
        parsed = salt.config._read_conf_file(sample_dir + fname)
        self.assertEqual(
            parsed, {},
            "Sample config file '{0}' must be commented out.".format(fname))
def test_conf_cloud_maps_d_files_are_commented(self):
    '''
    Every sample config under salt/conf/cloud.maps.d/ must be completely
    commented out: parsing each should yield an empty dict.
    '''
    sample_dir = SAMPLE_CONF_DIR + 'cloud.maps.d/'
    for fname in os.listdir(sample_dir):
        parsed = salt.config._read_conf_file(sample_dir + fname)
        self.assertEqual(
            parsed, {},
            "Sample config file '{0}' must be commented out.".format(fname))