def query(self, query_type='list_nodes'):
    '''
    Query basic instance information
    '''
    mapper = salt.cloud.Map(self._opts_defaults())
    mapper.opts['selected_query_option'] = 'list_nodes'
    return mapper.map_providers_parallel(query_type)
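# A hedged usage sketch of the query methods on CloudClient (the config
# path follows the profile() docstring below; the output shape depends
# entirely on which cloud drivers are configured):
def _example_query_usage():
    import salt.cloud
    client = salt.cloud.CloudClient(path='/etc/salt/cloud')
    basic = client.query()        # list_nodes: name/id/state basics
    full = client.full_query()    # list_nodes_full: everything the driver returns
    minimal = client.min_query()  # list_nodes_min: names and states only
    return basic, full, minimal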
def full_query(self, query_type='list_nodes_full'):
    '''
    Query all instance information
    '''
    mapper = salt.cloud.Map(self._opts_defaults())
    mapper.opts['selected_query_option'] = 'list_nodes_full'
    return mapper.map_providers_parallel(query_type)
def select_query(self, query_type='list_nodes_select'):
    '''
    Query select instance information
    '''
    mapper = salt.cloud.Map(self._opts_defaults())
    mapper.opts['selected_query_option'] = 'list_nodes_select'
    return mapper.map_providers_parallel(query_type)
def min_query(self, query_type='list_nodes_min'):
    '''
    Query minimal instance information
    '''
    mapper = salt.cloud.Map(self._opts_defaults())
    mapper.opts['selected_query_option'] = 'list_nodes_min'
    return mapper.map_providers_parallel(query_type)
def profile(self, profile, names, vm_overrides=None, **kwargs):
    '''
    Pass in a profile to create; ``names`` is a list of VM names to
    allocate and ``vm_overrides`` is a dict of per-node option overrides.

    Example:

    .. code-block:: python

        >>> client = salt.cloud.CloudClient(path='/etc/salt/cloud')
        >>> client.profile('do_512_git', names=['minion01',])
        {'minion01': {u'backups_active': 'False',
                      u'created_at': '2014-09-04T18:10:15Z',
                      u'droplet': {u'event_id': 31000502,
                                   u'id': 2530006,
                                   u'image_id': 5140006,
                                   u'name': u'minion01',
                                   u'size_id': 66},
                      u'id': '2530006',
                      u'image_id': '5140006',
                      u'ip_address': '107.XXX.XXX.XXX',
                      u'locked': 'True',
                      u'name': 'minion01',
                      u'private_ip_address': None,
                      u'region_id': '4',
                      u'size_id': '66',
                      u'status': 'new'}}
    '''
    if not vm_overrides:
        vm_overrides = {}
    kwargs['profile'] = profile
    mapper = salt.cloud.Map(self._opts_defaults(**kwargs))
    if isinstance(names, six.string_types):
        names = names.split(',')
    return salt.utils.simple_types_filter(
        mapper.run_profile(profile, names, vm_overrides=vm_overrides))
def map_run(self, path=None, **kwargs):
    '''
    Pass in a location for a map to execute
    '''
    kwarg = {}
    if path:
        kwarg['map'] = path
    kwarg.update(kwargs)
    mapper = salt.cloud.Map(self._opts_defaults(**kwarg))
    dmap = mapper.map_data()
    return salt.utils.simple_types_filter(mapper.run_map(dmap))
def destroy(self, names):
    '''
    Destroy the named VMs
    '''
    mapper = salt.cloud.Map(self._opts_defaults(destroy=True))
    if isinstance(names, six.string_types):
        names = names.split(',')
    return salt.utils.simple_types_filter(mapper.destroy(names))
def create(self, provider, names, **kwargs):
    '''
    Create the named VMs, without using a profile

    Example:

    .. code-block:: python

        client.create(provider='my-ec2-config', names=['myinstance'],
                      image='ami-1624987f', size='t1.micro',
                      ssh_username='ec2-user', securitygroup='default',
                      delvol_on_destroy=True)
    '''
    mapper = salt.cloud.Map(self._opts_defaults())
    providers = self.opts['providers']
    if provider in providers:
        provider += ':{0}'.format(next(six.iterkeys(providers[provider])))
    else:
        return False
    if isinstance(names, six.string_types):
        names = names.split(',')
    ret = {}
    for name in names:
        vm_ = kwargs.copy()
        vm_['name'] = name
        vm_['driver'] = provider
        vm_['profile'] = None
        vm_['provider'] = provider
        ret[name] = salt.utils.simple_types_filter(mapper.create(vm_))
    return ret
def extra_action(self, names, provider, action, **kwargs):
    '''
    Perform actions with block storage devices

    Example:

    .. code-block:: python

        client.extra_action(names=['myblock'], action='volume_create',
                            provider='my-nova',
                            kwargs={'voltype': 'SSD', 'size': 1000})
        client.extra_action(names=['salt-net'], action='network_create',
                            provider='my-nova',
                            kwargs={'cidr': '192.168.100.0/24'})
    '''
    mapper = salt.cloud.Map(self._opts_defaults())
    providers = mapper.map_providers_parallel()
    if provider in providers:
        provider += ':{0}'.format(next(six.iterkeys(providers[provider])))
    else:
        return False
    if isinstance(names, six.string_types):
        names = names.split(',')
    ret = {}
    for name in names:
        extra_ = kwargs.copy()
        extra_['name'] = name
        extra_['provider'] = provider
        extra_['profile'] = None
        extra_['action'] = action
        ret[name] = salt.utils.simple_types_filter(mapper.extras(extra_))
    return ret
def action(self, fun=None, cloudmap=None, names=None, provider=None,
           instance=None, kwargs=None):
    '''
    Execute a single action via the cloud plugin backend

    Examples:

    .. code-block:: python

        client.action(fun='show_instance', names=['myinstance'])
        client.action(fun='show_image', provider='my-ec2-config',
                      kwargs={'image': 'ami-10314d79'})
    '''
    if kwargs is None:
        kwargs = {}
    mapper = salt.cloud.Map(
        self._opts_defaults(action=fun, names=names, **kwargs))
    if instance:
        if names:
            raise SaltCloudConfigError(
                "Please specify either a list of 'names' or a single "
                "'instance', but not both."
            )
        names = [instance]
    if names and not provider:
        self.opts['action'] = fun
        return mapper.do_action(names, kwargs)
    if provider and not names:
        return mapper.do_function(provider, fun, kwargs)
    else:
        raise SaltCloudConfigError(
            'Either an instance (or list of names) or a provider must be '
            'specified, but not both.'
        )
def get_configured_providers(self):
    '''
    Return the configured providers
    '''
    providers = set()
    for alias, drivers in six.iteritems(self.opts['providers']):
        if len(drivers) > 1:
            for driver in drivers:
                providers.add('{0}:{1}'.format(alias, driver))
            continue
        providers.add(alias)
    return providers
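# Hedged sketch of the alias/driver flattening above: aliases with a
# single driver stay bare, aliases with several drivers expand to
# 'alias:driver'. The provider names here are invented for illustration.
def _example_flatten_providers():
    providers = {
        'my-ec2': {'ec2': {}},
        'my-cloud': {'openstack': {}, 'nova': {}},
    }
    flattened = set()
    for alias, drivers in providers.items():
        if len(drivers) > 1:
            for driver in drivers:
                flattened.add('{0}:{1}'.format(alias, driver))
            continue
        flattened.add(alias)
    return flattened  # {'my-ec2', 'my-cloud:openstack', 'my-cloud:nova'}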
def lookup_providers(self, lookup):
    '''
    Look up the configured providers, returning a set of
    (alias, driver) tuples matching the lookup
    '''
    if lookup is None:
        lookup = 'all'
    if lookup == 'all':
        providers = set()
        for alias, drivers in six.iteritems(self.opts['providers']):
            for driver in drivers:
                providers.add((alias, driver))
        if not providers:
            raise SaltCloudSystemExit(
                'There are no cloud providers configured.'
            )
        return providers
    if ':' in lookup:
        alias, driver = lookup.split(':')
        if (alias not in self.opts['providers'] or
                driver not in self.opts['providers'][alias]):
            raise SaltCloudSystemExit(
                "No cloud providers matched '{0}'. Available: {1}".format(
                    lookup, ', '.join(self.get_configured_providers())
                )
            )
    providers = set()
    for alias, drivers in six.iteritems(self.opts['providers']):
        for driver in drivers:
            if lookup in (alias, driver):
                providers.add((alias, driver))
    if not providers:
        raise SaltCloudSystemExit(
            "No cloud providers matched '{0}'. "
            'Available selections: {1}'.format(
                lookup, ', '.join(self.get_configured_providers())
            )
        )
    return providers
def lookup_profiles(self, provider, lookup):
    '''
    Look up the configured profiles, returning a set of
    (alias, provider) tuples
    '''
    if provider is None:
        provider = 'all'
    if lookup is None:
        lookup = 'all'
    if lookup == 'all':
        profiles = set()
        provider_profiles = set()
        for alias, info in six.iteritems(self.opts['profiles']):
            providers = info.get('provider')
            if providers:
                given_prov_name = providers.split(':')[0]
                salt_prov_name = providers.split(':')[1]
                if given_prov_name == provider:
                    provider_profiles.add((alias, given_prov_name))
                elif salt_prov_name == provider:
                    provider_profiles.add((alias, salt_prov_name))
                profiles.add((alias, given_prov_name))
        if not profiles:
            raise SaltCloudSystemExit(
                'There are no cloud profiles configured.'
            )
        if provider != 'all':
            return provider_profiles
        return profiles
def map_providers(self, query='list_nodes', cached=False):
    '''
    Return a mapping of what named VMs are running on what VM providers
    based on what providers are defined in the configuration and VMs
    '''
    if cached is True and query in self.__cached_provider_queries:
        return self.__cached_provider_queries[query]
    pmap = {}
    for alias, drivers in six.iteritems(self.opts['providers']):
        for driver, details in six.iteritems(drivers):
            fun = '{0}.{1}'.format(driver, query)
            if fun not in self.clouds:
                log.error(
                    'Public cloud provider {0} is not available'.format(
                        driver)
                )
                continue
            if alias not in pmap:
                pmap[alias] = {}
            try:
                with salt.utils.context.func_globals_inject(
                        self.clouds[fun],
                        __active_provider_name__=':'.join([alias, driver])):
                    pmap[alias][driver] = self.clouds[fun]()
            except Exception as err:
                log.debug(
                    "Failed to execute '{0}()' while querying for "
                    'running nodes: {1}'.format(fun, err),
                    exc_info_on_loglevel=logging.DEBUG
                )
                # Failed to communicate with the provider, don't list
                # any nodes for it
                pmap[alias][driver] = []
    self.__cached_provider_queries[query] = pmap
    return pmap
def map_providers_parallel(self, query='list_nodes', cached=False):
    '''
    Return a mapping of what named VMs are running on what VM providers
    based on what providers are defined in the configuration and VMs.

    Same as map_providers, but queries in parallel.
    '''
    if cached is True and query in self.__cached_provider_queries:
        return self.__cached_provider_queries[query]
    opts = self.opts.copy()
    multiprocessing_data = []
    opts['providers'] = self._optimize_providers(opts['providers'])
    for alias, drivers in six.iteritems(opts['providers']):
        # If a driver has a list_nodes_min command, prefer it over
        # list_nodes unless a specific query option was selected
        this_query = query
        for driver, details in six.iteritems(drivers):
            if ('selected_query_option' not in opts and
                    '{0}.list_nodes_min'.format(driver) in self.clouds):
                this_query = 'list_nodes_min'
            fun = '{0}.{1}'.format(driver, this_query)
            if fun not in self.clouds:
                log.error(
                    'Public cloud provider {0} is not available'.format(
                        driver)
                )
                continue
            multiprocessing_data.append({
                'fun': fun,
                'opts': opts,
                'query': this_query,
                'alias': alias,
                'driver': driver
            })
    output = {}
    if not multiprocessing_data:
        return output
    data_count = len(multiprocessing_data)
    pool = multiprocessing.Pool(
        data_count < 10 and data_count or 10, init_pool_worker)
    parallel_pmap = enter_mainloop(
        _run_parallel_map_providers_query, multiprocessing_data, pool=pool)
    for alias, driver, details in parallel_pmap:
        if not details:
            # There are no provider details; skip it
            continue
        if alias not in output:
            output[alias] = {}
        output[alias][driver] = details
    self.__cached_provider_queries[query] = output
    return output
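# The pool sizing above caps worker processes at 10 using the legacy
# `cond and a or b` idiom. For any data_count >= 1 (the empty case
# returns early above) it is equivalent to min(data_count, 10); the
# idiom would misfire at 0, where `and` yields the falsy 0 and `or`
# falls through to 10. A minimal sketch:
def _example_pool_size(data_count):
    legacy = data_count < 10 and data_count or 10
    if data_count >= 1:
        assert legacy == min(data_count, 10)
    return legacy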
def _optimize_providers(self, providers):
    '''
    Return an optimized mapping of available providers
    '''
    new_providers = {}
    provider_by_driver = {}
    for alias, driver in six.iteritems(providers):
        for name, data in six.iteritems(driver):
            if name not in provider_by_driver:
                provider_by_driver[name] = {}
            provider_by_driver[name][alias] = data
    for driver, providers_data in six.iteritems(provider_by_driver):
        fun = '{0}.optimize_providers'.format(driver)
        if fun not in self.clouds:
            log.debug(
                "The '{0}' cloud driver is unable to be "
                'optimized.'.format(driver)
            )
            for name, prov_data in six.iteritems(providers_data):
                if name not in new_providers:
                    new_providers[name] = {}
                new_providers[name][driver] = prov_data
            continue
        new_data = self.clouds[fun](providers_data)
        if new_data:
            for name, prov_data in six.iteritems(new_data):
                if name not in new_providers:
                    new_providers[name] = {}
                new_providers[name][driver] = prov_data
    return new_providers
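# Hedged sketch of the re-grouping step above: the alias-keyed provider
# tree is inverted into a driver-keyed one, so each driver's optional
# optimize_providers() hook can see all of its aliases at once. The
# aliases and drivers below are invented.
def _example_group_by_driver():
    providers = {
        'us-east': {'ec2': {'region': 'us-east-1'}},
        'us-west': {'ec2': {'region': 'us-west-2'}},
        'lab': {'openstack': {'auth_url': 'http://keystone.local'}},
    }
    by_driver = {}
    for alias, drivers in providers.items():
        for driver, data in drivers.items():
            by_driver.setdefault(driver, {})[alias] = data
    return by_driver
    # {'ec2': {'us-east': {...}, 'us-west': {...}},
    #  'openstack': {'lab': {...}}}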
def location_list(self, lookup='all'):
    '''
    Return a mapping of all location data for available providers
    '''
    data = {}
    lookups = self.lookup_providers(lookup)
    if not lookups:
        return data
    for alias, driver in lookups:
        fun = '{0}.avail_locations'.format(driver)
        if fun not in self.clouds:
            log.debug(
                "The '{0}' cloud driver defined under '{1}' provider "
                'alias is unable to get the locations information'.format(
                    driver, alias)
            )
            continue
        if alias not in data:
            data[alias] = {}
        try:
            with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])):
                data[alias][driver] = self.clouds[fun]()
        except Exception as err:
            log.error(
                "Failed to get the output of '{0}()': {1}".format(fun, err),
                exc_info_on_loglevel=logging.DEBUG
            )
    return data
def image_list(self, lookup='all'):
    '''
    Return a mapping of all image data for available providers
    '''
    data = {}
    lookups = self.lookup_providers(lookup)
    if not lookups:
        return data
    for alias, driver in lookups:
        fun = '{0}.avail_images'.format(driver)
        if fun not in self.clouds:
            log.debug(
                "The '{0}' cloud driver defined under '{1}' provider "
                'alias is unable to get the images information'.format(
                    driver, alias)
            )
            continue
        if alias not in data:
            data[alias] = {}
        try:
            with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])):
                data[alias][driver] = self.clouds[fun]()
        except Exception as err:
            log.error(
                "Failed to get the output of '{0}()': {1}".format(fun, err),
                exc_info_on_loglevel=logging.DEBUG
            )
    return data
def size_list(self, lookup='all'):
    '''
    Return a mapping of all size data for available providers
    '''
    data = {}
    lookups = self.lookup_providers(lookup)
    if not lookups:
        return data
    for alias, driver in lookups:
        fun = '{0}.avail_sizes'.format(driver)
        if fun not in self.clouds:
            log.debug(
                "The '{0}' cloud driver defined under '{1}' provider "
                'alias is unable to get the sizes information'.format(
                    driver, alias)
            )
            continue
        if alias not in data:
            data[alias] = {}
        try:
            with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])):
                data[alias][driver] = self.clouds[fun]()
        except Exception as err:
            log.error(
                "Failed to get the output of '{0}()': {1}".format(fun, err),
                exc_info_on_loglevel=logging.DEBUG
            )
    return data
def provider_list(self, lookup='all'):
    '''
    Return a mapping of all configured providers
    '''
    data = {}
    lookups = self.lookup_providers(lookup)
    if not lookups:
        return data
    for alias, driver in lookups:
        if alias not in data:
            data[alias] = {}
        if driver not in data[alias]:
            data[alias][driver] = {}
    return data
def profile_list(self, provider, lookup='all'):
    '''
    Return a mapping of all configured profiles
    '''
    data = {}
    lookups = self.lookup_profiles(provider, lookup)
    if not lookups:
        return data
    for alias, driver in lookups:
        if alias not in data:
            data[alias] = {}
        if driver not in data[alias]:
            data[alias][driver] = {}
    return data
def create_all(self):
    '''
    Create/Verify the VMs in the VM data
    '''
    ret = []
    for vm_name, vm_details in six.iteritems(self.opts['profiles']):
        ret.append({vm_name: self.create(vm_details)})
    return ret
def destroy(self, names, cached=False):
    '''
    Destroy the named VMs
    '''
    processed = {}
    names = set(names)
    matching = self.get_running_by_names(names, cached=cached)
    vms_to_destroy = set()
    parallel_data = []
    for alias, drivers in six.iteritems(matching):
        for driver, vms in six.iteritems(drivers):
            for name in vms:
                if name in names:
                    vms_to_destroy.add((alias, driver, name))
                    if self.opts['parallel']:
                        parallel_data.append({
                            'opts': self.opts,
                            'name': name,
                            'alias': alias,
                            'driver': driver,
                        })
    # destroying in parallel
    if self.opts['parallel'] and len(parallel_data) > 0:
        # set the pool size based on configuration, or default to the
        # number of machines we're destroying
        if 'pool_size' in self.opts:
            pool_size = self.opts['pool_size']
        else:
            pool_size = len(parallel_data)
        log.info('Destroying in parallel mode; '
                 'Cloud pool size: {0}'.format(pool_size))
        output_multip = enter_mainloop(
            _destroy_multiprocessing, parallel_data, pool_size=pool_size)
        # aggregate the results and return them in the same dict format
        # as the non-parallel destroy
        ret_multip = {}
        for obj in output_multip:
            ret_multip.update(obj)
        for obj in parallel_data:
            alias = obj['alias']
            driver = obj['driver']
            name = obj['name']
            if alias not in processed:
                processed[alias] = {}
            if driver not in processed[alias]:
                processed[alias][driver] = {}
            processed[alias][driver][name] = ret_multip[name]
            if name in names:
                names.remove(name)
    # not destroying in parallel
    else:
        log.info('Destroying in non-parallel mode.')
        for alias, driver, name in vms_to_destroy:
            fun = '{0}.destroy'.format(driver)
            with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])):
                ret = self.clouds[fun](name)
            if alias not in processed:
                processed[alias] = {}
            if driver not in processed[alias]:
                processed[alias][driver] = {}
            processed[alias][driver][name] = ret
            if name in names:
                names.remove(name)
    # now clean up the minion keys
    for alias, driver, name in vms_to_destroy:
        ret = processed[alias][driver][name]
        if not ret:
            continue
        vm_ = {
            'name': name,
            'profile': None,
            'provider': ':'.join([alias, driver]),
            'driver': driver,
        }
        minion_dict = salt.config.get_cloud_config_value(
            'minion', vm_, self.opts, default={})
        key_file = os.path.join(
            self.opts['pki_dir'], 'minions', minion_dict.get('id', name))
        globbed_key_file = glob.glob('{0}.*'.format(key_file))
        if not os.path.isfile(key_file) and not globbed_key_file:
            # There's no such key file; it might have been renamed
            if isinstance(ret, dict) and 'newname' in ret:
                salt.utils.cloud.remove_key(
                    self.opts['pki_dir'], ret['newname'])
            continue
        if os.path.isfile(key_file) and not globbed_key_file:
            # Single key entry; remove it
            salt.utils.cloud.remove_key(
                self.opts['pki_dir'], os.path.basename(key_file))
            continue
        if (not os.path.isfile(key_file) and globbed_key_file and
                len(globbed_key_file) == 1):
            # Single glob entry; remove it
            salt.utils.cloud.remove_key(
                self.opts['pki_dir'],
                os.path.basename(globbed_key_file[0]))
            continue
        # Several entries matched; ask the user which one(s) to delete
        print("There are several minion keys whose name starts with "
              "'{0}'. We need to ask you which one should be "
              'deleted:'.format(name))
        while True:
            for idx, filename in enumerate(globbed_key_file):
                print(' {0}: {1}'.format(idx, os.path.basename(filename)))
            selection = input('Which minion key should be deleted(number)? ')
            try:
                selection = int(selection)
            except ValueError:
                print("'{0}' is not a valid selection.".format(selection))
            try:
                filename = os.path.basename(globbed_key_file.pop(selection))
            except Exception:
                continue
            delete = input("Delete '{0}'? [Y/n]? ".format(filename))
            if delete == '' or delete.lower().startswith('y'):
                salt.utils.cloud.remove_key(self.opts['pki_dir'], filename)
                print("Deleted '{0}'".format(filename))
                break
            print("Did not delete '{0}'".format(filename))
            break
    if names and not processed:
        raise SaltCloudSystemExit(
            'The following VMs were not found: {0}'.format(', '.join(names)))
    elif names and processed:
        processed['Not Found'] = names
    elif not processed:
        raise SaltCloudSystemExit('No machines were destroyed!')
    return processed
def reboot(self, names):
    '''
    Reboot the named VMs
    '''
    ret = []
    pmap = self.map_providers_parallel()
    acts = {}
    for prov, nodes in six.iteritems(pmap):
        acts[prov] = []
        for node in nodes:
            if node in names:
                acts[prov].append(node)
    for prov, names_ in six.iteritems(acts):
        fun = '{0}.reboot'.format(prov)
        for name in names_:
            ret.append({name: self.clouds[fun](name)})
    return ret
def create(self, vm_, local_master=True):
    '''
    Create a single VM
    '''
    output = {}
    minion_dict = salt.config.get_cloud_config_value(
        'minion', vm_, self.opts, default={})
    alias, driver = vm_['provider'].split(':')
    fun = '{0}.create'.format(driver)
    if fun not in self.clouds:
        log.error(
            "Creating '{0[name]}' using '{0[provider]}' as the provider "
            "cannot complete since '{1}' is not available".format(
                vm_, driver)
        )
        return
    deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
    make_master = salt.config.get_cloud_config_value(
        'make_master', vm_, self.opts)
    if deploy:
        if not make_master and 'master' not in minion_dict:
            log.warning(
                "There's no master defined on the '{0}' VM "
                'settings.'.format(vm_['name'])
            )
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug("Generating minion keys for '{0[name]}'".format(vm_))
            priv, pub = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize', vm_, self.opts))
            vm_['pub_key'] = pub
            vm_['priv_key'] = priv
    else:
        # pub_key and priv_key are still referenced when deploy is disabled
        vm_['pub_key'] = None
        vm_['priv_key'] = None
    key_id = minion_dict.get('id', vm_['name'])
    domain = vm_.get('domain')
    if vm_.get('use_fqdn') and domain:
        minion_dict['append_domain'] = domain
    if 'append_domain' in minion_dict:
        key_id = '.'.join([key_id, minion_dict['append_domain']])
    if (make_master is True and 'master_pub' not in vm_ and
            'master_pem' not in vm_):
        log.debug("Generating the master keys for '{0[name]}'".format(vm_))
        master_priv, master_pub = salt.utils.cloud.gen_keys(
            salt.config.get_cloud_config_value('keysize', vm_, self.opts))
        vm_['master_pub'] = master_pub
        vm_['master_pem'] = master_priv
    if local_master is True and deploy is True:
        # Accept the key on the local master
        salt.utils.cloud.accept_key(
            self.opts['pki_dir'], vm_['pub_key'], key_id)
    vm_['os'] = salt.config.get_cloud_config_value('script', vm_, self.opts)
    try:
        vm_['inline_script'] = salt.config.get_cloud_config_value(
            'inline_script', vm_, self.opts)
    except KeyError:
        pass
    try:
        alias, driver = vm_['provider'].split(':')
        func = '{0}.create'.format(driver)
        with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])):
            output = self.clouds[func](vm_)
        if output is not False and 'sync_after_install' in self.opts:
            if self.opts['sync_after_install'] not in (
                    'all', 'modules', 'states', 'grains'):
                log.error('Bad option for sync_after_install')
                return output
            # A small pause helps the sync work more reliably
            time.sleep(3)
            start = int(time.time())
            while int(time.time()) < start + 60:
                mopts_ = salt.config.DEFAULT_MINION_OPTS
                conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])
                mopts_.update(
                    salt.config.minion_config(
                        os.path.join(conf_path, 'minion')))
                client = salt.client.get_local_client(mopts=self.opts)
                ret = client.cmd(
                    vm_['name'],
                    'saltutil.sync_{0}'.format(
                        self.opts['sync_after_install']),
                    timeout=self.opts['timeout'])
                if ret:
                    log.info(
                        six.u('Synchronized the following dynamic modules: '
                              '{0}').format(ret))
                    break
    except KeyError as exc:
        log.exception(
            'Failed to create VM {0}. Configuration value {1} needs '
            'to be set'.format(vm_['name'], exc))
    # If it's a map, a start_action is handled elsewhere
    try:
        opt_map = self.opts['map']
    except KeyError:
        opt_map = False
    if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
        log.info('Running {0} on {1}'.format(
            self.opts['start_action'], vm_['name']))
        client = salt.client.get_local_client(mopts=self.opts)
        action_out = client.cmd(
            vm_['name'],
            self.opts['start_action'],
            timeout=self.opts['timeout'] * 60)
        output['ret'] = action_out
    return output
def extras(self, extra_):
    '''
    Extra actions
    '''
    output = {}
    alias, driver = extra_['provider'].split(':')
    fun = '{0}.{1}'.format(driver, extra_['action'])
    if fun not in self.clouds:
        log.error(
            "Creating '{0[name]}' using '{0[provider]}' as the provider "
            "cannot complete since '{1}' is not available".format(
                extra_, driver)
        )
        return
    try:
        with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=extra_['provider']):
            output = self.clouds[fun](**extra_)
    except KeyError as exc:
        log.exception(
            'Failed to perform {0[provider]}.{0[action]} on {0[name]}. '
            'Configuration value {1} needs to be set'.format(extra_, exc))
    return output
def run_profile(self, profile, names, vm_overrides=None):
    '''
    Parse over the options passed on the command line and determine how
    to handle them
    '''
    if profile not in self.opts['profiles']:
        msg = 'Profile {0} is not defined'.format(profile)
        log.error(msg)
        return {'Error': msg}
    ret = {}
    if not vm_overrides:
        vm_overrides = {}
    try:
        with salt.utils.files.fopen(self.opts['conf_file'], 'r') as mcc:
            main_cloud_config = yaml.safe_load(mcc)
        if not main_cloud_config:
            main_cloud_config = {}
    except KeyError:
        main_cloud_config = {}
    except IOError:
        main_cloud_config = {}
    if main_cloud_config is None:
        main_cloud_config = {}
    mapped_providers = self.map_providers_parallel()
    profile_details = self.opts['profiles'][profile]
    vms = {}
    for prov, val in six.iteritems(mapped_providers):
        prov_name = next(iter(val))
        for node in mapped_providers[prov][prov_name]:
            vms[node] = mapped_providers[prov][prov_name][node]
            vms[node]['provider'] = prov
            vms[node]['driver'] = prov_name
    alias, driver = profile_details['provider'].split(':')
    provider_details = self.opts['providers'][alias][driver].copy()
    del provider_details['profiles']
    for name in names:
        if name in vms:
            prov = vms[name]['provider']
            driv = vms[name]['driver']
            msg = six.u('{0} already exists under {1}:{2}').format(
                name, prov, driv)
            log.error(msg)
            ret[name] = {'Error': msg}
            continue
        vm_ = main_cloud_config.copy()
        vm_.update(provider_details)
        vm_.update(profile_details)
        vm_.update(vm_overrides)
        vm_['name'] = name
        if self.opts['parallel']:
            process = multiprocessing.Process(
                target=self.create, args=(vm_,))
            process.start()
            ret[name] = {
                'Provisioning': 'VM being provisioned in parallel. '
                                'PID: {0}'.format(process.pid)
            }
            continue
        try:
            ret[name] = self.create(vm_)
            if not ret[name]:
                ret[name] = {'Error': 'Failed to deploy VM'}
                if len(names) == 1:
                    raise SaltCloudSystemExit('Failed to deploy VM')
                continue
            if self.opts.get('show_deploy_args', False) is False:
                ret[name].pop('deploy_kwargs', None)
        except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
            if len(names) == 1:
                raise
            ret[name] = {'Error': str(exc)}
    return ret
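# Hedged sketch of the option-merge order used above: later dicts win,
# so per-call vm_overrides beat the profile, which beats the provider,
# which beats the main cloud config. All values are invented.
def _example_vm_option_merge():
    main_cloud_config = {'minion': {'master': 'salt.example.com'},
                         'keysize': 2048}
    provider_details = {'driver': 'ec2', 'keysize': 4096}
    profile_details = {'provider': 'my-ec2:ec2', 'size': 't1.micro'}
    vm_overrides = {'size': 'm1.small'}
    vm_ = main_cloud_config.copy()
    vm_.update(provider_details)
    vm_.update(profile_details)
    vm_.update(vm_overrides)
    vm_['name'] = 'minion01'
    return vm_  # size == 'm1.small', keysize == 4096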
def do_action(self, names, kwargs):
    '''
    Perform an action on a VM which may be specific to this cloud provider
    '''
    ret = {}
    invalid_functions = {}
    names = set(names)
    for alias, drivers in six.iteritems(self.map_providers_parallel()):
        if not names:
            break
        for driver, vms in six.iteritems(drivers):
            if not names:
                break
            valid_function = True
            fun = '{0}.{1}'.format(driver, self.opts['action'])
            if fun not in self.clouds:
                log.info(
                    "'{0}()' is not available. Not actioning...".format(fun))
                valid_function = False
            for vm_name, vm_details in six.iteritems(vms):
                if not names:
                    break
                if vm_name not in names:
                    if not isinstance(vm_details, dict):
                        vm_details = {}
                    if 'id' in vm_details and vm_details['id'] in names:
                        vm_name = vm_details['id']
                    else:
                        log.debug(
                            "vm:{0} in provider:{1} is not in name "
                            "list:'{2}'".format(vm_name, driver, names))
                        continue
                # Build the dictionary of invalid functions with their
                # associated VMs
                if valid_function is False:
                    if invalid_functions.get(fun) is None:
                        invalid_functions.update({fun: []})
                    invalid_functions[fun].append(vm_name)
                    continue
                with salt.utils.context.func_globals_inject(
                        self.clouds[fun],
                        __active_provider_name__=':'.join([alias, driver])):
                    if alias not in ret:
                        ret[alias] = {}
                    if driver not in ret[alias]:
                        ret[alias][driver] = {}
                    # Clean kwargs of any "__pub" data injected by the
                    # salt command before calling the cloud driver
                    kwargs = salt.utils.args.clean_kwargs(**kwargs)
                    if kwargs:
                        ret[alias][driver][vm_name] = self.clouds[fun](
                            vm_name, kwargs, call='action')
                    else:
                        ret[alias][driver][vm_name] = self.clouds[fun](
                            vm_name, call='action')
                    names.remove(vm_name)
    # If we haven't actioned all of the VMs, report what's missing or
    # had an invalid function
    missing_vms = set()
    if invalid_functions:
        ret['Invalid Actions'] = invalid_functions
        invalid_func_vms = set()
        for key, val in six.iteritems(invalid_functions):
            invalid_func_vms = invalid_func_vms.union(set(val))
        # Find the VMs that are in names but not in the invalid set
        missing_vms = names.difference(invalid_func_vms)
        if missing_vms:
            ret['Not Found'] = list(missing_vms)
            ret['Not Actioned/Not Running'] = list(names)
    if not names:
        return ret
    # Don't duplicate the missing-VM reporting for invalid functions
    if missing_vms:
        return ret
    ret['Not Actioned/Not Running'] = list(names)
    ret['Not Found'] = list(names)
    return ret
def do_function(self, prov, func, kwargs):
    '''
    Perform a function against a cloud provider
    '''
    matches = self.lookup_providers(prov)
    if len(matches) > 1:
        raise SaltCloudSystemExit(
            "More than one result matched '{0}'. Please specify one of: "
            '{1}'.format(
                prov,
                ', '.join(['{0}:{1}'.format(alias, driver)
                           for (alias, driver) in matches])
            )
        )
    alias, driver = matches.pop()
    fun = '{0}.{1}'.format(driver, func)
    if fun not in self.clouds:
        raise SaltCloudSystemExit(
            "The '{0}' cloud provider alias, for the '{1}' driver, does "
            "not define the function '{2}'".format(alias, driver, func)
        )
    log.debug(
        "Trying to execute '{0}' with the following kwargs: {1}".format(
            fun, kwargs))
    with salt.utils.context.func_globals_inject(
            self.clouds[fun],
            __active_provider_name__=':'.join([alias, driver])):
        if kwargs:
            return {alias: {driver: self.clouds[fun](
                call='function', kwargs=kwargs)}}
        return {alias: {driver: self.clouds[fun](call='function')}}
def __filter_non_working_providers(self):
    '''
    Remove any mis-configured cloud providers from the available listing
    '''
    for alias, drivers in six.iteritems(self.opts['providers'].copy()):
        for driver in drivers.copy():
            fun = '{0}.get_configured_provider'.format(driver)
            if fun not in self.clouds:
                # Mis-configured provider that got removed?
                log.warning(
                    "The cloud driver, '{0}', configured under the "
                    "'{1}' cloud provider alias, could not be loaded. "
                    'Please check your provider configuration files and '
                    'ensure all required dependencies are installed '
                    "for the '{0}' driver.\n"
                    "In rare cases, this could indicate the '{2}()' "
                    'function could not be found.\n'
                    "Removing '{0}' from the available providers "
                    'list'.format(driver, alias, fun)
                )
                self.opts['providers'][alias].pop(driver)
                if alias not in self.opts['providers']:
                    continue
                if not self.opts['providers'][alias]:
                    self.opts['providers'].pop(alias)
                continue
            with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])):
                if self.clouds[fun]() is False:
                    log.warning(
                        "The cloud driver, '{0}', configured under the "
                        "'{1}' cloud provider alias is not properly "
                        'configured. Removing it from the available '
                        'providers list.'.format(driver, alias)
                    )
                    self.opts['providers'][alias].pop(driver)
            if alias not in self.opts['providers']:
                continue
            if not self.opts['providers'][alias]:
                self.opts['providers'].pop(alias)
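# Hedged sketch of the copy-then-mutate pattern above: iterating over
# .copy() snapshots lets the loop pop mis-configured drivers (and then
# empty aliases) from the live dict without invalidating the iterator.
# Provider and driver names are invented.
def _example_prune_providers():
    providers = {
        'good': {'ec2': {}},
        'bad': {'brokendriver': {}},
    }
    loadable = {'ec2.get_configured_provider'}
    for alias, drivers in providers.copy().items():
        for driver in drivers.copy():
            if '{0}.get_configured_provider'.format(driver) not in loadable:
                providers[alias].pop(driver)
            if not providers[alias]:
                providers.pop(alias)
    return providers  # {'good': {'ec2': {}}}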
def read(self):
    '''
    Read in the specified map file and return the map structure
    '''
    map_ = None
    if self.opts.get('map', None) is None:
        if self.opts.get('map_data', None) is None:
            return {}
        else:
            map_ = self.opts['map_data']
    if not map_:
        local_minion_opts = copy.deepcopy(self.opts)
        local_minion_opts['file_client'] = 'local'
        self.minion = salt.minion.MasterMinion(local_minion_opts)
        if not os.path.isfile(self.opts['map']):
            if not self.opts['map'].startswith('salt://'):
                log.error(
                    "The specified map file does not exist: '{0}'".format(
                        self.opts['map'])
                )
                raise SaltCloudNotFound()
        if self.opts['map'].startswith('salt://'):
            cached_map = self.minion.functions['cp.cache_file'](
                self.opts['map'])
        else:
            cached_map = self.opts['map']
        try:
            renderer = self.opts.get('renderer', 'yaml_jinja')
            rend = salt.loader.render(self.opts, {})
            blacklist = self.opts.get('renderer_blacklist')
            whitelist = self.opts.get('renderer_whitelist')
            map_ = compile_template(
                cached_map, rend, renderer, blacklist, whitelist)
        except Exception as exc:
            log.error(
                'Rendering map {0} failed, render error:\n{1}'.format(
                    self.opts['map'], exc),
                exc_info_on_loglevel=logging.DEBUG
            )
            return {}
        if 'include' in map_:
            map_ = salt.config.include_config(
                map_, self.opts['map'], verbose=False)
    # Normalize the map into the expected data format
    for profile, mapped in six.iteritems(map_.copy()):
        if isinstance(mapped, (list, tuple)):
            entries = {}
            for mapping in mapped:
                if isinstance(mapping, six.string_types):
                    # A plain list entry: just a minion id
                    mapping = {mapping: None}
                for name, overrides in six.iteritems(mapping):
                    if overrides is None or isinstance(overrides, bool):
                        # An entry with a trailing colon but no overrides
                        overrides = {}
                    try:
                        overrides.setdefault('name', name)
                    except AttributeError:
                        log.error(
                            "Cannot use 'name' as a minion id in a cloud "
                            'map as it is a reserved word. Please change '
                            "'name' to a different minion id reference."
                        )
                        return {}
                    entries[name] = overrides
            map_[profile] = entries
            continue
        if isinstance(mapped, dict):
            # Convert the dictionary mapping to the entries format
            entries = {}
            for name, overrides in six.iteritems(mapped):
                overrides.setdefault('name', name)
                entries[name] = overrides
            map_[profile] = entries
            continue
        if isinstance(mapped, six.string_types):
            # A single string entry: make it iterable for the next step
            mapped = [mapped]
        map_[profile] = {}
        for name in mapped:
            map_[profile][name] = {'name': name}
    return map_
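# Hedged sketch of the map shapes read() normalizes (profile and minion
# names invented): a profile can map to plain minion ids or to one-key
# dicts with per-node overrides; both normalize to
# {profile: {name: {'name': name, ...overrides}}}.
def _example_map_shapes():
    import textwrap
    import yaml
    map_yaml = textwrap.dedent('''\
        do_512_git:
          - minion01
          - minion02:
              size_id: 66
        ''')
    map_ = yaml.safe_load(map_yaml)
    normalized = {}
    for profile, mapped in map_.items():
        entries = {}
        for mapping in mapped:
            if isinstance(mapping, str):
                mapping = {mapping: None}
            for name, overrides in mapping.items():
                overrides = overrides or {}
                overrides.setdefault('name', name)
                entries[name] = overrides
        normalized[profile] = entries
    return normalized
    # {'do_512_git': {'minion01': {'name': 'minion01'},
    #                 'minion02': {'size_id': 66, 'name': 'minion02'}}}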
def map_data(self, cached=False):
    '''
    Create a data map of what to execute on
    '''
    ret = {'create': {}}
    pmap = self.map_providers_parallel(cached=cached)
    exist = set()
    defined = set()
    for profile_name, nodes in six.iteritems(self.rendered_map):
        if profile_name not in self.opts['profiles']:
            msg = (
                "The required profile, '{0}', defined in the map does "
                'not exist. The defined nodes, {1}, will not be '
                'created.'.format(
                    profile_name,
                    ', '.join("'{0}'".format(node) for node in nodes))
            )
            log.error(msg)
            if 'errors' not in ret:
                ret['errors'] = {}
            ret['errors'][profile_name] = msg
            continue
        profile_data = self.opts['profiles'].get(profile_name)
        # Pull in the associated provider data, in case something like
        # size or image is defined in the provider file
        alias, driver = profile_data.get('provider').split(':')
        provider_details = self.opts['providers'][alias][driver].copy()
        del provider_details['profiles']
        # Profile data should override provider data, if defined
        provider_details.update(profile_data)
        profile_data = provider_details
        for nodename, overrides in six.iteritems(nodes):
            nodedata = copy.deepcopy(profile_data)
            # Update profile data with the map overrides
            for setting in ('grains', 'master', 'minion', 'volumes',
                            'requires'):
                deprecated = 'map_{0}'.format(setting)
                if deprecated in overrides:
                    log.warning(
                        "The use of '{0}' on the '{1}' mapping has been "
                        'deprecated. The preferred way now is to just '
                        "define '{2}'. For now, salt-cloud will do the "
                        'proper thing and convert the deprecated mapping '
                        'into the preferred one.'.format(
                            deprecated, nodename, setting)
                    )
                    overrides[setting] = overrides.pop(deprecated)
            # Merge minion grains from the map file
            if ('minion' in overrides and 'minion' in nodedata and
                    'grains' in overrides['minion'] and
                    'grains' in nodedata['minion']):
                nodedata['minion']['grains'].update(
                    overrides['minion']['grains'])
                del overrides['minion']['grains']
                # Remove the minion key if it is now an empty dict
                if len(overrides['minion']) == 0:
                    del overrides['minion']
            nodedata = salt.utils.dictupdate.update(nodedata, overrides)
            # Add the computed information to the return data
            ret['create'][nodename] = nodedata
            # Add the node name to the defined set
            alias, driver = nodedata['provider'].split(':')
            defined.add((alias, driver, nodename))

    def get_matching_by_name(name):
        matches = {}
        for alias, drivers in six.iteritems(pmap):
            for driver, vms in six.iteritems(drivers):
                for vm_name, details in six.iteritems(vms):
                    if vm_name == name and driver not in matches:
                        matches[driver] = details['state']
        return matches

    for alias, drivers in six.iteritems(pmap):
        for driver, vms in six.iteritems(drivers):
            for name, details in six.iteritems(vms):
                exist.add((alias, driver, name))
                if name not in ret['create']:
                    continue
                # The machine is set to be created. Does it already exist?
                matching = get_matching_by_name(name)
                if not matching:
                    continue
                # A machine by the same name exists
                for item in matching:
                    if name not in ret['create']:
                        # Machine already removed
                        break
                    log.warning("'{0}' already exists, removing from "
                                'the create map.'.format(name))
                    if 'existing' not in ret:
                        ret['existing'] = {}
                    ret['existing'][name] = ret['create'].pop(name)
    if 'hard' in self.opts and self.opts['hard']:
        if self.opts['enable_hard_maps'] is False:
            raise SaltCloudSystemExit(
                'The --hard map can be extremely dangerous to use, and '
                'therefore must explicitly be enabled in the main '
                "configuration file, by setting 'enable_hard_maps' "
                'to True'
            )
        # Hard maps are enabled. Look for the items to delete.
        ret['destroy'] = exist.difference(defined)
    return ret
def run_map(self, dmap):
    '''
    Execute the contents of the VM map
    '''
    if self._has_loop(dmap):
        msg = 'Uh-oh, that cloud map has a dependency loop!'
        log.error(msg)
        raise SaltCloudException(msg)
    # Go through the create list and calculate dependencies
    for key, val in six.iteritems(dmap['create']):
        log.info('Calculating dependencies for {0}'.format(key))
        level = 0
        level = self._calcdep(dmap, key, val, level)
        log.debug('Got execution order {0} for {1}'.format(level, key))
        dmap['create'][key]['level'] = level
    try:
        existing_list = six.iteritems(dmap['existing'])
    except KeyError:
        existing_list = six.iteritems({})
    for key, val in existing_list:
        log.info('Calculating dependencies for {0}'.format(key))
        level = 0
        level = self._calcdep(dmap, key, val, level)
        log.debug('Got execution order {0} for {1}'.format(level, key))
        dmap['existing'][key]['level'] = level
    # Now sort the create list based on dependencies
    create_list = sorted(
        six.iteritems(dmap['create']), key=lambda x: x[1]['level'])
    output = {}
    if self.opts['parallel']:
        parallel_data = []
    master_name = None
    master_minion_name = None
    master_host = None
    master_finger = None
    try:
        master_name, master_profile = next((
            (name, profile) for name, profile in create_list
            if profile.get('make_master', False) is True
        ))
        master_minion_name = master_name
        log.debug("Creating new master '{0}'".format(master_name))
        if salt.config.get_cloud_config_value(
                'deploy', master_profile, self.opts) is False:
            raise SaltCloudSystemExit(
                "Cannot proceed with 'make_master' when salt deployment "
                'is disabled(ex: --no-deploy).'
            )
        # Generate the master keys
        log.debug(
            "Generating master keys for '{0[name]}'".format(master_profile))
        priv, pub = salt.utils.cloud.gen_keys(
            salt.config.get_cloud_config_value(
                'keysize', master_profile, self.opts))
        master_profile['master_pub'] = pub
        master_profile['master_pem'] = priv
        # Generate the fingerprint of the master pubkey in order to
        # mitigate man-in-the-middle attacks
        master_temp_pub = salt.utils.files.mkstemp()
        with salt.utils.files.fopen(master_temp_pub, 'w') as mtp:
            mtp.write(pub)
        master_finger = salt.utils.pem_finger(
            master_temp_pub, sum_type=self.opts['hash_type'])
        os.unlink(master_temp_pub)
        if master_profile.get('make_minion', True) is True:
            master_profile.setdefault('minion', {})
            if 'id' in master_profile['minion']:
                master_minion_name = master_profile['minion']['id']
            # Set this minion's master as local if the user has not set it
            if 'master' not in master_profile['minion']:
                master_profile['minion']['master'] = '127.0.0.1'
                if master_finger is not None:
                    master_profile['master_finger'] = master_finger
        # Generate the minion keys to pre-seed the master
        for name, profile in create_list:
            make_minion = salt.config.get_cloud_config_value(
                'make_minion', profile, self.opts, default=True)
            if make_minion is False:
                continue
            log.debug(
                "Generating minion keys for '{0[name]}'".format(profile))
            priv, pub = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize', profile, self.opts))
            profile['pub_key'] = pub
            profile['priv_key'] = priv
            # Store the minion's public key to be pre-seeded in the master
            master_profile.setdefault('preseed_minion_keys', {})
            master_profile['preseed_minion_keys'].update({name: pub})
        local_master = False
        if (master_profile['minion'].get('local_master', False) and
                master_profile['minion'].get('master', None) is not None):
            # The minion is explicitly defining a master and it's
            # explicitly saying it's the local one
            local_master = True
        out = self.create(master_profile, local_master=local_master)
        if not isinstance(out, dict):
            log.debug(
                'Master creation details is not a dictionary: {0}'.format(
                    out))
        elif 'Errors' in out:
            raise SaltCloudSystemExit(
                'An error occurred while creating the master, not '
                'continuing: {0}'.format(out['Errors'])
            )
        deploy_kwargs = (
            self.opts.get('show_deploy_args', False) is True and
            out.get('deploy_kwargs', {}) or
            out.pop('deploy_kwargs', {})
        )
        master_host = deploy_kwargs.get(
            'salt_host', deploy_kwargs.get('host', None))
        if master_host is None:
            raise SaltCloudSystemExit(
                'Host for new master {0} was not found, '
                'aborting map'.format(master_name)
            )
        output[master_name] = out
    except StopIteration:
        log.debug('No make_master found in map')
        # Local master: generate the fingerprint of the master pubkey in
        # order to mitigate man-in-the-middle attacks
        master_pub = os.path.join(self.opts['pki_dir'], 'master.pub')
        if os.path.isfile(master_pub):
            master_finger = salt.utils.pem_finger(
                master_pub, sum_type=self.opts['hash_type'])
    opts = self.opts.copy()
    if self.opts['parallel']:
        # Force display_ssh_output to be False since the console would
        # be scrambled by parallel output
        log.info(
            'Since parallel deployment is in use, ssh console output '
            'is disabled. All ssh output will be logged though'
        )
        opts['display_ssh_output'] = False
    local_master = master_name is None
    for name, profile in create_list:
        if name in (master_name, master_minion_name):
            # Already deployed, it's the master's minion
            continue
        if ('minion' in profile and
                profile['minion'].get('local_master', False) and
                profile['minion'].get('master', None) is not None):
            # The minion is explicitly defining a master and it's
            # explicitly saying it's the local one
            local_master = True
        if master_finger is not None and local_master is False:
            profile['master_finger'] = master_finger
        if master_host is not None:
            profile.setdefault('minion', {})
            profile['minion'].setdefault('master', master_host)
        if self.opts['parallel']:
            parallel_data.append({
                'opts': opts,
                'name': name,
                'profile': profile,
                'local_master': local_master,
            })
            continue
        # Not deploying in parallel
        try:
            output[name] = self.create(profile, local_master=local_master)
            if (self.opts.get('show_deploy_args', False) is False and
                    'deploy_kwargs' in output and
                    isinstance(output[name], dict)):
                output[name].pop('deploy_kwargs', None)
        except SaltCloudException as exc:
            log.error(
                "Failed to deploy '{0}'. Error: {1}".format(name, exc),
                exc_info_on_loglevel=logging.DEBUG
            )
            output[name] = {'Error': str(exc)}
    for name in dmap.get('destroy', ()):
        output[name] = self.destroy(name)
    if self.opts['parallel'] and len(parallel_data) > 0:
        if 'pool_size' in self.opts:
            pool_size = self.opts['pool_size']
        else:
            pool_size = len(parallel_data)
        log.info('Cloud pool size: {0}'.format(pool_size))
        output_multip = enter_mainloop(
            _create_multiprocessing, parallel_data, pool_size=pool_size)
        # We have deployed in parallel, now run the start_action in the
        # correct order based on dependencies
        if self.opts['start_action']:
            actionlist = []
            grp = -1
            for key, val in groupby(
                    six.itervalues(dmap['create']),
                    lambda x: x['level']):
                actionlist.append([])
                grp += 1
                for item in val:
                    actionlist[grp].append(item['name'])
            out = {}
            for group in actionlist:
                log.info('Running {0} on {1}'.format(
                    self.opts['start_action'], ', '.join(group)))
                client = salt.client.get_local_client()
                out.update(client.cmd(
                    ','.join(group),
                    self.opts['start_action'],
                    timeout=self.opts['timeout'] * 60,
                    tgt_type='list'
                ))
            for obj in output_multip:
                next(six.itervalues(obj))['ret'] = out[next(six.iterkeys(obj))]
                output.update(obj)
        else:
            for obj in output_multip:
                output.update(obj)
    return output
def run(self):
    '''
    Execute the salt-cloud command line
    '''
    # Parse shell arguments
    self.parse_args()
    salt_master_user = self.config.get('user')
    if salt_master_user is None:
        salt_master_user = salt.utils.get_user()
    if not check_user(salt_master_user):
        self.error(
            'If salt-cloud is running on a master machine, salt-cloud '
            'needs to run as the same user as the salt-master, '
            "'{0}'. If salt-cloud is not running on a salt-master, "
            'the appropriate write permissions must be granted to '
            "'{1}'. Please run salt-cloud as root, '{0}', or change "
            "permissions for '{1}'.".format(
                salt_master_user, syspaths.CONFIG_DIR)
        )
    try:
        if self.config['verify_env']:
            verify_env(
                [os.path.dirname(self.config['conf_file'])],
                salt_master_user)
            logfile = self.config['log_file']
            if (logfile is not None and
                    not logfile.startswith('tcp://') and
                    not logfile.startswith('udp://') and
                    not logfile.startswith('file://')):
                # Logfile is not using Syslog, verify
                verify_files([logfile], salt_master_user)
    except (IOError, OSError) as err:
        log.error('Error while verifying the environment: {0}'.format(err))
        sys.exit(err.errno)
    # Setup log file logging
    self.setup_logfile_logger()
    verify_log(self.config)
    if self.options.update_bootstrap:
        ret = salt.utils.cloud.update_bootstrap(self.config)
        salt.output.display_output(
            ret, self.options.output, opts=self.config)
        self.exit(salt.defaults.exitcodes.EX_OK)
    log.info('salt-cloud starting')
    try:
        mapper = salt.cloud.Map(self.config)
    except SaltCloudSystemExit as exc:
        self.handle_exception(exc.args, exc)
    except SaltCloudException as exc:
        msg = 'There was an error generating the mapper.'
        self.handle_exception(msg, exc)
    names = self.config.get('names', None)
    if names is not None:
        filtered_rendered_map = {}
        for map_profile in mapper.rendered_map:
            filtered_map_profile = {}
            for name in mapper.rendered_map[map_profile]:
                if name in names:
                    filtered_map_profile[name] = \
                        mapper.rendered_map[map_profile][name]
            if filtered_map_profile:
                filtered_rendered_map[map_profile] = filtered_map_profile
        mapper.rendered_map = filtered_rendered_map
    ret = {}
    if self.selected_query_option is not None:
        if self.selected_query_option == 'list_providers':
            try:
                ret = mapper.provider_list()
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error listing providers: {0}'
                self.handle_exception(msg, exc)
        elif self.selected_query_option == 'list_profiles':
            provider = self.options.list_profiles
            try:
                ret = mapper.profile_list(provider)
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error listing profiles: {0}'
                self.handle_exception(msg, exc)
        elif self.config.get('map', None):
            log.info("Applying map from '{0}'.".format(self.config['map']))
            try:
                ret = mapper.interpolated_map(
                    query=self.selected_query_option)
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error with a custom map: {0}'
                self.handle_exception(msg, exc)
        else:
            try:
                ret = mapper.map_providers_parallel(
                    query=self.selected_query_option)
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error with a map: {0}'
                self.handle_exception(msg, exc)
    elif self.options.list_locations is not None:
        try:
            ret = mapper.location_list(self.options.list_locations)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing locations: {0}'
            self.handle_exception(msg, exc)
    elif self.options.list_images is not None:
        try:
            ret = mapper.image_list(self.options.list_images)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing images: {0}'
            self.handle_exception(msg, exc)
    elif self.options.list_sizes is not None:
        try:
            ret = mapper.size_list(self.options.list_sizes)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing sizes: {0}'
            self.handle_exception(msg, exc)
    elif self.options.destroy and (
            self.config.get('names', None) or
            self.config.get('map', None)):
        map_file = self.config.get('map', None)
        names = self.config.get('names', ())
        if map_file is not None:
            if names != ():
                msg = (
                    "Supplying a mapfile, '{0}', in addition to instance "
                    "names {1} with the '--destroy' or '-d' function is "
                    'not supported. Please choose to delete either the '
                    'entire map file or individual instances.'.format(
                        map_file, names)
                )
                self.handle_exception(msg, SaltCloudSystemExit)
            log.info("Applying map from '{0}'.".format(map_file))
            matching = mapper.delete_map(query='list_nodes')
        else:
            matching = mapper.get_running_by_names(
                names, profile=self.options.profile)
        if not matching:
            print('No machines were found to be destroyed')
            self.exit(salt.defaults.exitcodes.EX_OK)
        msg = 'The following virtual machines are set to be destroyed:\n'
        names = set()
        for alias, drivers in six.iteritems(matching):
            msg += '  {0}:\n'.format(alias)
            for driver, vms in six.iteritems(drivers):
                msg += '    {0}:\n'.format(driver)
                for name in vms:
                    msg += '      {0}\n'.format(name)
                    names.add(name)
        try:
            if self.print_confirm(msg):
                ret = mapper.destroy(names, cached=True)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error destroying machines: {0}'
            self.handle_exception(msg, exc)
    elif self.options.action and (
            self.config.get('names', None) or
            self.config.get('map', None)):
        if self.config.get('map', None):
            log.info("Applying map from '{0}'.".format(self.config['map']))
            try:
                names = mapper.get_vmnames_by_action(self.options.action)
            except SaltCloudException as exc:
                msg = 'There was an error actioning virtual machines.'
                self.handle_exception(msg, exc)
        else:
            names = self.config.get('names', None)
        kwargs = {}
        machines = []
        msg = (
            'The following virtual machines are set to be actioned with '
            '"{0}":\n'.format(self.options.action)
        )
        for name in names:
            if '=' in name:
                # This is obviously not a machine name, treat it as a kwarg
                key, value = name.split('=', 1)
                kwargs[key] = value
            else:
                msg += '  {0}\n'.format(name)
                machines.append(name)
        names = machines
        try:
            if self.print_confirm(msg):
                ret = mapper.do_action(names, kwargs)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error actioning machines: {0}'
            self.handle_exception(msg, exc)
    elif self.options.function:
        kwargs = {}
        args = self.args[:]
        for arg in args[:]:
            if '=' in arg:
                key, value = arg.split('=', 1)
                kwargs[key] = value
                args.remove(arg)
        if args:
            self.error(
                'Any arguments passed to --function need to be passed as '
                'kwargs. Ex: image=ami-54cf5c3d. Remaining arguments: '
                '{0}'.format(args)
            )
        try:
            ret = mapper.do_function(
                self.function_provider, self.function_name, kwargs)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error running the function: {0}'
            self.handle_exception(msg, exc)
    elif self.options.profile and self.config.get('names', False):
        try:
            ret = mapper.run_profile(
                self.options.profile, self.config.get('names'))
        except (SaltCloudException, Exception) as exc:
            msg = 'There was a profile error: {0}'
            self.handle_exception(msg, exc)
    elif self.options.set_password:
        username = self.credential_username
        provider_name = 'salt.cloud.provider.{0}'.format(
            self.credential_provider)
        salt.utils.cloud.store_password_in_keyring(provider_name, username)
    elif (self.config.get('map', None) and
            self.selected_query_option is None):
        if len(mapper.rendered_map) == 0:
            sys.stderr.write('No nodes defined in this map')
            self.exit(salt.defaults.exitcodes.EX_GENERIC)
        try:
            ret = {}
            run_map = True
            log.info("Applying map from '{0}'.".format(self.config['map']))
            dmap = mapper.map_data()
            msg = ''
            if 'errors' in dmap:
                # Display profile errors
                msg += 'Found the following errors:\n'
                for profile_name, error in six.iteritems(dmap['errors']):
                    msg += '  {0}: {1}\n'.format(profile_name, error)
                sys.stderr.write(msg)
                sys.stderr.flush()
            msg = ''
            if 'existing' in dmap:
                msg += 'The following virtual machines already exist:\n'
                for name in dmap['existing']:
                    msg += '  {0}\n'.format(name)
            if dmap['create']:
                msg += ('The following virtual machines are set to be '
                        'created:\n')
                for name in dmap['create']:
                    msg += '  {0}\n'.format(name)
            if 'destroy' in dmap:
                msg += ('The following virtual machines are set to be '
                        'destroyed:\n')
                for name in dmap['destroy']:
                    msg += '  {0}\n'.format(name)
            if not dmap['create'] and not dmap.get('destroy', None):
                if not dmap.get('existing', None):
                    # Nothing to create or destroy and nothing exists
                    print(msg)
                    self.exit(1)
                else:
                    # Nothing to create or destroy; print existing
                    run_map = False
            if run_map:
                if self.print_confirm(msg):
                    ret = mapper.run_map(dmap)
                if self.config.get('parallel', False) is False:
                    log.info('Complete')
            if dmap.get('existing', None):
                for name in dmap['existing']:
                    if 'ec2' in dmap['existing'][name]['provider']:
                        msg = ('Instance already exists, or is terminated '
                               'and has the same name.')
                    else:
                        msg = 'Already running.'
                    ret[name] = {'Message': msg}
        except (SaltCloudException, Exception) as exc:
            msg = 'There was a query error: {0}'
            self.handle_exception(msg, exc)
    elif self.options.bootstrap:
        host = self.options.bootstrap
        if self.args and '=' not in self.args[0]:
            minion_id = self.args.pop(0)
        else:
            minion_id = host
        vm_ = {
            'driver': '',
            'ssh_host': host,
            'name': minion_id,
        }
        args = self.args[:]
        for arg in args[:]:
            if '=' in arg:
                key, value = arg.split('=', 1)
                vm_[key] = value
                args.remove(arg)
        if args:
            self.error(
                'Any arguments passed to --bootstrap need to be passed '
                'as kwargs. Ex: ssh_username=larry. Remaining '
                'arguments: {0}'.format(args)
            )
        try:
            ret = salt.utils.cloud.bootstrap(vm_, self.config)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error bootstrapping the minion: {0}'
            self.handle_exception(msg, exc)
    else:
        self.error('Nothing was done. Using the proper arguments?')
    salt.output.display_output(ret, self.options.output, opts=self.config)
    self.exit(salt.defaults.exitcodes.EX_OK)
def __init__(self, module_dirs, opts=None, tag=u'module',
             loaded_base_name=None, mod_type_check=None, pack=None,
             whitelist=None, virtual_enable=True, static_modules=None,
             proxy=None, virtual_funcs=None):
    '''
    In pack, if any of the values are None they will be replaced with an
    empty context-specific dict
    '''
    self.inject_globals = {}
    self.pack = {} if pack is None else pack
    if opts is None:
        opts = {}
    threadsafety = not opts.get(u'multiprocessing')
    self.context_dict = salt.utils.context.ContextDict(
        threadsafe=threadsafety)
    self.opts = self.__prep_mod_opts(opts)
    self.module_dirs = module_dirs
    self.tag = tag
    self.loaded_base_name = loaded_base_name or LOADED_BASE_NAME
    self.mod_type_check = mod_type_check or _mod_type
    if u'__context__' not in self.pack:
        self.pack[u'__context__'] = None
    for k, v in six.iteritems(self.pack):
        if v is None:
            # If the value of a pack is None, make it an empty
            # context-specific dict
            self.context_dict.setdefault(k, {})
            self.pack[k] = salt.utils.context.NamespacedDictWrapper(
                self.context_dict, k)
    self.whitelist = whitelist
    self.virtual_enable = virtual_enable
    self.initial_load = True
    # Names of modules that we don't have (errors, __virtual__, etc.)
    self.missing_modules = {}
    self.loaded_modules = {}  # mapping of module_name -> dict of functions
    self.loaded_files = set()
    self.static_modules = static_modules if static_modules else []
    if virtual_funcs is None:
        virtual_funcs = []
    self.virtual_funcs = virtual_funcs
    self.disabled = set(
        self.opts.get(
            u'disable_{0}{1}'.format(
                self.tag, u'' if self.tag[-1] == u's' else u's'),
            []
        )
    )
    self.refresh_file_mapping()
    super(LazyLoader, self).__init__()  # late init the lazy loader
    # Create all of the import namespaces
    _generate_module(u'{0}.int'.format(self.loaded_base_name))
    _generate_module(u'{0}.int.{1}'.format(self.loaded_base_name, tag))
    _generate_module(u'{0}.ext'.format(self.loaded_base_name))
    _generate_module(u'{0}.ext.{1}'.format(self.loaded_base_name, tag))
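# A hedged usage sketch: LazyLoader is rarely built by hand; the
# wrappers in salt.loader assemble one for you. minion_mods() is the
# usual entry point for execution modules (the config path below is an
# assumption):
def _example_lazyloader_usage():
    import salt.config
    import salt.loader
    opts = salt.config.minion_config('/etc/salt/minion')
    opts['grains'] = salt.loader.grains(opts)
    funcs = salt.loader.minion_mods(opts)  # a LazyLoader instance
    return funcs['test.ping']()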
def __getitem__(self, item):
    '''
    Override the __getitem__ in order to decorate the returned function
    if we need to last-minute inject globals
    '''
    func = super(LazyLoader, self).__getitem__(item)
    if self.inject_globals:
        return global_injector_decorator(self.inject_globals)(func)
    else:
        return func
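# Hedged sketch of the injection idea above, not Salt's actual
# global_injector_decorator: temporarily bind extra names into the
# wrapped function's module globals for the duration of one call.
import functools

_MISSING = object()

def _example_global_injector(inject_globals):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Save any existing bindings, inject, call, then restore
            saved = {name: func.__globals__.get(name, _MISSING)
                     for name in inject_globals}
            func.__globals__.update(inject_globals)
            try:
                return func(*args, **kwargs)
            finally:
                for name, old in saved.items():
                    if old is _MISSING:
                        func.__globals__.pop(name, None)
                    else:
                        func.__globals__[name] = old
        return wrapper
    return decorator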
def __getattr__(self, mod_name):
    '''
    Allow for "direct" attribute access -- this allows jinja templates
    to access things like `salt.test.ping()`
    '''
    if mod_name in (u'__getstate__', u'__setstate__'):
        return object.__getattribute__(self, mod_name)
    # If we have an attribute named that, return it
    try:
        return object.__getattr__(self, mod_name)
    except AttributeError:
        pass
    # Otherwise assume it is jinja template access
    if mod_name not in self.loaded_modules and not self.loaded:
        for name in self._iter_files(mod_name):
            if name in self.loaded_files:
                continue
            # If we got what we wanted, we are done
            if self._load_module(name) and mod_name in self.loaded_modules:
                break
    if mod_name in self.loaded_modules:
        return self.loaded_modules[mod_name]
    else:
        raise AttributeError(mod_name)
def missing_fun_string(self, function_name):
    '''
    Return the error string for a missing function.

    This can range from "not available" to "__virtual__ returned False"
    '''
    mod_name = function_name.split(u'.')[0]
    if mod_name in self.loaded_modules:
        return u"'{0}' is not available.".format(function_name)
    else:
        try:
            reason = self.missing_modules[mod_name]
        except KeyError:
            return u"'{0}' is not available.".format(function_name)
        else:
            if reason is not None:
                return u"'{0}' __virtual__ returned False: {1}".format(
                    mod_name, reason)
            else:
                return u"'{0}' __virtual__ returned False".format(mod_name)
def refresh_file_mapping(self):
    '''
    Refresh the mapping of the FS on disk
    '''
    # map of suffix to description for imp
    self.suffix_map = {}
    # The precedence order of suffixes; the empty string stands for a
    # package directory
    suffix_order = [u'']
    for (suffix, mode, kind) in SUFFIXES:
        self.suffix_map[suffix] = (suffix, mode, kind)
        suffix_order.append(suffix)
    if self.opts.get(u'cython_enable', True) is True:
        try:
            global pyximport
            pyximport = __import__(u'pyximport')
            pyximport.install()
            # Add to suffix_map so file_mapping will pick it up
            self.suffix_map[u'.pyx'] = tuple()
        except ImportError:
            log.info(
                u'Cython is enabled in the options but not present in '
                u'the system path. Skipping Cython modules.'
            )
    # Allow for zipimport of modules
    if self.opts.get(u'enable_zip_modules', True) is True:
        self.suffix_map[u'.zip'] = tuple()
    # Allow for module dirs
    if USE_IMPORTLIB:
        self.suffix_map[u''] = (u'', u'', MODULE_KIND_PKG_DIRECTORY)
    else:
        self.suffix_map[u''] = (u'', u'', imp.PKG_DIRECTORY)
    # Create a mapping of filename (without suffix) to (path, suffix).
    # Files are added in order of priority, so order *must* be retained.
    self.file_mapping = salt.utils.odict.OrderedDict()
    for mod_dir in self.module_dirs:
        files = []
        try:
            # A sorted listdir gives predictable override results
            files = sorted(os.listdir(mod_dir))
        except OSError:
            continue  # Next mod_dir
        for filename in files:
            try:
                if filename.startswith(u'_'):
                    # Skip private modules
                    continue
                f_noext, ext = os.path.splitext(filename)
                # Make sure it is a suffix we support
                if ext not in self.suffix_map:
                    continue
                if f_noext in self.disabled:
                    log.trace(
                        u'Skipping %s, it is disabled by configuration',
                        filename
                    )
                    continue
                fpath = os.path.join(mod_dir, filename)
                # If it is a directory, allow loading it as a package,
                # but only if there is an __init__ with a known suffix
                if ext == u'':
                    subfiles = os.listdir(fpath)
                    for suffix in suffix_order:
                        if u'' == suffix:
                            continue  # __init__ must have a suffix
                        init_file = u'__init__{0}'.format(suffix)
                        if init_file in subfiles:
                            break
                    else:
                        continue  # Next filename
                if f_noext in self.file_mapping:
                    curr_ext = self.file_mapping[f_noext][1]
                    if u'' in (curr_ext, ext) and curr_ext != ext:
                        log.error(
                            u"Module/package collision: '%s' and '%s'",
                            fpath,
                            self.file_mapping[f_noext][0]
                        )
                    if (not curr_ext or
                            suffix_order.index(ext) >=
                            suffix_order.index(curr_ext)):
                        continue  # Next filename
                # Made it this far -- add it
                self.file_mapping[f_noext] = (fpath, ext)
            except OSError:
                continue
    for smod in self.static_modules:
        f_noext = smod.split(u'.')[-1]
        self.file_mapping[f_noext] = (smod, u'.o')
def clear(self):
    '''
    Clear the dict
    '''
    super(LazyLoader, self).clear()  # clear the lazy loader
    self.loaded_files = set()
    self.missing_modules = {}
    self.loaded_modules = {}
    # If we have been loaded before, clear the file mapping since we
    # obviously want a re-do
    if hasattr(self, u'opts'):
        self.refresh_file_mapping()
    self.initial_load = False
def __prep_mod_opts(self, opts):
    '''
    Strip out of the opts any logger instance
    '''
    if u'__grains__' not in self.pack:
        self.context_dict[u'grains'] = opts.get(u'grains', {})
        self.pack[u'__grains__'] = salt.utils.context.NamespacedDictWrapper(
            self.context_dict, u'grains', override_name=u'grains')
    if u'__pillar__' not in self.pack:
        self.context_dict[u'pillar'] = opts.get(u'pillar', {})
        self.pack[u'__pillar__'] = salt.utils.context.NamespacedDictWrapper(
            self.context_dict, u'pillar', override_name=u'pillar')
    mod_opts = {}
    for key, val in list(opts.items()):
        if key == u'logger':
            continue
        mod_opts[key] = val
    return mod_opts
def _iter_files(self, mod_name):
    '''
    Iterate over all file_mapping files in order of closeness to mod_name
    '''
    # Do we have an exact match?
    if mod_name in self.file_mapping:
        yield mod_name
    # Do we have a partial match?
    for k in self.file_mapping:
        if mod_name in k:
            yield k
    # Everyone else
    for k in self.file_mapping:
        if mod_name not in k:
            yield k
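# Hedged sketch of the yield order above: exact name first, then any
# key containing the name, then everything else. Note that an exact
# match is yielded twice (once exact, once as a substring match);
# callers guard against re-loading via loaded_files. Names invented.
def _example_iter_order():
    file_mapping = {'test': 1, 'testinfra': 1, 'pkg': 1}
    mod_name = 'test'
    order = []
    if mod_name in file_mapping:
        order.append(mod_name)
    order.extend(k for k in file_mapping if mod_name in k)
    order.extend(k for k in file_mapping if mod_name not in k)
    return order  # ['test', 'test', 'testinfra', 'pkg']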
def _load(self, key):
    '''
    Load a single item if you have it
    '''
    # If the key doesn't have a '.' then it isn't valid for this mod dict
    if not isinstance(key, six.string_types) or u'.' not in key:
        raise KeyError
    mod_name, _ = key.split(u'.', 1)
    if mod_name in self.missing_modules:
        return True
    # If the module name isn't in the whitelist, don't bother
    if self.whitelist and mod_name not in self.whitelist:
        raise KeyError

    def _inner_load(mod_name):
        for name in self._iter_files(mod_name):
            if name in self.loaded_files:
                continue
            # If we got what we wanted, we are done
            if self._load_module(name) and key in self._dict:
                return True
        return False

    # Try to load the module, refreshing the file mapping at most once
    # in case the files on disk changed underneath us
    ret = None
    reloaded = False
    while True:
        try:
            ret = _inner_load(mod_name)
            if not reloaded and ret is not True:
                self.refresh_file_mapping()
                reloaded = True
                continue
            break
        except IOError:
            if not reloaded:
                self.refresh_file_mapping()
                reloaded = True
            continue
    return ret
def _load_all(self):
    '''
    Load all of them
    '''
    for name in self.file_mapping:
        if name in self.loaded_files or name in self.missing_modules:
            continue
        self._load_module(name)
    self.loaded = True
def _apply_outputter(self, func, mod):
    '''
    Apply the __outputter__ variable to the functions
    '''
    if hasattr(mod, u'__outputter__'):
        outp = mod.__outputter__
        if func.__name__ in outp:
            func.__outputter__ = outp[func.__name__]
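# From a module author's side, a hedged sketch of the __outputter__
# convention consumed above: a module-level dict mapping function names
# to the outputter used to render their return data. The module and
# function names are invented; 'txt' is one of Salt's stock outputters.
#
#     # mymodule.py
#     __outputter__ = {
#         'report': 'txt',  # render report()'s return with the txt outputter
#     }
#
#     def report():
#         return 'plain text, rendered by the txt outputter'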
'Given a loaded module and its default name determine its virtual name This function returns a tuple. The first value will be either True or False and will indicate if the module should be loaded or not (i.e. if it threw and exception while processing its __virtual__ function). The second value is the determined virtual name, which may be the same as the value provided. The default name can be calculated as follows:: module_name = mod.__name__.rsplit(\'.\', 1)[-1]'
def process_virtual(self, mod, module_name, virtual_func=u'__virtual__'):
virtual_aliases = getattr(mod, u'__virtual_aliases__', tuple()) try: error_reason = None if (hasattr(mod, u'__virtual__') and inspect.isfunction(mod.__virtual__)): try: start = time.time() virtual = getattr(mod, virtual_func)() if isinstance(virtual, tuple): error_reason = virtual[1] virtual = virtual[0] if self.opts.get(u'virtual_timer', False): end = (time.time() - start) msg = u'Virtual function took {0} seconds for {1}'.format(end, module_name) log.warning(msg) except Exception as exc: error_reason = u'Exception raised when processing __virtual__ function for {0}. Module will not be loaded: {1}'.format(mod.__name__, exc) log.error(error_reason, exc_info_on_loglevel=logging.DEBUG) virtual = None virtualname = getattr(mod, u'__virtualname__', virtual) if (not virtual): if (virtual is None): log.warning(u"%s.__virtual__() is wrongly returning `None`. It should either return `True`, `False` or a new name. If you're the developer of the module '%s', please fix this.", mod.__name__, module_name) return (False, module_name, error_reason, virtual_aliases) if ((virtual is not True) and (module_name != virtual)): log.trace(u'Loaded %s as virtual %s', module_name, virtual) if (not hasattr(mod, u'__virtualname__')): salt.utils.warn_until(u'Hydrogen', u'The \'{0}\' module is renaming itself in its __virtual__() function ({1} => {2}). Please set it\'s virtual name as the \'__virtualname__\' module attribute. Example: "__virtualname__ = \'{2}\'"'.format(mod.__name__, module_name, virtual)) if (virtualname != virtual): log.error(u"The module '%s' is showing some bad usage. Its __virtualname__ attribute is set to '%s' yet the __virtual__() function is returning '%s'. These values should match!", mod.__name__, virtualname, virtual) module_name = virtualname elif ((virtual is True) and (virtualname != module_name)): if (virtualname is not True): module_name = virtualname except KeyError: log.debug(u'KeyError when loading %s', module_name, exc_info=True) except Exception: log.error(u'Failed to read the virtual function for %s: %s', self.tag, module_name, exc_info=True) return (False, module_name, error_reason, virtual_aliases) return (True, module_name, None, virtual_aliases)
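A hypothetical module showing the values process_virtual() consumes: __virtualname__, __virtual_aliases__, and a __virtual__() function that returns either the virtual name or a (False, reason) tuple (the reason ends up in error_reason above):

.. code-block:: python

    # hypothetical module illustrating the __virtual__ protocol
    try:
        import somelib          # a made-up dependency
        HAS_SOMELIB = True
    except ImportError:
        HAS_SOMELIB = False

    __virtualname__ = 'mydb'
    __virtual_aliases__ = ('mydatabase',)

    def __virtual__():
        if HAS_SOMELIB:
            return __virtualname__       # load as 'mydb' (and its aliases)
        return (False, 'The somelib Python library is not installed')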
'Create a salt master server instance :param dict opts: The salt options dictionary'
def __init__(self, opts):
self.opts = opts self.master_key = salt.crypt.MasterKeys(self.opts) self.key = self.__prep_key()
'A key needs to be placed in the filesystem with permissions 0400 so clients are required to run as root.'
def __prep_key(self):
return salt.daemons.masterapi.access_keys(self.opts)
'Create a maintenance instance :param dict opts: The salt options'
def __init__(self, opts, log_queue=None):
super(Maintenance, self).__init__(log_queue=log_queue) self.opts = opts self.loop_interval = int(self.opts[u'loop_interval']) self.rotate = int(time.time()) self.serial = salt.payload.Serial(self.opts)
'Some things need to be init\'d after the fork has completed. The easiest example is that one of these module types creates a thread in the parent process, then once the fork happens you\'ll start getting errors like "WARNING: Mixing fork() and threads detected; memory leaked."'
def _post_fork_init(self):
self.fileserver = salt.fileserver.Fileserver(self.opts) ropts = dict(self.opts) ropts[u'quiet'] = True runner_client = salt.runner.RunnerClient(ropts) self.returners = salt.loader.returners(self.opts, {}) self.schedule = salt.utils.schedule.Schedule(self.opts, runner_client.functions_dict(), returners=self.returners) self.ckminions = salt.utils.minions.CkMinions(self.opts) self.event = salt.utils.event.get_master_event(self.opts, self.opts[u'sock_dir'], listen=False) self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts) self.presence_events = False if self.opts.get(u'presence_events', False): tcp_only = True for (transport, _) in iter_transport_opts(self.opts): if (transport != u'tcp'): tcp_only = False if (not tcp_only): self.presence_events = True
'This is the general passive maintenance process controller for the Salt master. This is where any data that needs to be cleanly maintained on the master is kept up to date.'
def run(self):
salt.utils.appendproctitle(u'Maintenance') self._post_fork_init() last = int(time.time()) salt.daemons.masterapi.clean_fsbackend(self.opts) old_present = set() while True: now = int(time.time()) if ((now - last) >= self.loop_interval): salt.daemons.masterapi.clean_old_jobs(self.opts) salt.daemons.masterapi.clean_expired_tokens(self.opts) salt.daemons.masterapi.clean_pub_auth(self.opts) self.handle_git_pillar() self.handle_schedule() self.handle_key_cache() self.handle_presence(old_present) self.handle_key_rotate(now) salt.daemons.masterapi.fileserver_update(self.fileserver) salt.utils.verify.check_max_open_files(self.opts) last = now time.sleep(self.loop_interval)
'Evaluate accepted keys and create a msgpack file which contains a list of the accepted minion keys'
def handle_key_cache(self):
if (self.opts[u'key_cache'] == u'sched'): keys = [] if (self.opts[u'transport'] in (u'zeromq', u'tcp')): acc = u'minions' else: acc = u'accepted' for fn_ in os.listdir(os.path.join(self.opts[u'pki_dir'], acc)): if ((not fn_.startswith(u'.')) and os.path.isfile(os.path.join(self.opts[u'pki_dir'], acc, fn_))): keys.append(fn_) log.debug(u'Writing master key cache') with salt.utils.atomicfile.atomic_open(os.path.join(self.opts[u'pki_dir'], acc, u'.key_cache')) as cache_file: self.serial.dump(keys, cache_file)
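The cache file is written with the configured payload serializer (msgpack by default), so it can be read back the same way. A sketch, assuming default master paths and the zeromq/tcp transport (which uses the 'minions' directory):

.. code-block:: python

    import salt.config
    import salt.payload

    opts = salt.config.master_config('/etc/salt/master')
    serial = salt.payload.Serial(opts)
    cache_path = '/etc/salt/pki/master/minions/.key_cache'  # assumed default pki_dir
    with open(cache_path, 'rb') as fp_:
        print(serial.load(fp_))  # list of accepted minion IDs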
'Rotate the AES key if a dropfile is present or the publish session interval has expired'
def handle_key_rotate(self, now):
to_rotate = False dfn = os.path.join(self.opts[u'cachedir'], u'.dfn') try: stats = os.stat(dfn) if (salt.utils.platform.is_windows() and (not os.access(dfn, os.W_OK))): to_rotate = True os.chmod(dfn, (stat.S_IRUSR | stat.S_IWUSR)) elif (stats.st_mode == 33024): to_rotate = True else: log.error(u'Found dropfile with incorrect permissions, ignoring...') os.remove(dfn) except os.error: pass if self.opts.get(u'publish_session'): if ((now - self.rotate) >= self.opts[u'publish_session']): to_rotate = True if to_rotate: log.info(u'Rotating master AES key') for (secret_key, secret_map) in six.iteritems(SMaster.secrets): with secret_map[u'secret'].get_lock(): secret_map[u'secret'].value = six.b(secret_map[u'reload']()) self.event.fire_event({u'rotate_{0}_key'.format(secret_key): True}, tag=u'key') self.rotate = now if self.opts.get(u'ping_on_rotate'): log.debug(u'Pinging all connected minions due to key rotation') salt.utils.master.ping_all_connected_minions(self.opts)
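The dropfile branch checks for st_mode 33024, which is just 0o100400: a regular file with mode 0400. A sketch of what a valid dropfile looks like (salt.crypt.dropfile does this in the real codebase); the cachedir path below is the assumed default:

.. code-block:: python

    import os
    import stat

    dfn = '/var/cache/salt/master/.dfn'  # assumed default cachedir
    mask = os.umask(0o277)               # ensure nothing wider than 0400
    try:
        with open(dfn, 'w'):
            pass
        os.chmod(dfn, stat.S_IRUSR)      # 0o400 -> st_mode 0o100400 == 33024
    finally:
        os.umask(mask)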
'Update git pillar'
def handle_git_pillar(self):
try: for pillar in self.git_pillar: pillar.update() except Exception as exc: log.error(u'Exception caught while updating git_pillar', exc_info=True)
'Evaluate the scheduler'
def handle_schedule(self):
try: self.schedule.eval() if (self.schedule.loop_interval < self.loop_interval): self.loop_interval = self.schedule.loop_interval except Exception as exc: log.error(u'Exception %s occurred in scheduled job', exc)
'Fire presence events if enabled'
def handle_presence(self, old_present):
if self.presence_events: present = self.ckminions.connected_ids() new = present.difference(old_present) lost = old_present.difference(present) if (new or lost): data = {u'new': list(new), u'lost': list(lost)} self.event.fire_event(data, tagify(u'change', u'presence')) data = {u'present': list(present)} self.event.fire_event(data, tagify(u'present', u'presence'), timeout=3) old_present.clear() old_present.update(present)
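The events fired here land on the master event bus under tags built by tagify, i.e. salt/presence/change and salt/presence/present. A sketch of a listener, assuming a standard master config:

.. code-block:: python

    import salt.config
    import salt.utils.event

    opts = salt.config.master_config('/etc/salt/master')
    event = salt.utils.event.get_master_event(opts, opts['sock_dir'], listen=True)
    while True:
        evt = event.get_event(tag='salt/presence/change', full=True)
        if evt:
            print(evt['data'])  # {'new': [...], 'lost': [...]}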
'Create a salt master server instance :param dict opts: The salt options dictionary'
def __init__(self, opts):
if HAS_ZMQ: try: zmq_version_info = zmq.zmq_version_info() except AttributeError: zmq_version_info = tuple([int(x) for x in zmq.zmq_version().split(u'.')]) if (zmq_version_info < (3, 2)): log.warning(u'You have a version of ZMQ less than ZMQ 3.2! There are known connection keep-alive issues with ZMQ < 3.2 which may result in loss of contact with minions. Please upgrade your ZMQ!') SMaster.__init__(self, opts)
'Run pre-flight checks. If anything in this method fails then the master should not start up.'
def _pre_flight(self):
errors = [] critical_errors = [] try: os.chdir(u'/') except OSError as err: errors.append(u'Cannot change to root directory ({0})'.format(err)) if self.opts.get(u'fileserver_verify_config', True): fileserver = salt.fileserver.Fileserver(self.opts) if (not fileserver.servers): errors.append(u'Failed to load fileserver backends, the configured backends are: {0}'.format(u', '.join(self.opts[u'fileserver_backend']))) else: try: fileserver.init() except FileserverConfigError as exc: critical_errors.append(u'{0}'.format(exc)) if (not self.opts[u'fileserver_backend']): errors.append(u'No fileserver backends are configured') if (self.opts[u'pillar_cache'] and (not os.path.isdir(os.path.join(self.opts[u'cachedir'], u'pillar_cache')))): try: prev_umask = os.umask(63) os.mkdir(os.path.join(self.opts[u'cachedir'], u'pillar_cache')) os.umask(prev_umask) except OSError: pass if self.opts.get(u'git_pillar_verify_config', True): non_legacy_git_pillars = [x for x in self.opts.get(u'ext_pillar', []) if ((u'git' in x) and (not isinstance(x[u'git'], six.string_types)))] if non_legacy_git_pillars: try: new_opts = copy.deepcopy(self.opts) from salt.pillar.git_pillar import PER_REMOTE_OVERRIDES as per_remote_overrides, PER_REMOTE_ONLY as per_remote_only for repo in non_legacy_git_pillars: new_opts[u'ext_pillar'] = [repo] try: git_pillar = salt.utils.gitfs.GitPillar(new_opts) git_pillar.init_remotes(repo[u'git'], per_remote_overrides, per_remote_only) except FileserverConfigError as exc: critical_errors.append(exc.strerror) finally: del new_opts if (errors or critical_errors): for error in errors: log.error(error) for error in critical_errors: log.critical(error) log.critical(u'Master failed pre flight checks, exiting\n') sys.exit(salt.defaults.exitcodes.EX_GENERIC)
'Turn on the master server components'
def start(self):
self._pre_flight() log.info(u"salt-master is starting as user '%s'", salt.utils.get_user()) enable_sigusr1_handler() enable_sigusr2_handler() self.__set_max_open_files() with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM): SMaster.secrets[u'aes'] = {u'secret': multiprocessing.Array(ctypes.c_char, six.b(salt.crypt.Crypticle.generate_key_string())), u'reload': salt.crypt.Crypticle.generate_key_string} log.info(u'Creating master process manager') self.process_manager = salt.utils.process.ProcessManager(wait_for_kill=5) pub_channels = [] log.info(u'Creating master publisher process') for (transport, opts) in iter_transport_opts(self.opts): chan = salt.transport.server.PubServerChannel.factory(opts) chan.pre_fork(self.process_manager) pub_channels.append(chan) log.info(u'Creating master event publisher process') self.process_manager.add_process(salt.utils.event.EventPublisher, args=(self.opts,)) if self.opts.get(u'reactor'): if isinstance(self.opts[u'engines'], list): rine = False for item in self.opts[u'engines']: if (u'reactor' in item): rine = True break if (not rine): self.opts[u'engines'].append({u'reactor': {}}) elif (u'reactor' not in self.opts[u'engines']): log.info(u'Enabling the reactor engine') self.opts[u'engines'][u'reactor'] = {} salt.engines.start_engines(self.opts, self.process_manager) log.info(u'Creating master maintenance process') self.process_manager.add_process(Maintenance, args=(self.opts,)) if self.opts.get(u'event_return'): log.info(u'Creating master event return process') self.process_manager.add_process(salt.utils.event.EventReturn, args=(self.opts,)) ext_procs = self.opts.get(u'ext_processes', []) for proc in ext_procs: log.info(u'Creating ext_processes process: %s', proc) try: mod = u'.'.join(proc.split(u'.')[:(-1)]) cls = proc.split(u'.')[(-1)] _tmp = __import__(mod, globals(), locals(), [cls], (-1)) cls = _tmp.__getattribute__(cls) self.process_manager.add_process(cls, args=(self.opts,)) except Exception: log.error(u'Error creating ext_processes process: %s', proc) if (HAS_HALITE and (u'halite' in self.opts)): log.info(u'Creating master halite process') self.process_manager.add_process(Halite, args=(self.opts[u'halite'],)) if self.opts[u'con_cache']: log.info(u'Creating master concache process') self.process_manager.add_process(salt.utils.master.ConnectedCache, args=(self.opts,)) log.debug(u'Sleeping for two seconds to let concache rest') time.sleep(2) log.info(u'Creating master request server process') kwargs = {} if salt.utils.platform.is_windows(): kwargs[u'log_queue'] = salt.log.setup.get_multiprocessing_logging_queue() kwargs[u'secrets'] = SMaster.secrets self.process_manager.add_process(ReqServer, args=(self.opts, self.key, self.master_key), kwargs=kwargs, name=u'ReqServer') if (signal.getsignal(signal.SIGINT) is signal.SIG_DFL): signal.signal(signal.SIGINT, self._handle_signals) if (signal.getsignal(signal.SIGTERM) is signal.SIG_DFL): signal.signal(signal.SIGTERM, self._handle_signals) self.process_manager.run()
'Create a halite instance :param dict hopts: The halite options'
def __init__(self, hopts, log_queue=None):
super(Halite, self).__init__(log_queue=log_queue) self.hopts = hopts
'Fire up halite!'
def run(self):
salt.utils.appendproctitle(self.__class__.__name__) halite.start(self.hopts)
'Create a request server :param dict opts: The salt options dictionary :param dict key: The user starting the server and the AES key :param dict mkey: The user starting the server and the RSA key :rtype: ReqServer :returns: Request server'
def __init__(self, opts, key, mkey, log_queue=None, secrets=None):
super(ReqServer, self).__init__(log_queue=log_queue) self.opts = opts self.master_key = mkey self.key = key self.secrets = secrets
'Binds the reply server'
def __bind(self):
if (self.log_queue is not None): salt.log.setup.set_multiprocessing_logging_queue(self.log_queue) salt.log.setup.setup_multiprocessing_logging(self.log_queue) if (self.secrets is not None): SMaster.secrets = self.secrets dfn = os.path.join(self.opts[u'cachedir'], u'.dfn') if os.path.isfile(dfn): try: if (salt.utils.platform.is_windows() and (not os.access(dfn, os.W_OK))): os.chmod(dfn, (stat.S_IRUSR | stat.S_IWUSR)) os.remove(dfn) except os.error: pass self.process_manager = salt.utils.process.ProcessManager(name=u'ReqServer_ProcessManager', wait_for_kill=1) req_channels = [] tcp_only = True for (transport, opts) in iter_transport_opts(self.opts): chan = salt.transport.server.ReqServerChannel.factory(opts) chan.pre_fork(self.process_manager) req_channels.append(chan) if (transport != u'tcp'): tcp_only = False kwargs = {} if salt.utils.platform.is_windows(): kwargs[u'log_queue'] = self.log_queue if (tcp_only and six.PY2 and (int(self.opts[u'worker_threads']) != 1)): log.warning(u'TCP transport supports only 1 worker on Windows when using Python 2.') self.opts[u'worker_threads'] = 1 with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM): for ind in range(int(self.opts[u'worker_threads'])): name = u'MWorker-{0}'.format(ind) self.process_manager.add_process(MWorker, args=(self.opts, self.master_key, self.key, req_channels, name), kwargs=kwargs, name=name) self.process_manager.run()
'Start up the ReqServer'
def run(self):
self.__bind()
'Create a salt master worker process :param dict opts: The salt options :param dict mkey: The user running the salt master and the AES key :param dict key: The user running the salt master and the RSA key :rtype: MWorker :return: Master worker'
def __init__(self, opts, mkey, key, req_channels, name, **kwargs):
kwargs[u'name'] = name super(MWorker, self).__init__(**kwargs) self.opts = opts self.req_channels = req_channels self.mkey = mkey self.key = key self.k_mtime = 0
'Bind to the local port'
def __bind(self):
if HAS_ZMQ: zmq.eventloop.ioloop.install() self.io_loop = LOOP_CLASS() self.io_loop.make_current() for req_channel in self.req_channels: req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) try: self.io_loop.start() except (KeyboardInterrupt, SystemExit): pass
'The _handle_payload method is the key method used to figure out what needs to be done with communication to the server. Example cleartext payload generated for \'salt myminion test.ping\': {\'enc\': \'clear\', \'load\': {\'arg\': [], \'cmd\': \'publish\', \'fun\': \'test.ping\', \'jid\': \'\', \'key\': \'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj\', \'kwargs\': {\'show_jid\': False, \'show_timeout\': False}, \'ret\': \'\', \'tgt\': \'myminion\', \'tgt_type\': \'glob\', \'user\': \'root\'}} :param dict payload: The payload to route to the appropriate handler'
@tornado.gen.coroutine def _handle_payload(self, payload):
key = payload[u'enc'] load = payload[u'load'] ret = {u'aes': self._handle_aes, u'clear': self._handle_clear}[key](load) raise tornado.gen.Return(ret)
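The handler lookup is a plain dict dispatch on the payload's 'enc' key. The same pattern, reduced to a standalone sketch with stub handlers:

.. code-block:: python

    def handle_clear(load):
        return (load, {'fun': 'send_clear'})

    def handle_aes(load):
        return (load, {'fun': 'send'})

    handlers = {'clear': handle_clear, 'aes': handle_aes}

    payload = {'enc': 'clear', 'load': {'cmd': 'publish'}}
    print(handlers[payload['enc']](payload['load']))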
'Process a cleartext command :param dict load: Cleartext payload :return: The result of passing the load to a function in ClearFuncs corresponding to the command specified in the load\'s \'cmd\' key.'
def _handle_clear(self, load):
log.trace(u'Clear payload received with command %s', load[u'cmd']) if load[u'cmd'].startswith(u'__'): return False return (getattr(self.clear_funcs, load[u'cmd'])(load), {u'fun': u'send_clear'})
'Process a command sent via an AES key :param dict data: Encrypted payload :return: The result of passing the load to a function in AESFuncs corresponding to the command specified in the load\'s \'cmd\' key.'
def _handle_aes(self, data):
if (u'cmd' not in data): log.error(u'Received malformed command %s', data) return {} log.trace(u'AES payload received with command %s', data[u'cmd']) if data[u'cmd'].startswith(u'__'): return False return self.aes_funcs.run_func(data[u'cmd'], data)
'Start a Master Worker'
def run(self):
salt.utils.appendproctitle(self.name) self.clear_funcs = ClearFuncs(self.opts, self.key) self.aes_funcs = AESFuncs(self.opts) salt.utils.reinit_crypto() self.__bind()
'Create a new AESFuncs :param dict opts: The salt options :rtype: AESFuncs :returns: Instance for handling AES operations'
def __init__(self, opts):
self.opts = opts self.event = salt.utils.event.get_master_event(self.opts, self.opts[u'sock_dir'], listen=False) self.serial = salt.payload.Serial(opts) self.ckminions = salt.utils.minions.CkMinions(opts) self.local = salt.client.get_local_client(self.opts[u'conf_file']) self.mminion = salt.minion.MasterMinion(self.opts, states=False, rend=False, ignore_config_errors=True) self.__setup_fileserver() self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
'Set the local file objects from the file server interface'
def __setup_fileserver(self):
self.fs_ = salt.fileserver.Fileserver(self.opts) self._serve_file = self.fs_.serve_file self._file_find = self.fs_._find_file self._file_hash = self.fs_.file_hash self._file_hash_and_stat = self.fs_.file_hash_and_stat self._file_list = self.fs_.file_list self._file_list_emptydirs = self.fs_.file_list_emptydirs self._dir_list = self.fs_.dir_list self._symlink_list = self.fs_.symlink_list self._file_envs = self.fs_.envs
'Take a minion id and a string signed with the minion private key The string needs to verify as \'salt\' with the minion public key :param str id_: A minion ID :param str token: A string signed with the minion private key :rtype: bool :return: Boolean indicating whether or not the token can be verified.'
def __verify_minion(self, id_, token):
if (not salt.utils.verify.valid_id(self.opts, id_)): return False pub_path = os.path.join(self.opts[u'pki_dir'], u'minions', id_) try: with salt.utils.files.fopen(pub_path, u'r') as fp_: minion_pub = fp_.read() pub = RSA.importKey(minion_pub) except (IOError, OSError): log.warning(u'Salt minion claiming to be %s attempted to communicate with master, but key could not be read and verification was denied.', id_) return False except (ValueError, IndexError, TypeError) as err: log.error(u'Unable to load public key "%s": %s', pub_path, err) return False try: if (salt.crypt.public_decrypt(pub, token) == 'salt'): return True except ValueError as err: log.error(u'Unable to decrypt token: %s', err) log.error(u'Salt minion claiming to be %s has attempted to communicate with the master and could not be verified', id_) return False
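The token being checked is the string 'salt' encrypted with the minion's private RSA key; salt.crypt exposes private_encrypt/public_decrypt for exactly this round trip. A sketch of both sides, assuming the usual minion pki paths:

.. code-block:: python

    from Crypto.PublicKey import RSA

    import salt.crypt

    with open('/etc/salt/pki/minion/minion.pem') as fp_:
        priv = RSA.importKey(fp_.read())
    with open('/etc/salt/pki/minion/minion.pub') as fp_:
        pub = RSA.importKey(fp_.read())

    tok = salt.crypt.private_encrypt(priv, b'salt')        # minion side
    assert salt.crypt.public_decrypt(pub, tok) == b'salt'  # master side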
'Take a minion id and a string signed with the minion private key The string needs to verify as \'salt\' with the minion public key :param str id_: A minion ID :param str token: A string signed with the minion private key :rtype: bool :return: Boolean indicating whether or not the token can be verified.'
def verify_minion(self, id_, token):
return self.__verify_minion(id_, token)
'Verify that the passed information authorized a minion to execute :param dict clear_load: A publication load from a minion :rtype: bool :return: A boolean indicating if the minion is allowed to publish the command in the load'
def __verify_minion_publish(self, clear_load):
if (u'peer' not in self.opts): return False if (not isinstance(self.opts[u'peer'], dict)): return False if any(((key not in clear_load) for key in (u'fun', u'arg', u'tgt', u'ret', u'tok', u'id'))): return False if clear_load[u'fun'].startswith(u'publish.'): return False if (not self.__verify_minion(clear_load[u'id'], clear_load[u'tok'])): log.warning(u'Minion id %s is not who it says it is and is attempting to issue a peer command', clear_load[u'id']) return False clear_load.pop(u'tok') perms = [] for match in self.opts[u'peer']: if re.match(match, clear_load[u'id']): if isinstance(self.opts[u'peer'][match], list): perms.extend(self.opts[u'peer'][match]) if (u',' in clear_load[u'fun']): clear_load[u'fun'] = clear_load[u'fun'].split(u',') arg_ = [] for arg in clear_load[u'arg']: arg_.append(arg.split()) clear_load[u'arg'] = arg_ return self.ckminions.auth_check(perms, clear_load[u'fun'], clear_load[u'arg'], clear_load[u'tgt'], clear_load.get(u'tgt_type', u'glob'), publish_validate=True)
'A utility function to perform common verification steps. :param dict load: A payload received from a minion :param list verify_keys: A list of strings that should be present in a given load :rtype: dict or bool :return: The original load (except for the token) if the load can be verified. False if the load is invalid.'
def __verify_load(self, load, verify_keys):
if any(((key not in load) for key in verify_keys)): return False if (u'tok' not in load): log.error(u"Received incomplete call from %s for '%s', missing '%s'", load[u'id'], inspect_stack()[u'co_name'], u'tok') return False if (not self.__verify_minion(load[u'id'], load[u'tok'])): log.warning(u'Minion id %s is not who it says it is!', load[u'id']) return False if (u'tok' in load): load.pop(u'tok') return load
'Return the results from an external node classifier if one is specified :param dict load: A payload received from a minion :return: The results from an external node classifier'
def _master_tops(self, load):
load = self.__verify_load(load, (u'id', u'tok')) if (load is False): return {} return self.masterapi._master_tops(load, skip_verify=True)
'Return the master options to the minion :param dict load: A payload received from a minion :rtype: dict :return: The master options'
def _master_opts(self, load):
mopts = {} file_roots = {} envs = self._file_envs() for saltenv in envs: if (saltenv not in file_roots): file_roots[saltenv] = [] mopts[u'file_roots'] = file_roots mopts[u'top_file_merging_strategy'] = self.opts[u'top_file_merging_strategy'] mopts[u'env_order'] = self.opts[u'env_order'] mopts[u'default_top'] = self.opts[u'default_top'] if load.get(u'env_only'): return mopts mopts[u'renderer'] = self.opts[u'renderer'] mopts[u'failhard'] = self.opts[u'failhard'] mopts[u'state_top'] = self.opts[u'state_top'] mopts[u'state_top_saltenv'] = self.opts[u'state_top_saltenv'] mopts[u'nodegroups'] = self.opts[u'nodegroups'] mopts[u'state_auto_order'] = self.opts[u'state_auto_order'] mopts[u'state_events'] = self.opts[u'state_events'] mopts[u'state_aggregate'] = self.opts[u'state_aggregate'] mopts[u'jinja_lstrip_blocks'] = self.opts[u'jinja_lstrip_blocks'] mopts[u'jinja_trim_blocks'] = self.opts[u'jinja_trim_blocks'] return mopts
'Gathers the data from the specified minions\' mine :param dict load: A payload received from a minion :rtype: dict :return: Mine data from the specified minions'
def _mine_get(self, load):
load = self.__verify_load(load, (u'id', u'tgt', u'fun', u'tok')) if (load is False): return {} else: return self.masterapi._mine_get(load, skip_verify=True)
'Store the mine data :param dict load: A payload received from a minion :rtype: bool :return: True if the data has been stored in the mine'
def _mine(self, load):
load = self.__verify_load(load, (u'id', u'data', u'tok')) if (load is False): return {} return self.masterapi._mine(load, skip_verify=True)
'Allow the minion to delete a specific function from its own mine :param dict load: A payload received from a minion :rtype: bool :return: Boolean indicating whether or not the given function was deleted from the mine'
def _mine_delete(self, load):
load = self.__verify_load(load, (u'id', u'fun', u'tok')) if (load is False): return {} else: return self.masterapi._mine_delete(load)
'Allow the minion to delete all of its own mine contents :param dict load: A payload received from a minion'
def _mine_flush(self, load):
load = self.__verify_load(load, (u'id', u'tok')) if (load is False): return {} else: return self.masterapi._mine_flush(load, skip_verify=True)
'Allows minions to send files to the master; files are sent to the master file cache'
def _file_recv(self, load):
if any(((key not in load) for key in (u'id', u'path', u'loc'))): return False if (not isinstance(load[u'path'], list)): return False if (not self.opts[u'file_recv']): return False if (not salt.utils.verify.valid_id(self.opts, load[u'id'])): return False file_recv_max_size = ((1024 * 1024) * self.opts[u'file_recv_max_size']) if ((u'loc' in load) and (load[u'loc'] < 0)): log.error(u'Invalid file pointer: load[loc] < 0') return False if ((len(load[u'data']) + load.get(u'loc', 0)) > file_recv_max_size): log.error(u'file_recv_max_size limit of %d MB exceeded! %s will be truncated. To successfully push this file, adjust file_recv_max_size to an integer (in MB) large enough to accommodate it.', file_recv_max_size, load[u'path']) return False if (u'tok' not in load): log.error(u"Received incomplete call from %s for '%s', missing '%s'", load[u'id'], inspect_stack()[u'co_name'], u'tok') return False if (not self.__verify_minion(load[u'id'], load[u'tok'])): log.warning(u'Minion id %s is not who it says it is!', load[u'id']) return {} load.pop(u'tok') sep_path = os.sep.join(load[u'path']) normpath = os.path.normpath(sep_path) if (os.path.isabs(normpath) or (u'../' in load[u'path'])): return False cpath = os.path.join(self.opts[u'cachedir'], u'minions', load[u'id'], u'files', normpath) if (not os.path.normpath(cpath).startswith(self.opts[u'cachedir'])): log.warning(u'Attempt to write received file outside of master cache directory! Requested path: %s. Access denied.', cpath) return False cdir = os.path.dirname(cpath) if (not os.path.isdir(cdir)): try: os.makedirs(cdir) except os.error: pass if (os.path.isfile(cpath) and (load[u'loc'] != 0)): mode = u'ab' else: mode = u'wb' with salt.utils.files.fopen(cpath, mode) as fp_: if load[u'loc']: fp_.seek(load[u'loc']) fp_.write(load[u'data']) return True
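The minion-side entry point that produces these loads is the cp.push execution function; it requires file_recv: True in the master config, and pushed files land under <cachedir>/minions/<id>/files/ as handled above. A sketch from the master:

.. code-block:: python

    import salt.client

    client = salt.client.LocalClient()
    # 'myminion' is a placeholder target; requires 'file_recv: True'
    print(client.cmd('myminion', 'cp.push', ['/etc/hosts']))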
'Return the pillar data for the minion :param dict load: Minion payload :rtype: dict :return: The pillar data for the minion'
def _pillar(self, load):
if any(((key not in load) for key in (u'id', u'grains'))): return False if (not salt.utils.verify.valid_id(self.opts, load[u'id'])): return False load[u'grains'][u'id'] = load[u'id'] pillar_dirs = {} pillar = salt.pillar.get_pillar(self.opts, load[u'grains'], load[u'id'], load.get(u'saltenv', load.get(u'env')), ext=load.get(u'ext'), pillar_override=load.get(u'pillar_override', {}), pillarenv=load.get(u'pillarenv')) data = pillar.compile_pillar(pillar_dirs=pillar_dirs) self.fs_.update_opts() if self.opts.get(u'minion_data_cache', False): self.masterapi.cache.store(u'minions/{0}'.format(load[u'id']), u'data', {u'grains': load[u'grains'], u'pillar': data}) self.event.fire_event({u'Minion data cache refresh': load[u'id']}, tagify(load[u'id'], u'refresh', u'minion')) return data
'Receive an event from the minion and fire it on the master event interface :param dict load: The minion payload'
def _minion_event(self, load):
load = self.__verify_load(load, (u'id', u'tok')) if (load is False): return {} self.masterapi._minion_event(load) self._handle_minion_event(load)
'Act on specific events from minions'
def _handle_minion_event(self, load):
id_ = load[u'id'] if (load.get(u'tag', u'') == u'_salt_error'): log.error(u'Received minion error from [%s]: %s', id_, load[u'data'][u'message']) for event in load.get(u'events', []): event_data = event.get(u'data', {}) if (u'minions' in event_data): jid = event_data.get(u'jid') if (not jid): continue minions = event_data[u'minions'] try: salt.utils.job.store_minions(self.opts, jid, minions, mminion=self.mminion, syndic_id=id_) except (KeyError, salt.exceptions.SaltCacheError) as exc: log.error(u'Could not add minion(s) %s for job %s: %s', minions, jid, exc)
'Handle the return data sent from the minions. Takes the return, verifies it and fires it on the master event bus. Typically, this event is consumed by the Salt CLI waiting on the other end of the event bus but could be heard by any listener on the bus. :param dict load: The minion payload'
def _return(self, load):
if (self.opts[u'require_minion_sign_messages'] and (u'sig' not in load)): log.critical(u'_return: Master is requiring minions to sign their messages, but there is no signature in this payload from %s.', load[u'id']) return False if (u'sig' in load): log.trace(u'Verifying signed event publish from minion') sig = load.pop(u'sig') this_minion_pubkey = os.path.join(self.opts[u'pki_dir'], u'minions/{0}'.format(load[u'id'])) serialized_load = salt.serializers.msgpack.serialize(load) if (not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig)): log.info(u'Failed to verify event signature from minion %s.', load[u'id']) if self.opts[u'drop_messages_signature_fail']: log.critical(u'Drop_messages_signature_fail is enabled, dropping message from %s', load[u'id']) return False else: log.info(u"But 'drop_message_signature_fail' is disabled, so message is still accepted.") load[u'sig'] = sig try: salt.utils.job.store_job(self.opts, load, event=self.event, mminion=self.mminion) except salt.exceptions.SaltCacheError: log.error(u'Could not store job information for load: %s', load)
'Receive a syndic minion return and format it to look like returns from individual minions. :param dict load: The minion payload'
def _syndic_return(self, load):
if any(((key not in load) for key in (u'return', u'jid', u'id'))): return None if load.get(u'load'): fstr = u'{0}.save_load'.format(self.opts[u'master_job_cache']) self.mminion.returners[fstr](load[u'jid'], load[u'load']) syndic_cache_path = os.path.join(self.opts[u'cachedir'], u'syndics', load[u'id']) if (not os.path.exists(syndic_cache_path)): path_name = os.path.split(syndic_cache_path)[0] if (not os.path.exists(path_name)): os.makedirs(path_name) with salt.utils.files.fopen(syndic_cache_path, u'w') as wfh: wfh.write(u'') for (key, item) in six.iteritems(load[u'return']): ret = {u'jid': load[u'jid'], u'id': key} ret.update(item) if (u'master_id' in load): ret[u'master_id'] = load[u'master_id'] if (u'fun' in load): ret[u'fun'] = load[u'fun'] if (u'arg' in load): ret[u'fun_args'] = load[u'arg'] if (u'out' in load): ret[u'out'] = load[u'out'] if (u'sig' in load): ret[u'sig'] = load[u'sig'] self._return(ret)
'Execute a runner from a minion, return the runner\'s function data :param dict clear_load: The minion payload :rtype: dict :return: The runner function data'
def minion_runner(self, clear_load):
load = self.__verify_load(clear_load, (u'fun', u'arg', u'id', u'tok')) if (load is False): return {} else: return self.masterapi.minion_runner(clear_load)
'Request the return data from a specific jid, only allowed if the requesting minion also initiated the execution. :param dict load: The minion payload :rtype: dict :return: Return data corresponding to a given JID'
def pub_ret(self, load):
load = self.__verify_load(load, (u'jid', u'id', u'tok')) if (load is False): return {} auth_cache = os.path.join(self.opts[u'cachedir'], u'publish_auth') if (not os.path.isdir(auth_cache)): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, str(load[u'jid'])) with salt.utils.files.fopen(jid_fn, u'r') as fp_: if (not (load[u'id'] == fp_.read())): return {} return self.local.get_cache_returns(load[u'jid'])
'Publish a command initiated from a minion, this method executes minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions. The config will look like this: .. code-block:: yaml peer: .*: - .* This configuration will enable all minions to execute all commands: .. code-block:: yaml peer: foo.example.com: - test.* The above configuration will only allow the minion foo.example.com to execute commands from the test module. :param dict clear_load: The minion payload'
def minion_pub(self, clear_load):
if (not self.__verify_minion_publish(clear_load)): return {} else: return self.masterapi.minion_pub(clear_load)
'Publish a command initiated from a minion, this method executes minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions. The config will look like this: .. code-block:: yaml peer: .*: - .* This configuration will enable all minions to execute all commands: .. code-block:: yaml peer: foo.example.com: - test.* The above configuration will only allow the minion foo.example.com to execute commands from the test module. :param dict clear_load: The minion payload'
def minion_publish(self, clear_load):
if (not self.__verify_minion_publish(clear_load)): return {} else: return self.masterapi.minion_publish(clear_load)
'Allow a minion to request revocation of its own key :param dict load: The minion payload :rtype: dict or bool :return: If the load is invalid or key revocation is disabled, the load is returned and no key operation is performed; otherwise True if the key was revoked, False if not'
def revoke_auth(self, load):
load = self.__verify_load(load, (u'id', u'tok')) if (load is False): return load if (not self.opts.get(u'allow_minion_key_revoke', False)): log.warning(u'Minion %s requested key revoke, but allow_minion_key_revoke is set to False', load[u'id']) return load return self.masterapi.revoke_auth(load)
'Wrapper for running functions executed with AES encryption :param str func: The name of the function to run :param dict load: The load to pass to the function :return: The result of the master function that was called'
def run_func(self, func, load):
if func.startswith(u'__'): return ({}, {u'fun': u'send'}) if hasattr(self, func): try: start = time.time() ret = getattr(self, func)(load) log.trace(u'Master function call %s took %s seconds', func, (time.time() - start)) except Exception: ret = u'' log.error(u'Error in function %s:\n', func, exc_info=True) else: log.error(u'Received function %s which is unavailable on the master, returning False', func) return (False, {u'fun': u'send'}) if (func == u'_return'): return (ret, {u'fun': u'send'}) if ((func == u'_pillar') and (u'id' in load)): if ((load.get(u'ver') != u'2') and (self.opts[u'pillar_version'] == 1)): return (ret, {u'fun': u'send'}) return (ret, {u'fun': u'send_private', u'key': u'pillar', u'tgt': load[u'id']}) return (ret, {u'fun': u'send'})
'Send a master control function back to the runner system'
def runner(self, clear_load):
if (u'token' in clear_load): token = self.loadauth.authenticate_token(clear_load) if (not token): return dict(error=dict(name=u'TokenAuthenticationError', message=u'Authentication failure of type "token" occurred.')) if (self.opts[u'keep_acl_in_token'] and (u'auth_list' in token)): auth_list = token[u'auth_list'] else: clear_load[u'eauth'] = token[u'eauth'] clear_load[u'username'] = token[u'name'] auth_list = self.loadauth.get_auth_list(clear_load) if (not self.ckminions.runner_check(auth_list, clear_load[u'fun'])): return dict(error=dict(name=u'TokenAuthenticationError', message=u'Authentication failure of type "token" occurred for user {0}.'.format(token[u'name']))) clear_load.pop(u'token') username = token[u'name'] elif (u'eauth' in clear_load): if (not self.loadauth.authenticate_eauth(clear_load)): return dict(error=dict(name=u'EauthAuthenticationError', message=u'Authentication failure of type "eauth" occurred for user {0}.'.format(clear_load.get(u'username', u'UNKNOWN')))) auth_list = self.loadauth.get_auth_list(clear_load) if (not self.ckminions.runner_check(auth_list, clear_load[u'fun'])): return dict(error=dict(name=u'EauthAuthenticationError', message=u'Authentication failure of type "eauth" occurred for user {0}.'.format(clear_load.get(u'username', u'UNKNOWN')))) username = clear_load.pop(u'username', u'UNKNOWN') clear_load.pop(u'password', None) else: if (not self.loadauth.authenticate_key(clear_load, self.key)): return dict(error=dict(name=u'UserAuthenticationError', message=u'Authentication failure of type "user" occurred')) if (u'user' in clear_load): username = clear_load[u'user'] if salt.auth.AuthUser(username).is_sudo(): username = self.opts.get(u'user', u'root') else: username = salt.utils.get_user() try: fun = clear_load.pop(u'fun') runner_client = salt.runner.RunnerClient(self.opts) return runner_client.async(fun, clear_load.get(u'kwarg', {}), username) except Exception as exc: log.error(u'Exception occurred while introspecting %s: %s', fun, exc) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc)))
'Send a master control function back to the wheel system'
def wheel(self, clear_load):
username = None if (u'token' in clear_load): token = self.loadauth.authenticate_token(clear_load) if (not token): return dict(error=dict(name=u'TokenAuthenticationError', message=u'Authentication failure of type "token" occurred.')) if (self.opts[u'keep_acl_in_token'] and (u'auth_list' in token)): auth_list = token[u'auth_list'] else: clear_load[u'eauth'] = token[u'eauth'] clear_load[u'username'] = token[u'name'] auth_list = self.loadauth.get_auth_list(clear_load) if (not self.ckminions.wheel_check(auth_list, clear_load[u'fun'])): return dict(error=dict(name=u'TokenAuthenticationError', message=u'Authentication failure of type "token" occurred for user {0}.'.format(token[u'name']))) clear_load.pop(u'token') username = token[u'name'] elif (u'eauth' in clear_load): if (not self.loadauth.authenticate_eauth(clear_load)): return dict(error=dict(name=u'EauthAuthenticationError', message=u'Authentication failure of type "eauth" occurred for user {0}.'.format(clear_load.get(u'username', u'UNKNOWN')))) auth_list = self.loadauth.get_auth_list(clear_load) if (not self.ckminions.wheel_check(auth_list, clear_load[u'fun'])): return dict(error=dict(name=u'EauthAuthenticationError', message=u'Authentication failure of type "eauth" occurred for user {0}.'.format(clear_load.get(u'username', u'UNKNOWN')))) clear_load.pop(u'password', None) username = clear_load.pop(u'username', u'UNKNOWN') else: if (not self.loadauth.authenticate_key(clear_load, self.key)): return dict(error=dict(name=u'UserAuthenticationError', message=u'Authentication failure of type "user" occurred')) if (u'user' in clear_load): username = clear_load[u'user'] if salt.auth.AuthUser(username).is_sudo(): username = self.opts.get(u'user', u'root') else: username = salt.utils.get_user() try: jid = salt.utils.jid.gen_jid() fun = clear_load.pop(u'fun') tag = tagify(jid, prefix=u'wheel') data = {u'fun': u'wheel.{0}'.format(fun), u'jid': jid, u'tag': tag, u'user': username} self.event.fire_event(data, tagify([jid, u'new'], u'wheel')) ret = self.wheel_.call_func(fun, full_return=True, **clear_load) data[u'return'] = ret[u'return'] data[u'success'] = ret[u'success'] self.event.fire_event(data, tagify([jid, u'ret'], u'wheel')) return {u'tag': tag, u'data': data} except Exception as exc: log.error(u'Exception occurred while introspecting %s: %s', fun, exc) data[u'return'] = u'Exception occurred in wheel {0}: {1}: {2}'.format(fun, exc.__class__.__name__, exc) data[u'success'] = False self.event.fire_event(data, tagify([jid, u'ret'], u'wheel')) return {u'tag': tag, u'data': data}
'Create and return an authentication token; the clear load needs to contain the eauth key and the needed authentication creds.'
def mk_token(self, clear_load):
token = self.loadauth.mk_token(clear_load) if (not token): log.warning(u'Authentication failure of type "eauth" occurred.') return u'' return token
'Return the name associated with a token or False if the token is invalid'
def get_token(self, clear_load):
if (u'token' not in clear_load): return False return self.loadauth.get_tok(clear_load[u'token'])
'This method sends out publications to the minions; it can only be used by the LocalClient.'
def publish(self, clear_load):
extra = clear_load.get(u'kwargs', {}) publisher_acl = salt.acl.PublisherACL(self.opts[u'publisher_acl_blacklist']) if (publisher_acl.user_is_blacklisted(clear_load[u'user']) or publisher_acl.cmd_is_blacklisted(clear_load[u'fun'])): log.error(u'%s does not have permissions to run %s. Please contact your local administrator if you believe this is in error.\n', clear_load[u'user'], clear_load[u'fun']) return u'' delimiter = clear_load.get(u'kwargs', {}).get(u'delimiter', DEFAULT_TARGET_DELIM) minions = self.ckminions.check_minions(clear_load[u'tgt'], clear_load.get(u'tgt_type', u'glob'), delimiter) if extra.get(u'token', False): token = self.loadauth.authenticate_token(extra) if (not token): return u'' if (self.opts[u'keep_acl_in_token'] and (u'auth_list' in token)): auth_list = token[u'auth_list'] else: extra[u'eauth'] = token[u'eauth'] extra[u'username'] = token[u'name'] auth_list = self.loadauth.get_auth_list(extra) if (not self.ckminions.auth_check(auth_list, clear_load[u'fun'], clear_load[u'arg'], clear_load[u'tgt'], clear_load.get(u'tgt_type', u'glob'), minions=minions, whitelist=[u'saltutil.find_job'])): log.warning(u'Authentication failure of type "token" occurred.') return u'' clear_load[u'user'] = token[u'name'] log.debug(u'Minion tokenized user = "%s"', clear_load[u'user']) elif (u'eauth' in extra): if (not self.loadauth.authenticate_eauth(extra)): return u'' auth_list = self.loadauth.get_auth_list(extra) if (not self.ckminions.auth_check(auth_list, clear_load[u'fun'], clear_load[u'arg'], clear_load[u'tgt'], clear_load.get(u'tgt_type', u'glob'), minions=minions, whitelist=[u'saltutil.find_job'])): log.warning(u'Authentication failure of type "eauth" occurred.') return u'' clear_load[u'user'] = self.loadauth.load_name(extra) else: auth_ret = self.loadauth.authenticate_key(clear_load, self.key) if (auth_ret is False): return u'' if (auth_ret is not True): if salt.auth.AuthUser(clear_load[u'user']).is_sudo(): if ((not self.opts[u'sudo_acl']) or (not self.opts[u'publisher_acl'])): auth_ret = True if (auth_ret is not True): auth_list = salt.utils.get_values_of_matching_keys(self.opts[u'publisher_acl'], auth_ret) if (not auth_list): log.warning(u'Authentication failure of type "user" occurred.') return u'' if (not self.ckminions.auth_check(auth_list, clear_load[u'fun'], clear_load[u'arg'], clear_load[u'tgt'], clear_load.get(u'tgt_type', u'glob'), minions=minions, whitelist=[u'saltutil.find_job'])): log.warning(u'Authentication failure of type "user" occurred.') return u'' if (not self.opts.get(u'order_masters')): if (not minions): return {u'enc': u'clear', u'load': {u'jid': None, u'minions': minions, u'error': u'Master could not resolve minions for target {0}'.format(clear_load[u'tgt'])}} jid = self._prep_jid(clear_load, extra) if (jid is None): return {u'enc': u'clear', u'load': {u'error': u'Master failed to assign jid'}} payload = self._prep_pub(minions, jid, clear_load, extra) self._send_pub(payload) return {u'enc': u'clear', u'load': {u'jid': clear_load[u'jid'], u'minions': minions}}
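This method is the server-side landing point for LocalClient publications: each cmd() call below produces a clear-channel load with \'cmd\': \'publish\' that is authorized and dispatched as above. A sketch, assuming a running master and a matching minion:

.. code-block:: python

    import salt.client

    client = salt.client.LocalClient()
    print(client.cmd('*', 'test.ping'))
    print(client.cmd('web*', 'cmd.run', ['uptime'], tgt_type='glob'))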
'Return a jid for this publication'
def _prep_jid(self, clear_load, extra):
passed_jid = (clear_load[u'jid'] if clear_load.get(u'jid') else None) nocache = extra.get(u'nocache', False) fstr = u'{0}.prep_jid'.format(self.opts[u'master_job_cache']) try: jid = self.mminion.returners[fstr](nocache=nocache, passed_jid=passed_jid) except (KeyError, TypeError): msg = u"Failed to allocate a jid. The requested returner '{0}' could not be loaded.".format(fstr.split(u'.')[0]) log.error(msg) return {u'error': msg} return jid
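Unless a caller passes its own jid, the default returner falls back to salt.utils.jid.gen_jid(), which produces a microsecond timestamp string. A sketch of the format:

.. code-block:: python

    import salt.utils.jid

    jid = salt.utils.jid.gen_jid()
    print(jid)                              # e.g. '20170901123456789012'
    print(salt.utils.jid.jid_to_time(jid))  # e.g. '2017, Sep 01 12:34:56.789012'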