desc: string, lengths 3 to 26.7k
decl: string, lengths 11 to 7.89k
bodies: string, lengths 8 to 553k
'Cache a file from the salt master'
def cache_file(self, template):
saltpath = salt.utils.url.create(template)
self.file_client().get_file(saltpath, '', True, self.saltenv)
'Cache a file only once'
def check_cache(self, template):
if template not in self.cached:
    self.cache_file(template)
    self.cached.append(template)
'Ensure that printed mappings are YAML friendly.'
def finalizer(self, data):
def explore(data):
    if isinstance(data, (dict, OrderedDict)):
        return PrintableDict(
            [(key, explore(value)) for (key, value) in six.iteritems(data)]
        )
    elif isinstance(data, (list, tuple, set)):
        return data.__class__([explore(value) for value in data])
    return data

return explore(data)
'Render a formatted multi-line XML string from a complex Python data structure. Supports tag attributes and nested dicts/lists. :param value: Complex data structure representing XML contents :returns: Formatted XML string rendered with newlines and indentation :rtype: str'
def format_xml(self, value):
def normalize_iter(value):
    if isinstance(value, (list, tuple)):
        if isinstance(value[0], str):
            xmlval = value
        else:
            xmlval = []
    elif isinstance(value, dict):
        xmlval = list(value.items())
    else:
        raise TemplateRuntimeError('Value is not a dict or list. Cannot render as XML')
    return xmlval

def recurse_tree(xmliter, element=None):
    sub = None
    for (tag, attrs) in xmliter:
        if isinstance(attrs, list):
            for attr in attrs:
                recurse_tree(((tag, attr),), element)
        elif element is not None:
            sub = SubElement(element, tag)
        else:
            sub = Element(tag)
        if isinstance(attrs, (str, int, bool, float)):
            sub.text = str(attrs)
            continue
        if isinstance(attrs, dict):
            sub.attrib = {
                attr: str(val)
                for (attr, val) in attrs.items()
                if not isinstance(val, (dict, list))
            }
        for (tag, val) in [item for item in normalize_iter(attrs) if isinstance(item[1], (dict, list))]:
            recurse_tree(((tag, val),), sub)
    return sub

return Markup(
    minidom.parseString(
        tostring(recurse_tree(normalize_iter(value)))
    ).toprettyxml(indent=' '))
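A usage sketch for the filter above; the instance name and the exact pretty-printed whitespace are assumptions, while the attribute/child behaviour follows from recurse_tree:

# Hypothetical call site; 'ext' stands in for whatever object exposes format_xml.
data = {'root': {'a': 'b', 'child': {'x': '1'}}}
# ext.format_xml(data) renders scalar dict values as attributes and nested
# dicts as child elements, roughly:
# <root a="b">
#   <child x="1"/>
# </root>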
'Determine if this ContextDict is currently overridden. Since the ContextDict can be overridden in each thread, we check whether the _state.data is set or not.'
@property
def active(self):
try:
    return self._state.data is not None
except AttributeError:
    return False
'Clone this context, and return the ChildContextDict'
def clone(self, **kwargs):
child = ChildContextDict(parent=self, threadsafe=self._threadsafe, overrides=kwargs)
return child
'Return the next iteration by popping `chunk_size` from the left and appending `chunk_size` to the right if there\'s info on the file left to be read.'
def next(self):
if self.__buffered is None:
    multiplier = self.__max_in_mem // self.__chunk_size
    self.__buffered = ''
else:
    multiplier = 1
    self.__buffered = self.__buffered[self.__chunk_size:]
if six.PY3:
    data = self.__file.read(self.__chunk_size * multiplier).decode(__salt_system_encoding__)
else:
    data = self.__file.read(self.__chunk_size * multiplier)
if not data:
    self.__file.close()
    raise StopIteration
self.__buffered += data
return self.__buffered
'Establishes a connection to the remote server. The format for parameters is: username (string): The username to use for this ssh connection. Defaults to salt. password (string): The password to use for this ssh connection. Defaults to password. host (string): The host to connect to. Defaults to localhost. key_accept (boolean): Should we accept this host\'s key and add it to the known_hosts file? Defaults to False. prompt (string): The shell prompt (regex) on the server. The prompt is compiled into a regular expression. Defaults to (Cmd). passwd_retries (int): How many times to try to send the password. Defaults to 3. linesep (string): The line separator to use when sending commands to the server. Defaults to os.linesep. ssh_args (string): Extra ssh args to use with ssh. Example: \'-o PubkeyAuthentication=no\''
def __init__(self, username='salt', password='password', host='localhost', key_accept=False, prompt='(Cmd)', passwd_retries=3, linesep=os.linesep, ssh_args=''):
self.conn = Terminal(
    'ssh {0} -l {1} {2}'.format(ssh_args, username, host),
    shell=True,
    log_stdout=True,
    log_stdout_level='trace',
    log_stderr=True,
    log_stderr_level='trace',
    stream_stdout=False,
    stream_stderr=False)
sent_passwd = 0
self.prompt_re = re.compile(prompt)
self.linesep = linesep
while self.conn.has_unread_data:
    (stdout, stderr) = self.conn.recv()
    if stdout and SSH_PASSWORD_PROMPT_RE.search(stdout):
        if not password:
            log.error('Failure during authentication.')
            raise TerminalException('Permission denied, no authentication information')
        if sent_passwd < passwd_retries:
            self.conn.sendline(password, self.linesep)
            sent_passwd += 1
            continue
        else:
            raise TerminalException('Password authentication failed')
    elif stdout and KEY_VALID_RE.search(stdout):
        if key_accept:
            log.info('Adding {0} to known_hosts'.format(host))
            self.conn.sendline('yes')
            continue
        else:
            self.conn.sendline('no')
    elif stdout and self.prompt_re.search(stdout):
        break
'Send this command to the server and return a tuple of the output and the stderr. The format for parameters is: cmd (string): The command to send to the server.'
def sendline(self, cmd):
self.conn.sendline(cmd, self.linesep)
ret_stdout = []
ret_stderr = []
while self.conn.has_unread_data:
    (stdout, stderr) = self.conn.recv()
    if stdout:
        ret_stdout.append(stdout)
    if stderr:
        log.debug('Error while executing command.')
        ret_stderr.append(stderr)
    if stdout and self.prompt_re.search(stdout):
        break
return (''.join(ret_stdout), ''.join(ret_stderr))
'Close the server connection'
def close_connection(self):
self.conn.close(terminate=True, kill=True)
'Init'
def __init__(self, content):
self.content = content
self.finished = False
'Looks like a file handle'
def string(self, writesize=None):
if not self.finished:
    self.finished = True
    return self.content
return ''
'Build the mapping for YAML'
def construct_mapping(self, node, deep=False):
if not isinstance(node, MappingNode):
    raise ConstructorError(
        None, None,
        'expected a mapping node, but found {0}'.format(node.id),
        node.start_mark)
self.flatten_mapping(node)
mapping = self.dictclass()
for (key_node, value_node) in node.value:
    key = self.construct_object(key_node, deep=deep)
    try:
        hash(key)
    except TypeError:
        err = 'While constructing a mapping {0} found unacceptable key {1}'.format(node.start_mark, key_node.start_mark)
        raise ConstructorError(err)
    value = self.construct_object(value_node, deep=deep)
    if key in mapping:
        raise ConstructorError("Conflicting ID '{0}'".format(key))
    mapping[key] = value
return mapping
'Verify integers and pass them in correctly if they are declared as octal'
def construct_scalar(self, node):
if node.tag == 'tag:yaml.org,2002:int':
    if node.value == '0':
        pass
    elif node.value.startswith('0') and not node.value.startswith(('0b', '0x')):
        node.value = node.value.lstrip('0')
        if node.value == '':
            node.value = '0'
elif node.tag == 'tag:yaml.org,2002:str':
    if re.match('^u([\\\'"]).+\\1$', node.value, flags=re.IGNORECASE):
        node.value = eval(node.value, {}, {})
return super(SaltYamlSafeLoader, self).construct_scalar(node)
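A self-contained restatement of the integer-normalisation rule above, so the intent is easy to check in isolation (the helper name is mine):

def _normalize_yaml_int(value):
    # Strip leading zeros so '010' loads as decimal 10 rather than octal 8,
    # while leaving '0', '0b...' and '0x...' alone, as in construct_scalar above.
    if value != '0' and value.startswith('0') and not value.startswith(('0b', '0x')):
        value = value.lstrip('0') or '0'
    return value

assert _normalize_yaml_int('010') == '10'
assert _normalize_yaml_int('0x1f') == '0x1f'
assert _normalize_yaml_int('0') == '0'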
'Only create one instance of Schedule'
def __new__(cls, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None):
if cls.instance is None:
    log.debug('Initializing new Schedule')
    cls.instance = object.__new__(cls)
    cls.instance.__singleton_init__(opts, functions, returners, intervals, cleanup, proxy)
else:
    log.debug('Re-using Schedule')
return cls.instance
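The singleton pattern used by __new__ above, reduced to a standalone sketch (class and method names here are illustrative, not Salt's):

class Singleton(object):
    instance = None

    def __new__(cls, *args, **kwargs):
        if cls.instance is None:
            cls.instance = object.__new__(cls)
            cls.instance._init_once(*args, **kwargs)
        return cls.instance

    def _init_once(self, value):
        self.value = value

first = Singleton(1)
second = Singleton(2)   # re-uses the first instance; _init_once does not run again
assert first is second and first.value == 1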
'Return options merged from config and pillar'
def option(self, opt):
if 'config.merge' in self.functions:
    return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
'Return the schedule data structure'
def _get_schedule(self, include_opts=True, include_pillar=True):
schedule = {}
if include_pillar:
    pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
    if not isinstance(pillar_schedule, dict):
        raise ValueError('Schedule must be of type dict.')
    schedule.update(pillar_schedule)
if include_opts:
    opts_schedule = self.opts.get('schedule', {})
    if not isinstance(opts_schedule, dict):
        raise ValueError('Schedule must be of type dict.')
    schedule.update(opts_schedule)
return schedule
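Because the pillar schedule is merged first and the opts schedule second, a job defined in both places ends up with the opts definition; a minimal illustration with made-up data:

pillar_schedule = {'job1': {'function': 'test.ping', 'seconds': 60}}
opts_schedule = {'job1': {'function': 'test.ping', 'seconds': 10}}

schedule = {}
schedule.update(pillar_schedule)   # include_pillar
schedule.update(opts_schedule)     # include_opts, applied last, wins on duplicates
assert schedule['job1']['seconds'] == 10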
'Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf'
def persist(self):
config_dir = self.opts.get('conf_dir', None)
if config_dir is None and 'conf_file' in self.opts:
    config_dir = os.path.dirname(self.opts['conf_file'])
if config_dir is None:
    config_dir = salt.syspaths.CONFIG_DIR
minion_d_dir = os.path.join(
    config_dir,
    os.path.dirname(self.opts.get(
        'default_include',
        salt.config.DEFAULT_MINION_OPTS['default_include'])))
if not os.path.isdir(minion_d_dir):
    os.makedirs(minion_d_dir)
schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
log.debug('Persisting schedule')
try:
    with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
        fp_.write(
            salt.utils.stringutils.to_bytes(
                yaml.dump(
                    {'schedule': self._get_schedule(include_pillar=False)},
                    Dumper=SafeOrderedDumper)))
except (IOError, OSError):
    log.error('Failed to persist the updated schedule',
              exc_info_on_loglevel=logging.DEBUG)
'Deletes a job from the scheduler. Ignores jobs from pillar'
def delete_job(self, name, persist=True):
if name in self.opts['schedule']:
    del self.opts['schedule'][name]
elif name in self._get_schedule(include_opts=False):
    log.warning("Cannot delete job {0}, it's in the pillar!".format(name))
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
               tag='/salt/minion/minion_schedule_delete_complete')
if name in self.intervals:
    del self.intervals[name]
if persist:
    self.persist()
'Deletes a job from the scheduler. Ignores jobs from pillar'
def delete_job_prefix(self, name, persist=True):
for job in list(self.opts['schedule'].keys()):
    if job.startswith(name):
        del self.opts['schedule'][job]
for job in self._get_schedule(include_opts=False):
    if job.startswith(name):
        log.warning("Cannot delete job {0}, it's in the pillar!".format(job))
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
               tag='/salt/minion/minion_schedule_delete_complete')
for job in list(self.intervals.keys()):
    if job.startswith(name):
        del self.intervals[job]
if persist:
    self.persist()
'Adds a new job to the scheduler. The format is the same as required in the configuration file. See the docs on how YAML is interpreted into Python data structures to make sure you pass correct dictionaries.'
def add_job(self, data, persist=True):
if not isinstance(data, dict):
    raise ValueError('Scheduled jobs have to be of type dict.')
if not len(data) == 1:
    raise ValueError('You can only schedule one new job at a time.')
for job in data:
    if 'enabled' not in data[job]:
        data[job]['enabled'] = True
new_job = next(six.iterkeys(data))
if new_job in self._get_schedule(include_opts=False):
    log.warning("Cannot update job {0}, it's in the pillar!".format(new_job))
elif new_job in self.opts['schedule']:
    log.info('Updating job settings for scheduled job: {0}'.format(new_job))
    self.opts['schedule'].update(data)
else:
    log.info('Added new job {0} to scheduler'.format(new_job))
    self.opts['schedule'].update(data)
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
               tag='/salt/minion/minion_schedule_add_complete')
if persist:
    self.persist()
'Enable a job in the scheduler. Ignores jobs from pillar'
def enable_job(self, name, persist=True):
if name in self.opts['schedule']:
    self.opts['schedule'][name]['enabled'] = True
    log.info('Enabling job {0} in scheduler'.format(name))
elif name in self._get_schedule(include_opts=False):
    log.warning("Cannot modify job {0}, it's in the pillar!".format(name))
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
               tag='/salt/minion/minion_schedule_enabled_job_complete')
if persist:
    self.persist()
'Disable a job in the scheduler. Ignores jobs from pillar'
def disable_job(self, name, persist=True):
if name in self.opts['schedule']:
    self.opts['schedule'][name]['enabled'] = False
    log.info('Disabling job {0} in scheduler'.format(name))
elif name in self._get_schedule(include_opts=False):
    log.warning("Cannot modify job {0}, it's in the pillar!".format(name))
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
               tag='/salt/minion/minion_schedule_disabled_job_complete')
if persist:
    self.persist()
'Modify a job in the scheduler. Ignores jobs from pillar'
def modify_job(self, name, schedule, persist=True):
if name in self.opts['schedule']:
    self.delete_job(name, persist)
elif name in self._get_schedule(include_opts=False):
    log.warning("Cannot modify job {0}, it's in the pillar!".format(name))
    return
self.opts['schedule'][name] = schedule
if persist:
    self.persist()
'Run a schedule job now'
def run_job(self, name):
data = self._get_schedule().get(name, {})
if 'function' in data:
    func = data['function']
elif 'func' in data:
    func = data['func']
elif 'fun' in data:
    func = data['fun']
else:
    func = None
if func not in self.functions:
    log.info('Invalid function: {0} in scheduled job {1}.'.format(func, name))
if 'name' not in data:
    data['name'] = name
log.info('Running Job: {0}.'.format(name))
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
    thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
else:
    thread_cls = threading.Thread
if multiprocessing_enabled:
    with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
        proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, func, data))
        proc.start()
    proc.join()
else:
    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, func, data))
    proc.start()
'Enable the scheduler.'
def enable_schedule(self):
self.opts['schedule']['enabled'] = True
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
               tag='/salt/minion/minion_schedule_enabled_complete')
'Disable the scheduler.'
def disable_schedule(self):
self.opts['schedule']['enabled'] = False
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
               tag='/salt/minion/minion_schedule_disabled_complete')
'Reload the schedule from saved schedule file.'
def reload(self, schedule):
self.intervals = {}
if 'schedule' in schedule:
    schedule = schedule['schedule']
self.opts.setdefault('schedule', {}).update(schedule)
'List the current schedule items'
def list(self, where):
if where == 'pillar':
    schedule = self._get_schedule(include_opts=False)
elif where == 'opts':
    schedule = self._get_schedule(include_pillar=False)
else:
    schedule = self._get_schedule()
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': schedule},
               tag='/salt/minion/minion_schedule_list_complete')
'Save the current schedule'
def save_schedule(self):
self.persist()
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True}, tag='/salt/minion/minion_schedule_saved')
'Execute this method in a multiprocess or thread'
def handle_func(self, multiprocessing_enabled, func, data):
if (salt.utils.platform.is_windows() or (self.opts.get('transport') == 'zeromq')): if (self.opts['__role'] == 'master'): self.functions = salt.loader.runner(self.opts) else: self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy) self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy) ret = {'id': self.opts.get('id', 'master'), 'fun': func, 'fun_args': [], 'schedule': data['name'], 'jid': salt.utils.jid.gen_jid()} if ('metadata' in data): if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] ret['metadata']['_TOS'] = self.time_offset ret['metadata']['_TS'] = time.ctime() ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime()) else: log.warning('schedule: The metadata parameter must be specified as a dictionary. Ignoring.') salt.utils.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid'])) proc_fn = os.path.join(salt.minion.get_proc_dir(self.opts['cachedir']), ret['jid']) if (('jid_include' not in data) or data['jid_include']): jobcount = 0 for job in salt.utils.minion.running(self.opts): if ('schedule' in job): log.debug('schedule.handle_func: Checking job against fun {0}: {1}'.format(ret['fun'], job)) if ((ret['schedule'] == job['schedule']) and salt.utils.process.os_is_running(job['pid'])): jobcount += 1 log.debug('schedule.handle_func: Incrementing jobcount, now {0}, maxrunning is {1}'.format(jobcount, data['maxrunning'])) if (jobcount >= data['maxrunning']): log.debug('schedule.handle_func: The scheduled job {0} was not started, {1} already running'.format(ret['schedule'], data['maxrunning'])) return False if (multiprocessing_enabled and (not salt.utils.platform.is_windows())): log_setup.setup_multiprocessing_logging() salt.utils.daemonize_if(self.opts) try: ret['pid'] = os.getpid() if (('jid_include' not in data) or data['jid_include']): log.debug('schedule.handle_func: adding this job to the jobcache with data {0}'.format(ret)) with salt.utils.files.fopen(proc_fn, 'w+b') as fp_: fp_.write(salt.payload.Serial(self.opts).dumps(ret)) args = tuple() if ('args' in data): args = data['args'] ret['fun_args'].extend(data['args']) kwargs = {} if ('kwargs' in data): kwargs = data['kwargs'] ret['fun_args'].append(copy.deepcopy(kwargs)) if (func not in self.functions): ret['return'] = self.functions.missing_fun_string(func) salt.utils.error.raise_error(message=self.functions.missing_fun_string(func)) argspec = salt.utils.args.get_function_argspec(self.functions[func]) if argspec.keywords: for (key, val) in six.iteritems(ret): kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val) ret['return'] = self.functions[func](*args, **kwargs) data_returner = data.get('returner', None) if (data_returner or self.schedule_returner): if ('return_config' in data): ret['ret_config'] = data['return_config'] if ('return_kwargs' in data): ret['ret_kwargs'] = data['return_kwargs'] rets = [] for returner in [data_returner, self.schedule_returner]: if isinstance(returner, six.string_types): rets.append(returner) elif isinstance(returner, list): rets.extend(returner) for returner in OrderedDict.fromkeys(rets): ret_str = '{0}.returner'.format(returner) if (ret_str in self.returners): ret['success'] = True self.returners[ret_str](ret) else: log.info('Job {0} using invalid returner: {1}. 
Ignoring.'.format(func, returner)) if ('retcode' in self.functions.pack['__context__']): ret['retcode'] = self.functions.pack['__context__']['retcode'] ret['success'] = True except Exception: log.exception('Unhandled exception running {0}'.format(ret['fun'])) if ('return' not in ret): ret['return'] = 'Unhandled exception running {0}'.format(ret['fun']) ret['success'] = False ret['retcode'] = 254 finally: if (('__role' in self.opts) and (self.opts['__role'] in ('master', 'minion'))): if (('return_job' in data) and (not data['return_job'])): pass else: mret = ret.copy() mret['jid'] = 'req' if (data.get('return_job') == 'nocache'): mret['jid'] = 'nocache' load = {'cmd': '_return', 'id': self.opts['id']} for (key, value) in six.iteritems(mret): load[key] = value if (('__role' in self.opts) and (self.opts['__role'] == 'minion')): event = salt.utils.event.get_event('minion', opts=self.opts, listen=False) elif (('__role' in self.opts) and (self.opts['__role'] == 'master')): event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir']) try: event.fire_event(load, '__schedule_return') except Exception as exc: log.exception('Unhandled exception firing event: {0}'.format(exc)) log.debug('schedule.handle_func: Removing {0}'.format(proc_fn)) try: os.unlink(proc_fn) except OSError as exc: if ((exc.errno == errno.EEXIST) or (exc.errno == errno.ENOENT)): pass else: log.error("Failed to delete '{0}': {1}".format(proc_fn, exc.errno)) raise finally: if multiprocessing_enabled: sys.exit(salt.defaults.exitcodes.EX_GENERIC)
'Evaluate and execute the schedule'
def eval(self):
def _splay(splaytime): '\n Calculate splaytime\n ' splay_ = None if isinstance(splaytime, dict): if (splaytime['end'] >= splaytime['start']): splay_ = random.randint(splaytime['start'], splaytime['end']) else: log.error('schedule.handle_func: Invalid Splay, end must be larger than start. Ignoring splay.') else: splay_ = random.randint(1, splaytime) return splay_ schedule = self._get_schedule() if (not isinstance(schedule, dict)): raise ValueError('Schedule must be of type dict.') if (('enabled' in schedule) and (not schedule['enabled'])): return for (job, data) in six.iteritems(schedule): if ((job == 'enabled') or (not data)): continue if (not isinstance(data, dict)): log.error('Scheduled job "{0}" should have a dict value, not {1}'.format(job, type(data))) continue if (('enabled' in data) and (not data['enabled'])): continue if ('function' in data): func = data['function'] elif ('func' in data): func = data['func'] elif ('fun' in data): func = data['fun'] else: func = None if (func not in self.functions): log.info('Invalid function: {0} in scheduled job {1}.'.format(func, job)) if ('name' not in data): data['name'] = job if ('_next_fire_time' not in data): data['_next_fire_time'] = None if ('_splay' not in data): data['_splay'] = None if (('run_on_start' in data) and data['run_on_start'] and ('_run_on_start' not in data)): data['_run_on_start'] = True now = int(time.time()) if ('until' in data): if (not _WHEN_SUPPORTED): log.error('Missing python-dateutil. Ignoring until.') else: until__ = dateutil_parser.parse(data['until']) until = int(time.mktime(until__.timetuple())) if (until <= now): log.debug('Until time has passed skipping job: {0}.'.format(data['name'])) continue if ('after' in data): if (not _WHEN_SUPPORTED): log.error('Missing python-dateutil. Ignoring after.') else: after__ = dateutil_parser.parse(data['after']) after = int(time.mktime(after__.timetuple())) if (after >= now): log.debug('After time has not passed skipping job: {0}.'.format(data['name'])) continue schedule_keys = set(data.keys()) time_elements = ('seconds', 'minutes', 'hours', 'days') scheduling_elements = ('when', 'cron', 'once') invalid_sched_combos = [set(i) for i in itertools.combinations(scheduling_elements, 2)] if any(((i <= schedule_keys) for i in invalid_sched_combos)): log.error('Unable to use "{0}" options together. Ignoring.'.format('", "'.join(scheduling_elements))) continue invalid_time_combos = [] for item in scheduling_elements: all_items = itertools.chain([item], time_elements) invalid_time_combos.append(set(itertools.combinations(all_items, 2))) if any(((set(x) <= schedule_keys) for x in invalid_time_combos)): log.error('Unable to use "{0}" with "{1}" options. 
Ignoring'.format('", "'.join(time_elements), '", "'.join(scheduling_elements))) continue if (True in [True for item in time_elements if (item in data)]): if ('_seconds' not in data): interval = int(data.get('seconds', 0)) interval += (int(data.get('minutes', 0)) * 60) interval += (int(data.get('hours', 0)) * 3600) interval += (int(data.get('days', 0)) * 86400) data['_seconds'] = interval if (not data['_next_fire_time']): data['_next_fire_time'] = (now + data['_seconds']) if (interval < self.loop_interval): self.loop_interval = interval elif ('once' in data): if (data['_next_fire_time'] and (data['_next_fire_time'] != now) and (not data['_splay'])): continue if ((not data['_next_fire_time']) and (not data['_splay'])): once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S') try: once = datetime.datetime.strptime(data['once'], once_fmt) data['_next_fire_time'] = int(time.mktime(once.timetuple())) except (TypeError, ValueError): log.error('Date string could not be parsed: %s, %s', data['once'], once_fmt) continue if (data['_next_fire_time'] != now): continue elif ('when' in data): if (not _WHEN_SUPPORTED): log.error('Missing python-dateutil. Ignoring job {0}.'.format(job)) continue if isinstance(data['when'], list): _when = [] for i in data['when']: if (('pillar' in self.opts) and ('whens' in self.opts['pillar']) and (i in self.opts['pillar']['whens'])): if (not isinstance(self.opts['pillar']['whens'], dict)): log.error('Pillar item "whens" must be dict. Ignoring') continue __when = self.opts['pillar']['whens'][i] try: when__ = dateutil_parser.parse(__when) except ValueError: log.error('Invalid date string. Ignoring') continue elif (('whens' in self.opts['grains']) and (i in self.opts['grains']['whens'])): if (not isinstance(self.opts['grains']['whens'], dict)): log.error('Grain "whens" must be dict.Ignoring') continue __when = self.opts['grains']['whens'][i] try: when__ = dateutil_parser.parse(__when) except ValueError: log.error('Invalid date string. Ignoring') continue else: try: when__ = dateutil_parser.parse(i) except ValueError: log.error('Invalid date string {0}. Ignoring job {1}.'.format(i, job)) continue _when.append(int(time.mktime(when__.timetuple()))) if data['_splay']: _when.append(data['_splay']) _when.sort() for i in _when: if ((i < now) and (len(_when) > 1)): _when.remove(i) if _when: when = _when[0] if ('_run' not in data): data['_run'] = bool((when >= now)) if (not data['_next_fire_time']): data['_next_fire_time'] = when if ((data['_next_fire_time'] < when) and (not data['_run'])): data['_next_fire_time'] = when data['_run'] = True elif (not data.get('_run', False)): data['_next_fire_time'] = None continue else: if (('pillar' in self.opts) and ('whens' in self.opts['pillar']) and (data['when'] in self.opts['pillar']['whens'])): if (not isinstance(self.opts['pillar']['whens'], dict)): log.error('Pillar item "whens" must be dict.Ignoring') continue _when = self.opts['pillar']['whens'][data['when']] try: when__ = dateutil_parser.parse(_when) except ValueError: log.error('Invalid date string. Ignoring') continue elif (('whens' in self.opts['grains']) and (data['when'] in self.opts['grains']['whens'])): if (not isinstance(self.opts['grains']['whens'], dict)): log.error('Grain "whens" must be dict. Ignoring') continue _when = self.opts['grains']['whens'][data['when']] try: when__ = dateutil_parser.parse(_when) except ValueError: log.error('Invalid date string. Ignoring') continue else: try: when__ = dateutil_parser.parse(data['when']) except ValueError: log.error('Invalid date string. 
Ignoring') continue when = int(time.mktime(when__.timetuple())) if ((when < now) and (not data.get('_run', False)) and (not data['_splay'])): data['_next_fire_time'] = None continue if ('_run' not in data): data['_run'] = True if (not data['_next_fire_time']): data['_next_fire_time'] = when if ((data['_next_fire_time'] < when) and (not data['_run'])): data['_next_fire_time'] = when data['_run'] = True elif ('cron' in data): if (not _CRON_SUPPORTED): log.error('Missing python-croniter. Ignoring job {0}'.format(job)) continue if (data['_next_fire_time'] is None): try: data['_next_fire_time'] = int(croniter.croniter(data['cron'], now).get_next()) except (ValueError, KeyError): log.error('Invalid cron string. Ignoring') continue interval = (now - data['_next_fire_time']) if ((interval >= 60) and (interval < self.loop_interval)): self.loop_interval = interval else: continue run = False seconds = (data['_next_fire_time'] - now) if data['_splay']: seconds = (data['_splay'] - now) if (seconds <= 0): if ('_seconds' in data): run = True elif (('when' in data) and data['_run']): data['_run'] = False run = True elif ('cron' in data): data['_next_fire_time'] = None run = True elif (seconds == 0): run = True if (('_run_on_start' in data) and data['_run_on_start']): run = True data['_run_on_start'] = False elif run: if (('splay' in data) and (not data['_splay'])): splay = _splay(data['splay']) if (now < (data['_next_fire_time'] + splay)): log.debug('schedule.handle_func: Adding splay of {0} seconds to next run.'.format(splay)) run = False data['_splay'] = (data['_next_fire_time'] + splay) if ('when' in data): data['_run'] = True if ('range' in data): if (not _RANGE_SUPPORTED): log.error('Missing python-dateutil. Ignoring job {0}'.format(job)) continue elif isinstance(data['range'], dict): try: start = int(time.mktime(dateutil_parser.parse(data['range']['start']).timetuple())) except ValueError: log.error('Invalid date string for start. Ignoring job {0}.'.format(job)) continue try: end = int(time.mktime(dateutil_parser.parse(data['range']['end']).timetuple())) except ValueError: log.error('Invalid date string for end. Ignoring job {0}.'.format(job)) continue if (end > start): if (('invert' in data['range']) and data['range']['invert']): if ((now <= start) or (now >= end)): run = True else: run = False elif (start <= now <= end): run = True else: run = False else: log.error('schedule.handle_func: Invalid range, end must be larger than start. Ignoring job {0}.'.format(job)) continue else: log.error('schedule.handle_func: Invalid, range must be specified as a dictionary. 
Ignoring job {0}.'.format(job)) continue if (not run): continue miss_msg = '' if (seconds < 0): miss_msg = ' (runtime missed by {0} seconds)'.format(abs(seconds)) log.info('Running scheduled job: {0}{1}'.format(job, miss_msg)) if (('jid_include' not in data) or data['jid_include']): data['jid_include'] = True log.debug('schedule: This job was scheduled with jid_include, adding to cache (jid_include defaults to True)') if ('maxrunning' in data): log.debug('schedule: This job was scheduled with a max number of {0}'.format(data['maxrunning'])) else: log.info('schedule: maxrunning parameter was not specified for job {0}, defaulting to 1.'.format(job)) data['maxrunning'] = 1 multiprocessing_enabled = self.opts.get('multiprocessing', True) if salt.utils.platform.is_windows(): functions = self.functions self.functions = {} returners = self.returners self.returners = {} try: if multiprocessing_enabled: thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess else: thread_cls = threading.Thread proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, func, data)) if multiprocessing_enabled: with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM): proc.start() else: proc.start() if multiprocessing_enabled: proc.join() finally: if ('_seconds' in data): data['_next_fire_time'] = (now + data['_seconds']) data['_splay'] = None if salt.utils.platform.is_windows(): self.functions = functions self.returners = returners
'Clear the dict'
def clear(self):
self._dict = getattr(self, 'mod_dict_class', dict)()
self.loaded = False
'Load a single item if you have it'
def _load(self, key):
raise NotImplementedError()
'Load all of them'
def _load_all(self):
raise NotImplementedError()
'Whether or not the key is missing (meaning we know it\'s not there)'
def _missing(self, key):
return False
'Return the error string for a missing function. Override this to return a more meaningful error message if possible'
def missing_fun_string(self, function_name):
return "'{0}' is not available.".format(function_name)
'Check if the key is ttld out, then do the get'
def __getitem__(self, key):
if self._missing(key):
    raise KeyError(key)
if key not in self._dict and not self.loaded:
    if self._load(key):
        log.debug('LazyLoaded %s', key)
        return self._dict[key]
    else:
        log.debug('Could not LazyLoad %s: %s', key, self.missing_fun_string(key))
        raise KeyError(key)
else:
    return self._dict[key]
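A minimal, self-contained sketch of the lazy-loading contract that __getitem__ relies on: _load(key) returns True once the key has been placed in self._dict. The base-class stand-in below only mirrors the methods shown above; everything else is illustrative.

class LazyDictBase(object):
    # Stand-in for the base class above, for illustration only.
    def __init__(self):
        self._dict = {}
        self.loaded = False

    def _missing(self, key):
        return False

    def missing_fun_string(self, name):
        return "'{0}' is not available.".format(name)

    def __getitem__(self, key):
        if self._missing(key):
            raise KeyError(key)
        if key not in self._dict and not self.loaded:
            if self._load(key):
                return self._dict[key]
            raise KeyError(key)
        return self._dict[key]


class DictLoader(LazyDictBase):
    def __init__(self, source):
        super(DictLoader, self).__init__()
        self.source = source

    def _load(self, key):
        if key in self.source:
            self._dict[key] = self.source[key]
            return True
        return False


loader = DictLoader({'test.ping': lambda: True})
assert loader['test.ping']() is True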
'Execute the render system against a single reaction file and return the data structure'
def render_reaction(self, glob_ref, tag, data):
react = {}
if glob_ref.startswith('salt://'):
    glob_ref = self.minion.functions['cp.cache_file'](glob_ref) or ''
globbed_ref = glob.glob(glob_ref)
if not globbed_ref:
    log.error('Can not render SLS {0} for tag {1}. File missing or not found.'.format(glob_ref, tag))
for fn_ in globbed_ref:
    try:
        res = self.render_template(fn_, tag=tag, data=data)
        for name in res:
            res[name]['__sls__'] = fn_
        react.update(res)
    except Exception:
        log.error('Failed to render "{0}": '.format(fn_), exc_info=True)
return react
'Take in the tag from an event and return a list of the reactors to process'
def list_reactors(self, tag):
log.debug('Gathering reactors for tag {0}'.format(tag))
reactors = []
if isinstance(self.opts['reactor'], six.string_types):
    try:
        with salt.utils.files.fopen(self.opts['reactor']) as fp_:
            react_map = yaml.safe_load(fp_.read())
    except (OSError, IOError):
        log.error('Failed to read reactor map: "{0}"'.format(self.opts['reactor']))
    except Exception:
        log.error('Failed to parse YAML in reactor map: "{0}"'.format(self.opts['reactor']))
else:
    react_map = self.opts['reactor']
for ropt in react_map:
    if not isinstance(ropt, dict):
        continue
    if len(ropt) != 1:
        continue
    key = next(six.iterkeys(ropt))
    val = ropt[key]
    if fnmatch.fnmatch(tag, key):
        if isinstance(val, six.string_types):
            reactors.append(val)
        elif isinstance(val, list):
            reactors.extend(val)
return reactors
'Return a list of the reactors'
def list_all(self):
if isinstance(self.minion.opts['reactor'], six.string_types):
    log.debug('Reading reactors from yaml {0}'.format(self.opts['reactor']))
    try:
        with salt.utils.files.fopen(self.opts['reactor']) as fp_:
            react_map = yaml.safe_load(fp_.read())
    except (OSError, IOError):
        log.error('Failed to read reactor map: "{0}"'.format(self.opts['reactor']))
    except Exception:
        log.error('Failed to parse YAML in reactor map: "{0}"'.format(self.opts['reactor']))
else:
    log.debug('Not reading reactors from yaml')
    react_map = self.minion.opts['reactor']
return react_map
'Add a reactor'
def add_reactor(self, tag, reaction):
reactors = self.list_all()
for reactor in reactors:
    _tag = next(six.iterkeys(reactor))
    if _tag == tag:
        return {'status': False, 'comment': 'Reactor already exists.'}
self.minion.opts['reactor'].append({tag: reaction})
return {'status': True, 'comment': 'Reactor added.'}
'Delete a reactor'
def delete_reactor(self, tag):
reactors = self.list_all()
for reactor in reactors:
    _tag = next(six.iterkeys(reactor))
    if _tag == tag:
        self.minion.opts['reactor'].remove(reactor)
        return {'status': True, 'comment': 'Reactor deleted.'}
return {'status': False, 'comment': 'Reactor does not exist.'}
'Render a list of reactor files and returns a reaction struct'
def reactions(self, tag, data, reactors):
log.debug('Compiling reactions for tag {0}'.format(tag))
high = {}
chunks = []
try:
    for fn_ in reactors:
        high.update(self.render_reaction(fn_, tag, data))
    if high:
        errors = self.verify_high(high)
        if errors:
            log.error('Unable to render reactions for event {0} due to errors ({1}) in one or more of the sls files ({2})'.format(tag, errors, reactors))
            return []
        chunks = self.order_chunks(self.compile_high_data(high))
except Exception as exc:
    log.error('Exception trying to compile reactions: {0}'.format(exc), exc_info=True)
return chunks
'Execute the reaction state'
def call_reactions(self, chunks):
for chunk in chunks:
    self.wrap.run(chunk)
'Enter into the server loop'
def run(self):
salt.utils.appendproctitle(self.__class__.__name__)
self.event = salt.utils.event.get_event(
    self.opts['__role'],
    self.opts['sock_dir'],
    self.opts['transport'],
    opts=self.opts,
    listen=True)
self.wrap = ReactWrap(self.opts)
for data in self.event.iter_events(full=True):
    if data['data'].get('user') == self.wrap.event_user:
        continue
    if data['tag'].endswith('salt/reactors/manage/add'):
        _data = data['data']
        res = self.add_reactor(_data['event'], _data['reactors'])
        self.event.fire_event(
            {'reactors': self.list_all(), 'result': res},
            'salt/reactors/manage/add-complete')
    elif data['tag'].endswith('salt/reactors/manage/delete'):
        _data = data['data']
        res = self.delete_reactor(_data['event'])
        self.event.fire_event(
            {'reactors': self.list_all(), 'result': res},
            'salt/reactors/manage/delete-complete')
    elif data['tag'].endswith('salt/reactors/manage/list'):
        self.event.fire_event(
            {'reactors': self.list_all()},
            'salt/reactors/manage/list-results')
    else:
        reactors = self.list_reactors(data['tag'])
        if not reactors:
            continue
        chunks = self.reactions(data['tag'], data['data'], reactors)
        if chunks:
            try:
                self.call_reactions(chunks)
            except SystemExit:
                log.warning('Exit ignored by reactor')
'Execute the specified function in the specified state by passing the low data'
def run(self, low):
l_fun = getattr(self, low['state'])
try:
    f_call = salt.utils.format_call(l_fun, low)
    kwargs = f_call.get('kwargs', {})
    if 'kwarg' not in kwargs:
        kwargs['kwarg'] = {}
    if low['state'] in ('runner', 'wheel'):
        kwargs['__user__'] = self.event_user
    l_fun(*f_call.get('args', ()), **kwargs)
except Exception:
    log.error('Failed to execute {0}: {1}\n'.format(low['state'], l_fun), exc_info=True)
'Wrap LocalClient for running :ref:`execution modules <all-salt.modules>`'
def local(self, *args, **kwargs):
if 'local' not in self.client_cache:
    self.client_cache['local'] = salt.client.LocalClient(self.opts['conf_file'])
try:
    self.client_cache['local'].cmd_async(*args, **kwargs)
except SystemExit:
    log.warning('Attempt to exit reactor. Ignored.')
except Exception as exc:
    log.warning('Exception caught by reactor: {0}'.format(exc))
'Wrap RunnerClient for executing :ref:`runner modules <all-salt.runners>`'
def runner(self, fun, **kwargs):
if 'runner' not in self.client_cache:
    self.client_cache['runner'] = salt.runner.RunnerClient(self.opts)
    # Accessing len() forces the lazy loader to populate the runner functions
    len(self.client_cache['runner'].functions)
try:
    self.pool.fire_async(self.client_cache['runner'].low, args=(fun, kwargs))
except SystemExit:
    log.warning('Attempt to exit in reactor by runner. Ignored')
except Exception as exc:
    log.warning('Exception caught by reactor: {0}'.format(exc))
'Wrap Wheel to enable executing :ref:`wheel modules <all-salt.wheel>`'
def wheel(self, fun, **kwargs):
if 'wheel' not in self.client_cache:
    self.client_cache['wheel'] = salt.wheel.Wheel(self.opts)
    # Accessing len() forces the lazy loader to populate the wheel functions
    len(self.client_cache['wheel'].functions)
try:
    self.pool.fire_async(self.client_cache['wheel'].low, args=(fun, kwargs))
except SystemExit:
    log.warning('Attempt to exit in reactor by wheel. Ignored.')
except Exception as exc:
    log.warning('Exception caught by reactor: {0}'.format(exc))
'Wrap Caller to enable executing :ref:`caller modules <all-salt.caller>`'
def caller(self, fun, *args, **kwargs):
log.debug('in caller with fun {0} args {1} kwargs {2}'.format(fun, args, kwargs))
args = kwargs.get('args', [])
kwargs = kwargs.get('kwargs', {})
if 'caller' not in self.client_cache:
    self.client_cache['caller'] = salt.client.Caller(self.opts['conf_file'])
try:
    self.client_cache['caller'].cmd(fun, *args, **kwargs)
except SystemExit:
    log.warning('Attempt to exit reactor. Ignored.')
except Exception as exc:
    log.warning('Exception caught by reactor: {0}'.format(exc))
'Enforce the TTL for a specific key and delete it if it is past its TTL'
def _enforce_ttl_key(self, key):
if key not in self._key_cache_time:
    return
if time.time() - self._key_cache_time[key] > self._ttl:
    del self._key_cache_time[key]
    dict.__delitem__(self, key)
'Check if the key is ttld out, then do the get'
def __getitem__(self, key):
self._enforce_ttl_key(key)
return dict.__getitem__(self, key)
'Make sure to update the key cache time'
def __setitem__(self, key, val):
self._key_cache_time[key] = time.time()
dict.__setitem__(self, key, val)
'Enforce the TTL for a specific key and delete it if it is past its TTL'
def _enforce_ttl_key(self, key):
if key not in self._key_cache_time:
    return
if time.time() - self._key_cache_time[key] > self._ttl:
    del self._key_cache_time[key]
    self._dict.__delitem__(key)
'Check if the key is ttld out, then do the get'
def __getitem__(self, key):
self._enforce_ttl_key(key)
return self._dict.__getitem__(key)
'Make sure to update the key cache time'
def __setitem__(self, key, val):
self._key_cache_time[key] = time.time()
self._dict.__setitem__(key, val)
self._write()
'Make sure to remove the key cache time'
def __delitem__(self, key):
del self._key_cache_time[key]
self._dict.__delitem__(key)
self._write()
'Read in from disk'
def _read(self):
if not HAS_MSGPACK or not os.path.exists(self._path):
    return
with salt.utils.files.fopen(self._path, 'rb') as fp_:
    cache = msgpack.load(fp_, encoding=__salt_system_encoding__)
if 'CacheDisk_cachetime' in cache:
    self._dict = cache['CacheDisk_data']
    self._key_cache_time = cache['CacheDisk_cachetime']
else:
    self._dict = cache
    timestamp = os.path.getmtime(self._path)
    for key in self._dict:
        self._key_cache_time[key] = timestamp
if log.isEnabledFor(logging.DEBUG):
    log.debug('Disk cache retrieved: {0}'.format(cache))
'Write out to disk'
def _write(self):
if not HAS_MSGPACK:
    return
with salt.utils.files.fopen(self._path, 'wb+') as fp_:
    cache = {
        'CacheDisk_data': self._dict,
        'CacheDisk_cachetime': self._key_cache_time,
    }
    msgpack.dump(cache, fp_, use_bin_type=True)
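The on-disk payload written above is a two-key msgpack map; a standalone round-trip of that shape (msgpack-python and its packb/unpackb helpers are assumed to be available):

import msgpack

payload = {
    'CacheDisk_data': {'minion1': {'grains': {}}},
    'CacheDisk_cachetime': {'minion1': 1500000000.0},
}
packed = msgpack.packb(payload, use_bin_type=True)
restored = msgpack.unpackb(packed, raw=False)  # raw=False needs msgpack >= 0.5
assert restored['CacheDisk_data'] == payload['CacheDisk_data']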
'Sets up the zmq-connection to the ConCache'
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts.get('serial', ''))
self.cache_sock = os.path.join(self.opts['sock_dir'], 'con_cache.ipc')
self.cache_upd_sock = os.path.join(self.opts['sock_dir'], 'con_upd.ipc')
context = zmq.Context()
# REQ socket used to query the ConCache
self.creq_out = context.socket(zmq.REQ)
self.creq_out.setsockopt(zmq.LINGER, 100)
self.creq_out.connect('ipc://' + self.cache_sock)
# PUB socket used to push updates into the ConCache
self.cupd_out = context.socket(zmq.PUB)
self.cupd_out.setsockopt(zmq.LINGER, 1)
self.cupd_out.connect('ipc://' + self.cache_upd_sock)
'Publish the given minions to the ConCache'
def put_cache(self, minions):
self.cupd_out.send(self.serial.dumps(minions))
'queries the ConCache for a list of currently connected minions'
def get_cached(self):
msg = self.serial.dumps('minions')
self.creq_out.send(msg)
min_list = self.serial.loads(self.creq_out.recv())
return min_list
'Clear the cache'
def clear(self):
self.cache.clear()
'Sweep the cache and remove the outdated or least frequently used entries'
def sweep(self):
if self.max_age < time.time() - self.timestamp:
    self.clear()
    self.timestamp = time.time()
else:
    patterns = list(self.cache.values())
    patterns.sort()
    for idx in range(self.clear_size):
        del self.cache[patterns[idx][2]]
'Get a compiled regular expression object based on pattern and cache it when it is not in the cache already'
def get(self, pattern):
try:
    self.cache[pattern][0] += 1
    return self.cache[pattern][1]
except KeyError:
    pass
if len(self.cache) > self.size:
    self.sweep()
regex = re.compile('{0}{1}{2}'.format(self.prepend, pattern, self.append))
self.cache[pattern] = [1, regex, pattern, time.time()]
return regex
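A reduced, self-contained version of the compile-and-cache idea above; the constructor and sizing attributes are assumptions inferred from the names used in get() and sweep():

import re
import time


class RegexCache(object):
    def __init__(self, prepend='', append='', size=100):
        self.prepend = prepend
        self.append = append
        self.size = size
        self.cache = {}  # pattern -> [hit_count, compiled, pattern, added_at]

    def get(self, pattern):
        try:
            self.cache[pattern][0] += 1
            return self.cache[pattern][1]
        except KeyError:
            pass
        if len(self.cache) > self.size:
            self.cache.clear()  # simplified stand-in for sweep()
        regex = re.compile('{0}{1}{2}'.format(self.prepend, pattern, self.append))
        self.cache[pattern] = [1, regex, pattern, time.time()]
        return regex


matcher = RegexCache(prepend='^', append='$')
assert matcher.get('web.*').match('web01')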
'Create a context cache'
def __init__(self, opts, name):
self.opts = opts
self.cache_path = os.path.join(opts['cachedir'], 'context', '{0}.p'.format(name))
self.serial = salt.payload.Serial(self.opts)
'Cache the given context to disk'
def cache_context(self, context):
if not os.path.isdir(os.path.dirname(self.cache_path)):
    os.mkdir(os.path.dirname(self.cache_path))
with salt.utils.files.fopen(self.cache_path, 'w+b') as cache:
    self.serial.dump(context, cache)
'Retrieve a context cache from disk'
def get_cache_context(self):
with salt.utils.files.fopen(self.cache_path, 'rb') as cache:
    return self.serial.load(cache)
'Process a list of strings, each corresponding to the recorded changes. Args: text: A list of lines of text (assumed to contain newlines) Returns: A tuple of the modified text, a textual report of the changes, and the accumulated errors. Raises: ValueError: if substitution source location does not have expected text.'
def process(self, text):
change_report = ''
for (line, edits) in self._line_to_edit.items():
    offset = 0
    edits.sort(key=lambda x: x.start)
    char_array = list(text[line - 1])
    change_report += '%r Line %d\n' % (self._filename, line)
    change_report += ('-' * 80) + '\n\n'
    for e in edits:
        change_report += '%s\n' % e.comment
    change_report += '\n Old: %s' % text[line - 1]
    change_list = [' '] * len(text[line - 1])
    change_list_new = [' '] * len(text[line - 1])
    for e in edits:
        start_eff = e.start + offset
        end_eff = start_eff + len(e.old)
        old_actual = ''.join(char_array[start_eff:end_eff])
        if old_actual != e.old:
            raise ValueError('Expected text %r but got %r' %
                             (''.join(e.old), ''.join(old_actual)))
        char_array[start_eff:end_eff] = list(e.new)
        change_list[e.start:e.start + len(e.old)] = '~' * len(e.old)
        change_list_new[start_eff:end_eff] = '~' * len(e.new)
        offset += len(e.new) - len(e.old)
    change_report += ' %s\n' % ''.join(change_list)
    text[line - 1] = ''.join(char_array)
    change_report += ' New: %s' % text[line - 1]
    change_report += ' %s\n\n' % ''.join(change_list_new)
return (''.join(text), change_report, self._errors)
'Add a new change that is needed. Args: comment: A description of what was changed line: Line number (1 indexed) start: Column offset (0 indexed) old: old text new: new text error: this "edit" is something that cannot be fixed automatically Returns: None'
def add(self, comment, line, start, old, new, error=None):
self._line_to_edit[line].append(FileEditTuple(comment, line, start, old, new))
if error:
    self._errors.append('%s:%d: %s' % (self._filename, line, error))
'Traverse an attribute to generate a full name, e.g. tf.foo.bar. Args: node: A Node of type Attribute. Returns: a \'.\'-delimited full-name or None if the tree was not a simple form, i.e. `(foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".'
def _get_attribute_full_path(self, node):
curr = node
items = []
while not isinstance(curr, ast.Name):
    if not isinstance(curr, ast.Attribute):
        return None
    items.append(curr.attr)
    curr = curr.value
items.append(curr.id)
return '.'.join(reversed(items))
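The same traversal, demonstrated on a parsed expression with the standard ast module (the wrapper function is mine, not part of the upgrader):

import ast


def attribute_full_name(node):
    # Walk Attribute nodes down to the root Name, as in the method above.
    items = []
    curr = node
    while not isinstance(curr, ast.Name):
        if not isinstance(curr, ast.Attribute):
            return None
        items.append(curr.attr)
        curr = curr.value
    items.append(curr.id)
    return '.'.join(reversed(items))


call = ast.parse('tf.nn.relu(x)', mode='eval').body           # an ast.Call node
assert attribute_full_name(call.func) == 'tf.nn.relu'
assert attribute_full_name(ast.parse('(a() + b).c', mode='eval').body) is None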
'Return correct line number and column offset for a given node. This is necessary mainly because ListComp\'s location reporting reports the next token after the list comprehension list opening. Args: node: Node for which we wish to know the lineno and col_offset'
def _find_true_position(self, node):
import re
find_open = re.compile('^\\s*(\\[).*$')
find_string_chars = re.compile('[\'"]')
if isinstance(node, ast.ListComp):
    line = node.lineno
    col = node.col_offset
    while 1:
        text = self._lines[line - 1]
        reversed_preceding_text = text[:col][::-1]
        m = find_open.match(reversed_preceding_text)
        if m:
            new_col_offset = col - m.start(1) - 1
            return (line, new_col_offset)
        elif reversed_preceding_text == '' or reversed_preceding_text.isspace():
            line = line - 1
            prev_line = self._lines[line - 1]
            comment_start = prev_line.find('#')
            if comment_start == -1:
                col = len(prev_line) - 1
            elif find_string_chars.search(prev_line[comment_start:]) is None:
                col = comment_start
            else:
                return (None, None)
        else:
            return (None, None)
return (node.lineno, node.col_offset)
'Handle visiting a call node in the AST. Args: node: Current Node'
def visit_Call(self, node):
full_name = self._get_attribute_full_path(node.func) node.func.is_function_for_call = True if (full_name and full_name.startswith('tf.')): function_handles = self._api_change_spec.function_handle if (full_name in function_handles): function_handles[full_name](self._file_edit, node) function_reorders = self._api_change_spec.function_reorders function_keyword_renames = self._api_change_spec.function_keyword_renames if (full_name in function_reorders): reordered = function_reorders[full_name] for (idx, arg) in enumerate(node.args): (lineno, col_offset) = self._find_true_position(arg) if ((lineno is None) or (col_offset is None)): self._file_edit.add(('Failed to add keyword %r to reordered function %r' % (reordered[idx], full_name)), arg.lineno, arg.col_offset, '', '', error='A necessary keyword argument failed to be inserted.') else: keyword_arg = reordered[idx] if ((full_name in function_keyword_renames) and (keyword_arg in function_keyword_renames[full_name])): keyword_arg = function_keyword_renames[full_name][keyword_arg] self._file_edit.add(('Added keyword %r to reordered function %r' % (reordered[idx], full_name)), lineno, col_offset, '', (keyword_arg + '=')) renamed_keywords = ({} if (full_name not in function_keyword_renames) else function_keyword_renames[full_name]) for keyword in node.keywords: argkey = keyword.arg argval = keyword.value if (argkey in renamed_keywords): (argval_lineno, argval_col_offset) = self._find_true_position(argval) if ((argval_lineno is not None) and (argval_col_offset is not None)): key_start = ((argval_col_offset - len(argkey)) - 1) key_end = ((key_start + len(argkey)) + 1) if (self._lines[(argval_lineno - 1)][key_start:key_end] == (argkey + '=')): self._file_edit.add(('Renamed keyword argument from %r to %r' % (argkey, renamed_keywords[argkey])), argval_lineno, ((argval_col_offset - len(argkey)) - 1), (argkey + '='), (renamed_keywords[argkey] + '=')) continue self._file_edit.add(('Failed to rename keyword argument from %r to %r' % (argkey, renamed_keywords[argkey])), argval.lineno, ((argval.col_offset - len(argkey)) - 1), '', '', error='Failed to find keyword lexographically. Fix manually.') ast.NodeVisitor.generic_visit(self, node)
'Handle bare Attributes i.e. [tf.foo, tf.bar]. Args: node: Node that is of type ast.Attribute'
def visit_Attribute(self, node):
full_name = self._get_attribute_full_path(node)
if full_name and full_name.startswith('tf.'):
    self._rename_functions(node, full_name)
if full_name in self._api_change_spec.change_to_function:
    if not hasattr(node, 'is_function_for_call'):
        new_text = full_name + '()'
        self._file_edit.add('Changed %r to %r' % (full_name, new_text),
                            node.lineno, node.col_offset, full_name, new_text)
ast.NodeVisitor.generic_visit(self, node)
'Process the given python file for incompatible changes. Args: in_filename: filename to parse out_filename: output file to write to Returns: A tuple representing number of files processed, log of actions, errors'
def process_file(self, in_filename, out_filename):
with open(in_filename, 'r') as in_file:
    with tempfile.NamedTemporaryFile('w', delete=False) as temp_file:
        ret = self.process_opened_file(in_filename, in_file, out_filename, temp_file)
shutil.move(temp_file.name, out_filename)
return ret
'Process the given python file for incompatible changes. This function is split out to facilitate StringIO testing from tf_upgrade_test.py. Args: in_filename: filename to parse in_file: opened file (or StringIO) out_filename: output file to write to out_file: opened file (or StringIO) Returns: A tuple representing number of files processed, log of actions, errors'
def process_opened_file(self, in_filename, in_file, out_filename, out_file):
process_errors = []
text = ('-' * 80) + '\n'
text += 'Processing file %r\n outputting to %r\n' % (in_filename, out_filename)
text += ('-' * 80) + '\n\n'
parsed_ast = None
lines = in_file.readlines()
try:
    parsed_ast = ast.parse(''.join(lines))
except Exception:
    text += 'Failed to parse %r\n\n' % in_filename
    text += traceback.format_exc()
if parsed_ast:
    visitor = TensorFlowCallVisitor(in_filename, lines)
    visitor.visit(parsed_ast)
    (out_text, new_text, process_errors) = visitor.process(lines)
    text += new_text
    if out_file:
        out_file.write(out_text)
text += '\n'
return (1, text, process_errors)
'Processes upgrades on an entire tree of python files in place. Note that only Python files are processed; if you have custom code in other languages, you will need to manually upgrade those. Args: root_directory: Directory to walk and process. output_root_directory: Directory to use as base. Returns: A tuple of files processed, the report string for all files, and errors.'
def process_tree(self, root_directory, output_root_directory):
if output_root_directory and os.path.exists(output_root_directory):
    print('Output directory %r must not already exist.' % output_root_directory)
    sys.exit(1)
norm_root = os.path.split(os.path.normpath(root_directory))
norm_output = os.path.split(os.path.normpath(output_root_directory))
if norm_root == norm_output:
    print('Output directory %r same as input directory %r' %
          (root_directory, output_root_directory))
    sys.exit(1)
files_to_process = []
for (dir_name, _, file_list) in os.walk(root_directory):
    py_files = [f for f in file_list if f.endswith('.py')]
    for filename in py_files:
        fullpath = os.path.join(dir_name, filename)
        fullpath_output = os.path.join(
            output_root_directory, os.path.relpath(fullpath, root_directory))
        files_to_process.append((fullpath, fullpath_output))
file_count = 0
tree_errors = []
report = ''
report += ('=' * 80) + '\n'
report += 'Input tree: %r\n' % root_directory
report += ('=' * 80) + '\n'
for (input_path, output_path) in files_to_process:
    output_directory = os.path.dirname(output_path)
    if not os.path.isdir(output_directory):
        os.makedirs(output_directory)
    file_count += 1
    (_, l_report, l_errors) = self.process_file(input_path, output_path)
    tree_errors += l_errors
    report += l_report
return (file_count, report, tree_errors)
'Create a DeploymentConfig. The config describes how to deploy a model across multiple clones and replicas. The model will be replicated `num_clones` times in each replica. If `clone_on_cpu` is True, each clone will be placed on CPU. If `num_replicas` is 1, the model is deployed via a single process. In that case `worker_device`, `num_ps_tasks`, and `ps_device` are ignored. If `num_replicas` is greater than 1, then `worker_device` and `ps_device` must specify TensorFlow devices for the `worker` and `ps` jobs and `num_ps_tasks` must be positive. Args: num_clones: Number of model clones to deploy in each replica. clone_on_cpu: If True clones would be placed on CPU. replica_id: Integer. Index of the replica for which the model is deployed. Usually 0 for the chief replica. num_replicas: Number of replicas to use. num_ps_tasks: Number of tasks for the `ps` job. 0 to not use replicas. worker_job_name: A name for the worker job. ps_job_name: A name for the parameter server job. Raises: ValueError: If the arguments are invalid.'
def __init__(self, num_clones=1, clone_on_cpu=False, replica_id=0, num_replicas=1, num_ps_tasks=0, worker_job_name='worker', ps_job_name='ps'):
if num_replicas > 1:
    if num_ps_tasks < 1:
        raise ValueError('When using replicas num_ps_tasks must be positive')
if num_replicas > 1 or num_ps_tasks > 0:
    if not worker_job_name:
        raise ValueError('Must specify worker_job_name when using replicas')
    if not ps_job_name:
        raise ValueError('Must specify ps_job_name when using parameter server')
if replica_id >= num_replicas:
    raise ValueError('replica_id must be less than num_replicas')
self._num_clones = num_clones
self._clone_on_cpu = clone_on_cpu
self._replica_id = replica_id
self._num_replicas = num_replicas
self._num_ps_tasks = num_ps_tasks
self._ps_device = '/job:' + ps_job_name if num_ps_tasks > 0 else ''
self._worker_device = '/job:' + worker_job_name if num_ps_tasks > 0 else ''
'Returns the device to use for caching variables. Variables are cached on the worker CPU when using replicas. Returns: A device string or None if the variables do not need to be cached.'
def caching_device(self):
if self._num_ps_tasks > 0:
    return lambda op: op.device
else:
    return None
'Device used to create the clone and all the ops inside the clone. Args: clone_index: Int, representing the clone_index. Returns: A value suitable for `tf.device()`. Raises: ValueError: if `clone_index` is greater than or equal to the number of clones.'
def clone_device(self, clone_index):
if clone_index >= self._num_clones:
    raise ValueError('clone_index must be less than num_clones')
device = ''
if self._num_ps_tasks > 0:
    device += self._worker_device
if self._clone_on_cpu:
    device += '/device:CPU:0'
elif self._num_clones > 1:
    device += '/device:GPU:%d' % clone_index
return device
'Name scope to create the clone. Args: clone_index: Int, representing the clone_index. Returns: A name_scope suitable for `tf.name_scope()`. Raises: ValueError: if `clone_index` is greater than or equal to the number of clones.'
def clone_scope(self, clone_index):
if clone_index >= self._num_clones:
    raise ValueError('clone_index must be less than num_clones')
scope = ''
if self._num_clones > 1:
    scope = 'clone_%d' % clone_index
return scope
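A usage sketch tying the device helpers together; it assumes the DeploymentConfig class above is importable (for example from TF-Slim's deployment.model_deploy, the path being an assumption), and the expected strings follow from clone_device and clone_scope above:

config = DeploymentConfig(num_clones=2, num_replicas=2, num_ps_tasks=1)
assert config.clone_device(0) == '/job:worker/device:GPU:0'
assert config.clone_scope(1) == 'clone_1'

# Without a parameter server the job prefix is dropped:
assert DeploymentConfig(num_clones=2).clone_device(1) == '/device:GPU:1'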
'Device to use with the optimizer. Returns: A value suitable for `tf.device()`.'
def optimizer_device(self):
if self._num_ps_tasks > 0 or self._num_clones > 0:
    return self._worker_device + '/device:CPU:0'
else:
    return ''
'Device to use to build the inputs. Returns: A value suitable for `tf.device()`.'
def inputs_device(self):
device = ''
if self._num_ps_tasks > 0:
    device += self._worker_device
device += '/device:CPU:0'
return device
'Returns the device to use for variables created inside the clone. Returns: A value suitable for `tf.device()`.'
def variables_device(self):
device = '' if (self._num_ps_tasks > 0): device += self._ps_device device += '/device:CPU:0' class _PSDeviceChooser(object, ): 'Slim device chooser for variables when using PS.' def __init__(self, device, tasks): self._device = device self._tasks = tasks self._task = 0 def choose(self, op): if op.device: return op.device node_def = (op if isinstance(op, tf.NodeDef) else op.node_def) if (node_def.op == 'Variable'): t = self._task self._task = ((self._task + 1) % self._tasks) d = ('%s/task:%d' % (self._device, t)) return d else: return op.device if (not self._num_ps_tasks): return device else: chooser = _PSDeviceChooser(device, self._num_ps_tasks) return chooser.choose
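A sketch of the round-robin behaviour of the returned chooser when ps tasks are configured; the device strings in the comments are what the chooser produces for ops whose type matches 'Variable' (other op types keep their existing placement):

cfg = DeploymentConfig(num_clones=1, num_replicas=2, num_ps_tasks=2)
with tf.device(cfg.variables_device()):
    v1 = tf.Variable(tf.zeros([10]))   # e.g. '/job:ps/device:CPU:0/task:0'
    v2 = tf.Variable(tf.zeros([10]))   # e.g. '/job:ps/device:CPU:0/task:1'
    v3 = tf.Variable(tf.zeros([10]))   # wraps around to task:0 again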
'A plain ResNet without extra layers before or after the ResNet blocks.'
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
with tf.variable_scope(scope, values=[inputs]): with slim.arg_scope([slim.conv2d], outputs_collections='end_points'): net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride) end_points = dict(tf.get_collection('end_points')) return (net, end_points)
'Test the end points of a tiny v1 bottleneck network.'
def testEndPointsV1(self):
bottleneck = resnet_v1.bottleneck blocks = [resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]), resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 1)])] inputs = create_test_input(2, 32, 16, 3) with slim.arg_scope(resnet_utils.resnet_arg_scope()): (_, end_points) = self._resnet_plain(inputs, blocks, scope='tiny') expected = ['tiny/block1/unit_1/bottleneck_v1/shortcut', 'tiny/block1/unit_1/bottleneck_v1/conv1', 'tiny/block1/unit_1/bottleneck_v1/conv2', 'tiny/block1/unit_1/bottleneck_v1/conv3', 'tiny/block1/unit_2/bottleneck_v1/conv1', 'tiny/block1/unit_2/bottleneck_v1/conv2', 'tiny/block1/unit_2/bottleneck_v1/conv3', 'tiny/block2/unit_1/bottleneck_v1/shortcut', 'tiny/block2/unit_1/bottleneck_v1/conv1', 'tiny/block2/unit_1/bottleneck_v1/conv2', 'tiny/block2/unit_1/bottleneck_v1/conv3', 'tiny/block2/unit_2/bottleneck_v1/conv1', 'tiny/block2/unit_2/bottleneck_v1/conv2', 'tiny/block2/unit_2/bottleneck_v1/conv3'] self.assertItemsEqual(expected, end_points)
'A simplified ResNet Block stacker without output stride control.'
def _stack_blocks_nondense(self, net, blocks):
for block in blocks: with tf.variable_scope(block.scope, 'block', [net]): for (i, unit) in enumerate(block.args): (depth, depth_bottleneck, stride) = unit with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = block.unit_fn(net, depth=depth, depth_bottleneck=depth_bottleneck, stride=stride, rate=1) return net
'Verify the values of dense feature extraction by atrous convolution. Make sure that dense feature extraction by stack_blocks_dense() followed by subsampling gives results identical to feature extraction at the nominal network output stride, as computed by the simple self._stack_blocks_nondense() above. Args: bottleneck: The bottleneck function.'
def _atrousValues(self, bottleneck):
blocks = [resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]), resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 2)]), resnet_utils.Block('block3', bottleneck, [(16, 4, 1), (16, 4, 2)]), resnet_utils.Block('block4', bottleneck, [(32, 8, 1), (32, 8, 1)])] nominal_stride = 8 height = 30 width = 31 with slim.arg_scope(resnet_utils.resnet_arg_scope()): with slim.arg_scope([slim.batch_norm], is_training=False): for output_stride in [1, 2, 4, 8, None]: with tf.Graph().as_default(): with self.test_session() as sess: tf.set_random_seed(0) inputs = create_test_input(1, height, width, 3) output = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride) if (output_stride is None): factor = 1 else: factor = (nominal_stride // output_stride) output = resnet_utils.subsample(output, factor) tf.get_variable_scope().reuse_variables() expected = self._stack_blocks_nondense(inputs, blocks) sess.run(tf.global_variables_initializer()) (output, expected) = sess.run([output, expected]) self.assertAllClose(output, expected, atol=0.0001, rtol=0.0001)
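The subsampling factor used in the test follows from the block strides; spelled out as a small sketch:

# nominal_stride is the product of the last-unit strides of the four blocks:
# block1 (2) * block2 (2) * block3 (2) * block4 (1) = 8.
nominal_stride = 2 * 2 * 2 * 1

for output_stride in [1, 2, 4, 8, None]:
    factor = 1 if output_stride is None else nominal_stride // output_stride
    # output_stride 1 -> factor 8, 2 -> 4, 4 -> 2, 8 -> 1, None -> 1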
'A shallow and thin ResNet v1 for faster tests.'
def _resnet_small(self, inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, include_root_block=True, reuse=None, scope='resnet_v1_small'):
bottleneck = resnet_v1.bottleneck blocks = [resnet_utils.Block('block1', bottleneck, (([(4, 1, 1)] * 2) + [(4, 1, 2)])), resnet_utils.Block('block2', bottleneck, (([(8, 2, 1)] * 2) + [(8, 2, 2)])), resnet_utils.Block('block3', bottleneck, (([(16, 4, 1)] * 2) + [(16, 4, 2)])), resnet_utils.Block('block4', bottleneck, ([(32, 8, 1)] * 2))] return resnet_v1.resnet_v1(inputs, blocks, num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, reuse=reuse, scope=scope)
'Verify dense feature extraction with atrous convolution.'
def testAtrousFullyConvolutionalValues(self):
nominal_stride = 32 for output_stride in [4, 8, 16, 32, None]: with slim.arg_scope(resnet_utils.resnet_arg_scope()): with tf.Graph().as_default(): with self.test_session() as sess: tf.set_random_seed(0) inputs = create_test_input(2, 81, 81, 3) (output, _) = self._resnet_small(inputs, None, is_training=False, global_pool=False, output_stride=output_stride) if (output_stride is None): factor = 1 else: factor = (nominal_stride // output_stride) output = resnet_utils.subsample(output, factor) tf.get_variable_scope().reuse_variables() (expected, _) = self._resnet_small(inputs, None, is_training=False, global_pool=False) sess.run(tf.global_variables_initializer()) self.assertAllClose(output.eval(), expected.eval(), atol=0.0001, rtol=0.0001)
'A plain ResNet without extra layers before or after the ResNet blocks.'
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
with tf.variable_scope(scope, values=[inputs]): with slim.arg_scope([slim.conv2d], outputs_collections='end_points'): net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride) end_points = dict(tf.get_collection('end_points')) return (net, end_points)
'Test the end points of a tiny v2 bottleneck network.'
def testEndPointsV2(self):
bottleneck = resnet_v2.bottleneck blocks = [resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]), resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 1)])] inputs = create_test_input(2, 32, 16, 3) with slim.arg_scope(resnet_utils.resnet_arg_scope()): (_, end_points) = self._resnet_plain(inputs, blocks, scope='tiny') expected = ['tiny/block1/unit_1/bottleneck_v2/shortcut', 'tiny/block1/unit_1/bottleneck_v2/conv1', 'tiny/block1/unit_1/bottleneck_v2/conv2', 'tiny/block1/unit_1/bottleneck_v2/conv3', 'tiny/block1/unit_2/bottleneck_v2/conv1', 'tiny/block1/unit_2/bottleneck_v2/conv2', 'tiny/block1/unit_2/bottleneck_v2/conv3', 'tiny/block2/unit_1/bottleneck_v2/shortcut', 'tiny/block2/unit_1/bottleneck_v2/conv1', 'tiny/block2/unit_1/bottleneck_v2/conv2', 'tiny/block2/unit_1/bottleneck_v2/conv3', 'tiny/block2/unit_2/bottleneck_v2/conv1', 'tiny/block2/unit_2/bottleneck_v2/conv2', 'tiny/block2/unit_2/bottleneck_v2/conv3'] self.assertItemsEqual(expected, end_points)
'A simplified ResNet Block stacker without output stride control.'
def _stack_blocks_nondense(self, net, blocks):
for block in blocks: with tf.variable_scope(block.scope, 'block', [net]): for (i, unit) in enumerate(block.args): (depth, depth_bottleneck, stride) = unit with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = block.unit_fn(net, depth=depth, depth_bottleneck=depth_bottleneck, stride=stride, rate=1) return net
'Verify the values of dense feature extraction by atrous convolution. Make sure that dense feature extraction by stack_blocks_dense() followed by subsampling gives results identical to feature extraction at the nominal network output stride, as computed by the simple self._stack_blocks_nondense() above. Args: bottleneck: The bottleneck function.'
def _atrousValues(self, bottleneck):
blocks = [resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]), resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 2)]), resnet_utils.Block('block3', bottleneck, [(16, 4, 1), (16, 4, 2)]), resnet_utils.Block('block4', bottleneck, [(32, 8, 1), (32, 8, 1)])] nominal_stride = 8 height = 30 width = 31 with slim.arg_scope(resnet_utils.resnet_arg_scope()): with slim.arg_scope([slim.batch_norm], is_training=False): for output_stride in [1, 2, 4, 8, None]: with tf.Graph().as_default(): with self.test_session() as sess: tf.set_random_seed(0) inputs = create_test_input(1, height, width, 3) output = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride) if (output_stride is None): factor = 1 else: factor = (nominal_stride // output_stride) output = resnet_utils.subsample(output, factor) tf.get_variable_scope().reuse_variables() expected = self._stack_blocks_nondense(inputs, blocks) sess.run(tf.global_variables_initializer()) (output, expected) = sess.run([output, expected]) self.assertAllClose(output, expected, atol=0.0001, rtol=0.0001)
'A shallow and thin ResNet v2 for faster tests.'
def _resnet_small(self, inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, include_root_block=True, reuse=None, scope='resnet_v2_small'):
bottleneck = resnet_v2.bottleneck blocks = [resnet_utils.Block('block1', bottleneck, (([(4, 1, 1)] * 2) + [(4, 1, 2)])), resnet_utils.Block('block2', bottleneck, (([(8, 2, 1)] * 2) + [(8, 2, 2)])), resnet_utils.Block('block3', bottleneck, (([(16, 4, 1)] * 2) + [(16, 4, 2)])), resnet_utils.Block('block4', bottleneck, ([(32, 8, 1)] * 2))] return resnet_v2.resnet_v2(inputs, blocks, num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, reuse=reuse, scope=scope)
'Verify dense feature extraction with atrous convolution.'
def testAtrousFullyConvolutionalValues(self):
nominal_stride = 32 for output_stride in [4, 8, 16, 32, None]: with slim.arg_scope(resnet_utils.resnet_arg_scope()): with tf.Graph().as_default(): with self.test_session() as sess: tf.set_random_seed(0) inputs = create_test_input(2, 81, 81, 3) (output, _) = self._resnet_small(inputs, None, is_training=False, global_pool=False, output_stride=output_stride) if (output_stride is None): factor = 1 else: factor = (nominal_stride // output_stride) output = resnet_utils.subsample(output, factor) tf.get_variable_scope().reuse_variables() (expected, _) = self._resnet_small(inputs, None, is_training=False, global_pool=False) sess.run(tf.global_variables_initializer()) self.assertAllClose(output.eval(), expected.eval(), atol=0.0001, rtol=0.0001)
'Return the next `batch_size` examples from this data set.'
def next_batch(self, batch_size, fake_data=False):
if fake_data: fake_image = [1.0 for _ in xrange(784)] fake_label = 0 return ([fake_image for _ in xrange(batch_size)], [fake_label for _ in xrange(batch_size)]) start = self._index_in_epoch self._index_in_epoch += batch_size if (self._index_in_epoch > self._num_examples): self._epochs_completed += 1 perm = numpy.arange(self._num_examples) numpy.random.shuffle(perm) self._images = self._images[perm] self._labels = self._labels[perm] start = 0 self._index_in_epoch = batch_size assert (batch_size <= self._num_examples) end = self._index_in_epoch return (self._images[start:end], self._labels[start:end])
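A worked example of the epoch-rollover arithmetic above, with small numbers (plain Python, no TensorFlow):

num_examples = 10
batch_size = 4
index_in_epoch = 8              # state after serving batches [0:4] and [4:8]

index_in_epoch += batch_size    # 12 > 10, so the epoch ends here
if index_in_epoch > num_examples:
    # epochs_completed += 1, the data is reshuffled, the two leftover
    # examples of the old epoch are discarded, and the pointer restarts.
    start, index_in_epoch = 0, batch_size   # next slice served is [0:4]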
'Get all the tokens of the corpus: tokenize every title, collect the labels, and build the gensim dictionary.'
def __get_all_tokens(self):
fwrite = open(self.data_path.replace('all_title.csv', 'all_token.csv'), 'w') with open(self.data_path, 'r') as fread: i = 0 for line in fread.readlines(): try: line_list = line.strip().split(' DCTB ') label = line_list[0] self.labels.append(label) text = line_list[1] text_tokens = self.cut_doc_obj.run(text) self.corpus.append(text_tokens) self.dictionary.add_documents([text_tokens]) fwrite.write((((label + ' DCTB ') + '\\'.join(text_tokens)) + '\n')) i += 1 except BaseException as e: msg = traceback.format_exc() print msg print '=====>Read Done<======' break self.token_len = self.dictionary.__len__() print ('all token len ' + str(self.token_len)) self.num_data = i fwrite.close()
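The dictionary bookkeeping above relies on gensim; a minimal standalone sketch of the same calls (the tokenised titles are made up):

from gensim import corpora

docs = [['deep', 'learning', 'title'],     # hypothetical tokenised titles
        ['machine', 'learning', 'news']]

dictionary = corpora.Dictionary()
for tokens in docs:
    dictionary.add_documents([tokens])     # same call as in __get_all_tokens

print(len(dictionary))                     # 5 distinct tokens in this toy corpus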
'Build the bag-of-words (doc2bow) vector representation of the corpus and pickle the vectors and labels to disk.'
def vec(self):
self.__get_all_tokens() print 'before filter, the tokens len: {0}'.format(self.dictionary.__len__()) self.__filter_tokens() print 'After filter, the tokens len: {0}'.format(self.dictionary.__len__()) self.bow = [] for file_token in self.corpus: file_bow = self.dictionary.doc2bow(file_token) self.bow.append(file_bow) bow_vec_file = open(self.data_path.replace('all_title.csv', 'bow_vec.pl'), 'wb') pickle.dump(self.bow, bow_vec_file) bow_vec_file.close() bow_label_file = open(self.data_path.replace('all_title.csv', 'bow_label.pl'), 'wb') pickle.dump(self.labels, bow_label_file) bow_label_file.close()
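Continuing the toy sketch above: `doc2bow` maps a token list to a sparse list of `(token_id, count)` pairs, which is what gets pickled to `bow_vec.pl`:

bow = dictionary.doc2bow(['learning', 'learning', 'title'])
# e.g. [(1, 2), (2, 1)] -- the ids depend on insertion order in the dictionary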