'Fetches a list of all configured VPN services for a tenant'
def list_vpnservices(self, retrieve_all=True, **kwargs):
return self.network_conn.list_vpnservices(retrieve_all, **kwargs)
'Fetches information of a specific VPN service'
def show_vpnservice(self, vpnservice, **kwargs):
vpnservice_id = self._find_vpnservice_id(vpnservice)
return self.network_conn.show_vpnservice(vpnservice_id, **kwargs)
'Creates a new VPN service'
def create_vpnservice(self, subnet, router, name, admin_state_up=True):
subnet_id = self._find_subnet_id(subnet)
router_id = self._find_router_id(router)
body = {'subnet_id': subnet_id,
        'router_id': router_id,
        'name': name,
        'admin_state_up': admin_state_up}
return self.network_conn.create_vpnservice(body={'vpnservice': body})
'Updates a VPN service'
def update_vpnservice(self, vpnservice, desc):
vpnservice_id = self._find_vpnservice_id(vpnservice)
body = {'description': desc}
return self.network_conn.update_vpnservice(vpnservice_id, body={'vpnservice': body})
'Deletes the specified VPN service'
def delete_vpnservice(self, vpnservice):
vpnservice_id = self._find_vpnservice_id(vpnservice)
ret = self.network_conn.delete_vpnservice(vpnservice_id)
return ret if ret else True
'Fetches all configured IPsec Site Connections for a tenant'
def list_ipsec_site_connections(self):
return self.network_conn.list_ipsec_site_connections()
'Fetches information of a specific IPsecSiteConnection'
def show_ipsec_site_connection(self, ipsec_site_connection):
return self._fetch_ipsec_site_connection(ipsec_site_connection)
'Creates a new IPsecSiteConnection'
def create_ipsec_site_connection(self, name, ipsecpolicy, ikepolicy, vpnservice, peer_cidrs, peer_address, peer_id, psk, admin_state_up=True, **kwargs):
ipsecpolicy_id = self._find_ipsecpolicy_id(ipsecpolicy)
ikepolicy_id = self._find_ikepolicy_id(ikepolicy)
vpnservice_id = self._find_vpnservice_id(vpnservice)
body = {'psk': psk,
        'ipsecpolicy_id': ipsecpolicy_id,
        'admin_state_up': admin_state_up,
        'peer_cidrs': [peer_cidrs],
        'ikepolicy_id': ikepolicy_id,
        'vpnservice_id': vpnservice_id,
        'peer_address': peer_address,
        'peer_id': peer_id,
        'name': name}
if 'initiator' in kwargs:
    body['initiator'] = kwargs['initiator']
if 'mtu' in kwargs:
    body['mtu'] = kwargs['mtu']
if 'dpd_action' in kwargs:
    body['dpd'] = {'action': kwargs['dpd_action']}
if 'dpd_interval' in kwargs:
    if 'dpd' not in body:
        body['dpd'] = {}
    body['dpd']['interval'] = kwargs['dpd_interval']
if 'dpd_timeout' in kwargs:
    if 'dpd' not in body:
        body['dpd'] = {}
    body['dpd']['timeout'] = kwargs['dpd_timeout']
return self.network_conn.create_ipsec_site_connection(body={'ipsec_site_connection': body})
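A hedged usage sketch of the method above (the `neutron_client` instance and all resource names are hypothetical; note how the flat dpd_* kwargs are folded into the nested 'dpd' dict):

# Hypothetical usage; 'neutron_client' stands in for an instance of the
# class these methods belong to, and every resource name is made up.
neutron_client.create_ipsec_site_connection(
    name='site-a-to-site-b',
    ipsecpolicy='ipsecpolicy1',
    ikepolicy='ikepolicy1',
    vpnservice='vpn1',
    peer_cidrs='10.2.0.0/24',     # wrapped into a one-element list internally
    peer_address='203.0.113.10',
    peer_id='203.0.113.10',
    psk='secret',
    dpd_action='hold',            # becomes body['dpd']['action']
    dpd_interval=30,              # becomes body['dpd']['interval']
    dpd_timeout=120,              # becomes body['dpd']['timeout']
)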
'Deletes the specified IPsecSiteConnection'
def delete_ipsec_site_connection(self, ipsec_site_connection):
ipsec_site_connection_id = self._find_ipsec_site_connection_id(ipsec_site_connection)
ret = self.network_conn.delete_ipsec_site_connection(ipsec_site_connection_id)
return ret if ret else True
'Fetches a list of all configured IKEPolicies for a tenant'
def list_ikepolicies(self):
return self.network_conn.list_ikepolicies()
'Fetches information of a specific IKEPolicy'
def show_ikepolicy(self, ikepolicy):
return self._fetch_ikepolicy(ikepolicy)
'Creates a new IKEPolicy'
def create_ikepolicy(self, name, **kwargs):
body = {'name': name}
if 'phase1_negotiation_mode' in kwargs:
    body['phase1_negotiation_mode'] = kwargs['phase1_negotiation_mode']
if 'auth_algorithm' in kwargs:
    body['auth_algorithm'] = kwargs['auth_algorithm']
if 'encryption_algorithm' in kwargs:
    body['encryption_algorithm'] = kwargs['encryption_algorithm']
if 'pfs' in kwargs:
    body['pfs'] = kwargs['pfs']
if 'ike_version' in kwargs:
    body['ike_version'] = kwargs['ike_version']
if 'units' in kwargs:
    body['lifetime'] = {'units': kwargs['units']}
if 'value' in kwargs:
    if 'lifetime' not in body:
        body['lifetime'] = {}
    body['lifetime']['value'] = kwargs['value']
return self.network_conn.create_ikepolicy(body={'ikepolicy': body})
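The flat `units`/`value` kwargs are folded into the nested `lifetime` dict that the Neutron API expects. A minimal sketch, assuming a hypothetical `neutron_client` instance of this class:

# Hypothetical usage; 'neutron_client' is a made-up instance name.
neutron_client.create_ikepolicy(
    'ike-main',
    auth_algorithm='sha1',
    encryption_algorithm='aes-256',
    pfs='group5',
    ike_version='v1',
    units='seconds',   # becomes body['lifetime']['units']
    value=3600,        # becomes body['lifetime']['value']
)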
'Deletes the specified IKEPolicy'
def delete_ikepolicy(self, ikepolicy):
ikepolicy_id = self._find_ikepolicy_id(ikepolicy)
ret = self.network_conn.delete_ikepolicy(ikepolicy_id)
return ret if ret else True
'Fetches a list of all configured IPsecPolicies for a tenant'
def list_ipsecpolicies(self):
return self.network_conn.list_ipsecpolicies()
'Fetches information of a specific IPsecPolicy'
def show_ipsecpolicy(self, ipsecpolicy):
return self._fetch_ipsecpolicy(ipsecpolicy)
'Creates a new IPsecPolicy'
def create_ipsecpolicy(self, name, **kwargs):
body = {'name': name}
if 'transform_protocol' in kwargs:
    body['transform_protocol'] = kwargs['transform_protocol']
if 'auth_algorithm' in kwargs:
    body['auth_algorithm'] = kwargs['auth_algorithm']
if 'encapsulation_mode' in kwargs:
    body['encapsulation_mode'] = kwargs['encapsulation_mode']
if 'encryption_algorithm' in kwargs:
    body['encryption_algorithm'] = kwargs['encryption_algorithm']
if 'pfs' in kwargs:
    body['pfs'] = kwargs['pfs']
if 'units' in kwargs:
    body['lifetime'] = {'units': kwargs['units']}
if 'value' in kwargs:
    if 'lifetime' not in body:
        body['lifetime'] = {}
    body['lifetime']['value'] = kwargs['value']
return self.network_conn.create_ipsecpolicy(body={'ipsecpolicy': body})
'Deletes the specified IPsecPolicy'
def delete_ipsecpolicy(self, ipsecpolicy):
ipsecpolicy_id = self._find_ipsecpolicy_id(ipsecpolicy)
ret = self.network_conn.delete_ipsecpolicy(ipsecpolicy_id)
return ret if ret else True
'Fetches a list of all configured firewall rules for a tenant'
def list_firewall_rules(self):
return self.network_conn.list_firewall_rules()
'Fetches information of a specific firewall rule'
def show_firewall_rule(self, firewall_rule):
return self._fetch_firewall_rule(firewall_rule)
'Creates a new firewall rule'
def create_firewall_rule(self, protocol, action, **kwargs):
body = {'protocol': protocol, 'action': action}
if 'tenant_id' in kwargs:
    body['tenant_id'] = kwargs['tenant_id']
if 'name' in kwargs:
    body['name'] = kwargs['name']
if 'description' in kwargs:
    body['description'] = kwargs['description']
if 'ip_version' in kwargs:
    body['ip_version'] = kwargs['ip_version']
if 'source_ip_address' in kwargs:
    body['source_ip_address'] = kwargs['source_ip_address']
if 'destination_port' in kwargs:
    body['destination_port'] = kwargs['destination_port']
if 'shared' in kwargs:
    body['shared'] = kwargs['shared']
if 'enabled' in kwargs:
    body['enabled'] = kwargs['enabled']
return self.network_conn.create_firewall_rule(body={'firewall_rule': body})
'Deletes the specified firewall rule'
def delete_firewall_rule(self, firewall_rule):
firewall_rule_id = self._find_firewall_rule_id(firewall_rule)
ret = self.network_conn.delete_firewall_rule(firewall_rule_id)
return ret if ret else True
'Update a firewall rule'
def update_firewall_rule(self, firewall_rule, protocol=None, action=None, name=None, description=None, ip_version=None, source_ip_address=None, destination_ip_address=None, source_port=None, destination_port=None, shared=None, enabled=None):
body = {}
if protocol:
    body['protocol'] = protocol
if action:
    body['action'] = action
if name:
    body['name'] = name
if description:
    body['description'] = description
if ip_version:
    body['ip_version'] = ip_version
if source_ip_address:
    body['source_ip_address'] = source_ip_address
if destination_ip_address:
    body['destination_ip_address'] = destination_ip_address
if source_port:
    body['source_port'] = source_port
if destination_port:
    body['destination_port'] = destination_port
if shared:
    body['shared'] = shared
if enabled:
    body['enabled'] = enabled
return self.network_conn.update_firewall_rule(firewall_rule, body={'firewall_rule': body})
'Fetches a list of all firewalls for a tenant'
def list_firewalls(self):
return self.network_conn.list_firewalls()
'Fetches information of a specific firewall'
def show_firewall(self, firewall):
return self._fetch_firewall(firewall)
'List the L3 agents hosting a specific router.'
def list_l3_agent_hosting_routers(self, router):
return self.network_conn.list_l3_agent_hosting_routers(router)
'List agents.'
def list_agents(self):
return self.network_conn.list_agents()
'Send data to the terminal. You are responsible for sending any required line feeds.'
def send(self, data):
return self._send(data)
'Send the provided data to the terminal appending a line feed.'
def sendline(self, data, linesep=os.linesep):
return self.send('{0}{1}'.format(data, linesep))
'Receive data from the terminal as a (``stdout``, ``stderr``) tuple. If either of those is ``None``, we can no longer communicate with the terminal\'s child process.'
def recv(self, maxsize=None):
if maxsize is None:
    maxsize = 1024
elif maxsize < 1:
    maxsize = 1
return self._recv(maxsize)
'Close the communication with the terminal\'s child. If ``terminate`` is ``True`` then additionally try to terminate the terminal, and if ``kill`` is also ``True``, kill the terminal if terminating it was not enough.'
def close(self, terminate=True, kill=False):
if not self.closed:
    if self.child_fd is not None:
        os.close(self.child_fd)
        self.child_fd = None
    if self.child_fde is not None:
        os.close(self.child_fde)
        self.child_fde = None
    time.sleep(0.1)
    if terminate:
        if not self.terminate(kill):
            raise TerminalException('Failed to terminate child process.')
    self.closed = True
'Wait for the subprocess to terminate and return its return code. If the timeout is reached, raise TimedProcTimeoutError.'
def run(self):
def receive():
    if self.with_communicate:
        self.stdout, self.stderr = self.process.communicate(input=self.stdin)
    elif self.wait:
        self.process.wait()

if not self.timeout:
    receive()
else:
    rt = threading.Thread(target=receive)
    rt.start()
    rt.join(self.timeout)
    if rt.isAlive():
        # The receive thread is still running: the command timed out.
        self.process.kill()

        def terminate():
            if rt.isAlive():
                self.process.terminate()

        # Give the kill 10 seconds to take effect before escalating.
        threading.Timer(10, terminate).start()
        raise salt.exceptions.TimedProcTimeoutError(
            '{0} : Timed out after {1} seconds'.format(self.command, str(self.timeout)))
return self.process.returncode
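A hedged usage sketch: the constructor name and arguments below are assumptions based on how this `run` method reads its attributes (command, timeout, with_communicate), not a verified API.

# Hypothetical sketch; TimedProc construction details are assumed.
proc = TimedProc(['sleep', '30'], timeout=5, with_communicate=True)
try:
    retcode = proc.run()  # kills the child and raises after 5 seconds
except salt.exceptions.TimedProcTimeoutError as exc:
    log.error(exc)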
':param IOLoop io_loop: Pass in an io_loop if you want asynchronous operation for obtaining events, e.g. use of the set_event_handler() API. Otherwise, operation will be synchronous.
:param bool keep_loop: Whether to keep the io loop or destroy it when the event handle is destroyed. This is useful when using event loops from within third-party async code.'
def __init__(self, node, sock_dir=None, opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False):
self.serial = salt.payload.Serial({'serial': 'msgpack'})
self.keep_loop = keep_loop
if io_loop is not None:
    self.io_loop = io_loop
    self._run_io_loop_sync = False
else:
    self.io_loop = tornado.ioloop.IOLoop()
    self._run_io_loop_sync = True
self.cpub = False
self.cpush = False
self.subscriber = None
self.pusher = None
self.raise_errors = raise_errors
if opts is None:
    opts = {}
if node == 'master':
    self.opts = salt.config.DEFAULT_MASTER_OPTS.copy()
else:
    self.opts = salt.config.DEFAULT_MINION_OPTS.copy()
self.opts.update(opts)
if sock_dir is None:
    sock_dir = self.opts['sock_dir']
else:
    self.opts['sock_dir'] = sock_dir
if salt.utils.platform.is_windows() and 'ipc_mode' not in opts:
    self.opts['ipc_mode'] = 'tcp'
self.puburi, self.pulluri = self.__load_uri(sock_dir, node)
self.pending_tags = []
self.pending_events = []
self.__load_cache_regex()
if listen and not self.cpub:
    self.connect_pub()
'Initialize the regular expression cache and put it in the class namespace. The regex search strings will be prepended with \'^\''
@classmethod
def __load_cache_regex(cls):
cls.cache_regex = salt.utils.cache.CacheRegex(prepend='^')
'Return the URIs for the pull and pub sockets used for firing and listening to events'
def __load_uri(self, sock_dir, node):
if node == 'master':
    if self.opts['ipc_mode'] == 'tcp':
        puburi = int(self.opts['tcp_master_pub_port'])
        pulluri = int(self.opts['tcp_master_pull_port'])
    else:
        puburi = os.path.join(sock_dir, 'master_event_pub.ipc')
        pulluri = os.path.join(sock_dir, 'master_event_pull.ipc')
elif self.opts['ipc_mode'] == 'tcp':
    puburi = int(self.opts['tcp_pub_port'])
    pulluri = int(self.opts['tcp_pull_port'])
else:
    hash_type = getattr(hashlib, self.opts['hash_type'])
    id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10]
    puburi = os.path.join(sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash))
    pulluri = os.path.join(sock_dir, 'minion_event_{0}_pull.ipc'.format(id_hash))
log.debug('{0} PUB socket URI: {1}'.format(self.__class__.__name__, puburi))
log.debug('{0} PULL socket URI: {1}'.format(self.__class__.__name__, pulluri))
return (puburi, pulluri)
'Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event.'
def subscribe(self, tag=None, match_type=None):
if tag is None:
    return
match_func = self._get_match_func(match_type)
self.pending_tags.append([tag, match_func])
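As the docstring above notes, subscribing keeps matching events cached so an unrelated get_event() call cannot discard them. A minimal sketch (the `event` instance and tags are hypothetical):

# Hypothetical sketch: protect a job-return event from being discarded.
event.subscribe('salt/job/12345/ret/')     # cache anything matching this tag
# ... intervening get_event() calls for other tags no longer drop it ...
ret = event.get_event(tag='salt/job/12345/ret/')
event.unsubscribe('salt/job/12345/ret/')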
'Unsubscribe from events matching the passed tag.'
def unsubscribe(self, tag, match_type=None):
if tag is None:
    return
match_func = self._get_match_func(match_type)
self.pending_tags.remove([tag, match_func])
old_events = self.pending_events
self.pending_events = []
for evt in old_events:
    if any(pmatch_func(evt['tag'], ptag) for (ptag, pmatch_func) in self.pending_tags):
        self.pending_events.append(evt)
'Establish the publish connection'
def connect_pub(self, timeout=None):
if self.cpub:
    return True
if self._run_io_loop_sync:
    with salt.utils.async.current_ioloop(self.io_loop):
        if self.subscriber is None:
            self.subscriber = salt.transport.ipc.IPCMessageSubscriber(self.puburi, io_loop=self.io_loop)
        try:
            self.io_loop.run_sync(lambda: self.subscriber.connect(timeout=timeout))
            self.cpub = True
        except Exception:
            pass
else:
    if self.subscriber is None:
        self.subscriber = salt.transport.ipc.IPCMessageSubscriber(self.puburi, io_loop=self.io_loop)
    self.cpub = True
return self.cpub
'Close the publish connection (if established)'
def close_pub(self):
if not self.cpub:
    return
self.subscriber.close()
self.subscriber = None
self.pending_events = []
self.cpub = False
'Establish a connection with the event pull socket. Default timeout is 1 s.'
def connect_pull(self, timeout=1):
if self.cpush:
    return True
if self._run_io_loop_sync:
    with salt.utils.async.current_ioloop(self.io_loop):
        if self.pusher is None:
            self.pusher = salt.transport.ipc.IPCMessageClient(self.pulluri, io_loop=self.io_loop)
        try:
            self.io_loop.run_sync(lambda: self.pusher.connect(timeout=timeout))
            self.cpush = True
        except Exception:
            pass
else:
    if self.pusher is None:
        self.pusher = salt.transport.ipc.IPCMessageClient(self.pulluri, io_loop=self.io_loop)
    self.cpush = True
return self.cpush
'Check the pending_events list for events that match the tag

:param tag: The tag to search for
:type tag: str
:param match_func: Tag-matching function to use; defaults to the result of _get_match_func()
:return: The first matching cached event, or None if none match'
def _check_pending(self, tag, match_func=None):
if match_func is None:
    match_func = self._get_match_func()
old_events = self.pending_events
self.pending_events = []
ret = None
for evt in old_events:
    if match_func(evt['tag'], tag):
        if ret is None:
            ret = evt
            log.trace('get_event() returning cached event = {0}'.format(ret))
        else:
            self.pending_events.append(evt)
    elif any(pmatch_func(evt['tag'], ptag) for (ptag, pmatch_func) in self.pending_tags):
        self.pending_events.append(evt)
    else:
        log.trace('get_event() discarding cached event that no longer has any subscriptions = {0}'.format(evt))
return ret
'Check if the event_tag matches the search check. Uses startswith to check. Return True (matches) or False (no match)'
@staticmethod
def _match_tag_startswith(event_tag, search_tag):
return event_tag.startswith(search_tag)
'Check if the event_tag matches the search check. Uses endswith to check. Return True (matches) or False (no match)'
@staticmethod
def _match_tag_endswith(event_tag, search_tag):
return event_tag.endswith(search_tag)
'Check if the event_tag matches the search check. Uses find to check. Return True (matches) or False (no match)'
@staticmethod
def _match_tag_find(event_tag, search_tag):
return (event_tag.find(search_tag) >= 0)
'Check if the event_tag matches the search check. Uses regular expression search to check. Return True (matches) or False (no match)'
def _match_tag_regex(self, event_tag, search_tag):
return (self.cache_regex.get(search_tag).search(event_tag) is not None)
'Check if the event_tag matches the search check. Uses fnmatch to check. Return True (matches) or False (no match)'
def _match_tag_fnmatch(self, event_tag, search_tag):
return fnmatch.fnmatch(event_tag, search_tag)
'Get a single publication. If no publication is available, block for up to ``wait`` seconds, then return the publication or None if none arrived. If ``wait`` is 0, block forever.

tag
    Only return events matching the given tag. If not specified, or set to an empty string, all events are returned. It is recommended to always be selective about what is returned, in the event that multiple requests are being multiplexed.

match_type
    Set the function used to match the search tag against event tags.

    - \'startswith\' : search for event tags that start with tag
    - \'endswith\' : search for event tags that end with tag
    - \'find\' : search for event tags that contain tag
    - \'regex\' : regex search of \'^\' + tag against event tags
    - \'fnmatch\' : fnmatch tag against event tags

    Default is opts[\'event_match_type\'] or \'startswith\'.

    .. versionadded:: 2015.8.0

no_block
    Define whether getting the event should be a blocking call. Defaults to False to keep backwards compatibility.

    .. versionadded:: 2015.8.0

Notes: Searches cached publications first. If no cached publications match the given tag specification, new publications are received and checked. If a received publication does not match the tag specification, it is DISCARDED unless it is subscribed to via subscribe(), which causes it to be cached. If a caller is not going to call get_event immediately after sending a request, it MUST subscribe to the result to ensure the response is not lost should other regions of code call get_event for other purposes.'
def get_event(self, wait=5, tag='', full=False, match_type=None, no_block=False, auto_reconnect=False):
assert self._run_io_loop_sync
match_func = self._get_match_func(match_type)
ret = self._check_pending(tag, match_func)
if ret is None:
    with salt.utils.async.current_ioloop(self.io_loop):
        if auto_reconnect:
            raise_errors = self.raise_errors
            self.raise_errors = True
            while True:
                try:
                    ret = self._get_event(wait, tag, match_func, no_block)
                    break
                except tornado.iostream.StreamClosedError:
                    self.close_pub()
                    self.connect_pub(timeout=wait)
                    continue
            self.raise_errors = raise_errors
        else:
            ret = self._get_event(wait, tag, match_func, no_block)
if ret is None or full:
    return ret
else:
    return ret['data']
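A usage sketch of the matching modes (construction mirrors the get_event('master', ...) call used by the event returner later in this section; `opts` is assumed to be a loaded master config):

# Wait up to 10 seconds for any event whose tag starts with 'salt/job/'.
event = get_event('master', opts=opts, listen=True)
evt = event.get_event(wait=10, tag='salt/job/', match_type='startswith', full=True)
if evt is not None:
    print(evt['tag'], evt['data'])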
'Get the raw event without blocking or any other niceties'
def get_event_noblock(self):
assert self._run_io_loop_sync
if not self.cpub:
    if not self.connect_pub():
        return None
raw = self.subscriber.read_sync(timeout=0)
if raw is None:
    return None
mtag, data = self.unpack(raw, self.serial)
return {'data': data, 'tag': mtag}
'Get the raw event in a blocking fashion Slower, but decreases the possibility of dropped events'
def get_event_block(self):
assert self._run_io_loop_sync
if not self.cpub:
    if not self.connect_pub():
        return None
raw = self.subscriber.read_sync(timeout=None)
if raw is None:
    return None
mtag, data = self.unpack(raw, self.serial)
return {'data': data, 'tag': mtag}
'Creates a generator that continuously listens for events'
def iter_events(self, tag='', full=False, match_type=None, auto_reconnect=False):
while True:
    data = self.get_event(tag=tag, full=full, match_type=match_type, auto_reconnect=auto_reconnect)
    if data is None:
        continue
    yield data
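Consuming the generator (the tag is hypothetical):

# Print every auth event as it arrives; iter_events() never returns.
for evt in event.iter_events(tag='salt/auth', full=True):
    print(evt['tag'], evt['data'])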
'Send a single event into the publisher with payload dict "data" and event identifier "tag". The default timeout is 1000 ms.'
def fire_event(self, data, tag, timeout=1000):
if not str(tag):
    raise ValueError('Empty tag.')
if not isinstance(data, MutableMapping):
    raise ValueError("Dict object expected, not '{0}'.".format(data))
if not self.cpush:
    if timeout is not None:
        timeout_s = float(timeout) / 1000
    else:
        timeout_s = None
    if not self.connect_pull(timeout=timeout_s):
        return False
data['_stamp'] = datetime.datetime.utcnow().isoformat()
tagend = TAGEND
if six.PY2:
    dump_data = self.serial.dumps(data)
else:
    dump_data = self.serial.dumps(data, use_bin_type=True)
serialized_data = salt.utils.dicttrim.trim_dict(
    dump_data, self.opts['max_event_size'], is_msgpacked=True, use_bin_type=six.PY3)
log.debug('Sending event: tag = {0}; data = {1}'.format(tag, data))
if six.PY2:
    event = '{0}{1}{2}'.format(tag, tagend, serialized_data)
else:
    # Joining bytes requires a bytes separator on Python 3.
    event = b''.join([
        salt.utils.stringutils.to_bytes(tag),
        salt.utils.stringutils.to_bytes(tagend),
        serialized_data])
msg = salt.utils.stringutils.to_bytes(event, 'utf-8')
if self._run_io_loop_sync:
    with salt.utils.async.current_ioloop(self.io_loop):
        try:
            self.io_loop.run_sync(lambda: self.pusher.send(msg))
        except Exception as ex:
            log.debug(ex)
            raise
else:
    self.io_loop.spawn_callback(self.pusher.send, msg)
return True
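Firing a custom event is then a one-liner (tag and payload made up; the timeout is in milliseconds and only applies while establishing the pull connection):

# Hypothetical sketch: fire a custom event on the local bus.
event.fire_event({'custom': 'payload'}, 'myapp/deploy/start', timeout=2000)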
'Send a single event to the master, with the payload "data" and the event identifier "tag". Default timeout is 1000ms'
def fire_master(self, data, tag, timeout=1000):
msg = {'tag': tag, 'data': data, 'events': None, 'pretag': None}
return self.fire_event(msg, 'fire_master', timeout)
'Helper function for fire_ret_load'
def _fire_ret_load_specific_fun(self, load, fun_index=0):
if isinstance(load['fun'], list):
    # Multi-function job
    fun = load['fun'][fun_index]
    if isinstance(load['retcode'], list):
        # Multi-function ordered
        ret = load.get('return')
        if isinstance(ret, list) and len(ret) > fun_index:
            ret = ret[fun_index]
        else:
            ret = {}
        retcode = load['retcode'][fun_index]
    else:
        ret = load.get('return', {})
        ret = ret.get(fun, {})
        retcode = load['retcode'][fun]
else:
    # Single-function job
    fun = load['fun']
    ret = load.get('return', {})
    retcode = load['retcode']
try:
    for tag, data in six.iteritems(ret):
        data['retcode'] = retcode
        tags = tag.split('_|-')
        if data.get('result') is False:
            self.fire_event(data, '{0}.{1}'.format(tags[0], tags[-1]))
            data['jid'] = load['jid']
            data['id'] = load['id']
            data['success'] = False
            data['return'] = 'Error: {0}.{1}'.format(tags[0], tags[-1])
            data['fun'] = fun
            data['user'] = load['user']
            self.fire_event(data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job'))
except Exception:
    pass
'Fire events based on information in the return load'
def fire_ret_load(self, load):
if load.get('retcode') and load.get('fun'):
    if isinstance(load['fun'], list):
        # Multi-function job
        if isinstance(load['retcode'], list):
            multifunc_ordered = True
        else:
            multifunc_ordered = False
        for fun_index in range(0, len(load['fun'])):
            fun = load['fun'][fun_index]
            if multifunc_ordered:
                if (len(load['retcode']) > fun_index and
                        load['retcode'][fun_index] and
                        fun in SUB_EVENT):
                    self._fire_ret_load_specific_fun(load, fun_index)
            elif load['retcode'].get(fun, 0) and fun in SUB_EVENT:
                self._fire_ret_load_specific_fun(load, fun_index)
    elif load['fun'] in SUB_EVENT:
        self._fire_ret_load_specific_fun(load)
'Invoke the event_handler callback each time an event arrives.'
def set_event_handler(self, event_handler):
assert not self._run_io_loop_sync
if not self.cpub:
    self.connect_pub()
return self.subscriber.read_async(event_handler)
'Get something from epull, publish it out on epub, and return the package (or None)'
def handle_publish(self, package, _):
try:
    self.publisher.publish(package)
    return package
except Exception:
    log.critical('Unexpected error while polling minion events', exc_info=True)
    return None
'Bind the pub and pull sockets for events'
def run(self):
salt.utils.appendproctitle(self.__class__.__name__)
self.io_loop = tornado.ioloop.IOLoop()
with salt.utils.async.current_ioloop(self.io_loop):
    if self.opts['ipc_mode'] == 'tcp':
        epub_uri = int(self.opts['tcp_master_pub_port'])
        epull_uri = int(self.opts['tcp_master_pull_port'])
    else:
        epub_uri = os.path.join(self.opts['sock_dir'], 'master_event_pub.ipc')
        epull_uri = os.path.join(self.opts['sock_dir'], 'master_event_pull.ipc')
    self.publisher = salt.transport.ipc.IPCMessagePublisher(self.opts, epub_uri, io_loop=self.io_loop)
    self.puller = salt.transport.ipc.IPCMessageServer(epull_uri, io_loop=self.io_loop, payload_handler=self.handle_publish)
    # Create the sockets with a restrictive umask (0o177 == decimal 127).
    old_umask = os.umask(0o177)
    try:
        self.publisher.start()
        self.puller.start()
        if self.opts['ipc_mode'] != 'tcp' and (self.opts['publisher_acl'] or self.opts['external_auth']):
            # Open up the pub socket (0o666 == decimal 438) so unprivileged
            # processes covered by the ACL or external auth can read events.
            os.chmod(os.path.join(self.opts['sock_dir'], 'master_event_pub.ipc'), 0o666)
    finally:
        os.umask(old_umask)
    Finalize(self, self.close, exitpriority=15)
    self.io_loop.start()
'Get something from epull, publish it out on epub, and return the package (or None)'
def handle_publish(self, package, _):
try:
    self.publisher.publish(package)
    return package
except Exception:
    log.critical('Unexpected error while polling master events', exc_info=True)
    return None
'Initialize the EventReturn system. Returns an EventReturn instance.'
def __init__(self, opts, log_queue=None):
super(EventReturn, self).__init__(log_queue=log_queue)
self.opts = opts
self.event_return_queue = self.opts['event_return_queue']
local_minion_opts = self.opts.copy()
local_minion_opts['file_client'] = 'local'
self.minion = salt.minion.MasterMinion(local_minion_opts)
self.event_queue = []
self.stop = False
'Spin up the multiprocess event returner'
def run(self):
salt.utils.appendproctitle(self.__class__.__name__)
self.event = get_event('master', opts=self.opts, listen=True)
events = self.event.iter_events(full=True)
self.event.fire_event({}, 'salt/event_listen/start')
try:
    for event in events:
        if event['tag'] == 'salt/event/exit':
            self.stop = True
        if self._filter(event):
            self.event_queue.append(event)
        if len(self.event_queue) >= self.event_return_queue:
            self.flush_events()
        if self.stop:
            break
finally:
    if self.event_queue:
        self.flush_events()
'Take an event and run it through configured filters. Returns True if event should be stored, else False'
def _filter(self, event):
tag = event['tag']
if self.opts['event_return_whitelist']:
    ret = False
else:
    ret = True
for whitelist_match in self.opts['event_return_whitelist']:
    if fnmatch.fnmatch(tag, whitelist_match):
        ret = True
        break
for blacklist_match in self.opts['event_return_blacklist']:
    if fnmatch.fnmatch(tag, blacklist_match):
        ret = False
        break
return ret
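A non-empty whitelist flips the default to deny, and the blacklist always wins. A sketch of the two options (option names taken from the code above):

# With these opts, only salt/job/* events are stored, except 'new' job
# events, which the blacklist drops.
opts = {
    'event_return_whitelist': ['salt/job/*'],      # default becomes deny-all
    'event_return_blacklist': ['salt/job/*/new'],  # blacklist overrides
}
# _filter({'tag': 'salt/job/123/ret/m1', ...}) -> True
# _filter({'tag': 'salt/job/123/new', ...})    -> False (blacklisted)
# _filter({'tag': 'salt/auth', ...})           -> False (not whitelisted)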
'Fire an event off on the master server

CLI Example:

.. code-block:: bash

    salt \'*\' event.fire_master \'stuff to be in the event\' \'tag\''
def fire_master(self, data, tag, preload=None):
load = {}
if preload:
    load.update(preload)
load.update({'id': self.opts['id'],
             'tag': tag,
             'data': data,
             'cmd': '_minion_event',
             'tok': self.auth.gen_token('salt')})
channel = salt.transport.Channel.factory(self.opts)
try:
    channel.send(load)
except Exception:
    pass
return True
'Pass in a state "running" dict, which is the return dict from a state call. The dict will be processed and events fired. By default yellows and reds fire events on the master and minion, but this can be configured.'
def fire_running(self, running):
load = {'id': self.opts['id'],
        'events': [],
        'cmd': '_minion_event'}
for stag in sorted(running, key=lambda k: running[k].get('__run_num__', 0)):
    if running[stag]['result'] and not running[stag]['changes']:
        continue
    tag = 'state_{0}_{1}'.format(
        str(running[stag]['result']),
        'True' if running[stag]['changes'] else 'False')
    load['events'].append({'tag': tag, 'data': running[stag]})
channel = salt.transport.Channel.factory(self.opts)
try:
    channel.send(load)
except Exception:
    pass
return True
'Recurse through etcd and return all values

.. versionadded:: 2014.7.0'
def tree(self, path):
ret = {}
try:
    items = self.read(path)
except (etcd.EtcdKeyNotFound, ValueError):
    return None
except etcd.EtcdConnectionFailed:
    log.error("etcd: failed to perform 'tree' operation on path {0} due to connection error".format(path))
    return None
for item in items.children:
    comps = str(item.key).split('/')
    if item.dir is True:
        if item.key == path:
            continue
        ret[comps[-1]] = self.tree(item.key)
    else:
        ret[comps[-1]] = item.value
return ret
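A shape sketch (the `client` instance and keys are hypothetical): given etcd keys /conf/a = 1 and /conf/sub/b = 2, the recursion mirrors the directory layout as nested dicts.

# Hypothetical sketch of the recursive result shape.
client.tree('/conf')
# -> {'a': '1', 'sub': {'b': '2'}}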
'On deletion of the async wrapper, make sure to clean up the async stuff'
def __del__(self):
if hasattr(self, 'async'):
    if hasattr(self.async, 'close'):
        # Close streams before the io_loop so cleanup happens in order.
        self.async.close()
        self.io_loop.close()
        del self.async
        del self.io_loop
elif hasattr(self, 'io_loop'):
    self.io_loop.close()
    del self.io_loop
'Accept the provided key'
def accept(self, pub):
try:
    with salt.utils.files.fopen(self.path, 'r') as fp_:
        expiry = int(fp_.read())
except (OSError, IOError):
    log.error("Request to sign key for minion '%s' on hyper '%s' denied: no authorization", self.id, self.hyper)
    return False
except ValueError:
    log.error('Invalid expiry data in %s', self.path)
    return False
# The authorization is only valid for 10 minutes (600 seconds).
if (time.time() - expiry) > 600:
    log.warning('Request to sign key for minion "{0}" on hyper "{1}" denied: authorization expired'.format(self.id, self.hyper))
    return False
pubfn = os.path.join(self.opts['pki_dir'], 'minions', self.id)
with salt.utils.files.fopen(pubfn, 'w+') as fp_:
    fp_.write(pub)
self.void()
return True
'Prepare the master to expect a signing request'
def authorize(self):
with salt.utils.files.fopen(self.path, 'w+') as fp_:
    fp_.write(str(int(time.time())))
return True
'Invalidate any existing authorization'
def void(self):
try:
    os.unlink(self.path)
    return True
except OSError:
    return False
'Raise an exception if value is empty. Otherwise strip it down.'
@staticmethod
def trim(value):
value = (value or '').strip()
if not value:
    raise CommandExecutionError('Empty value during sanitation')
return text(value)
'Remove everything that would affect paths in the filename'
@staticmethod
def filename(value):
# Note: the original character class '[^a-zA-Z0-9.-_ ]' contained the range
# '.-_', which unintentionally admitted '/', '\\' and other path characters;
# the hyphen is placed last here so it is treated as a literal.
return re.sub('[^a-zA-Z0-9._ -]', '', os.path.basename(InputSanitizer.trim(value)))
'Clean value for RFC1123.'
@staticmethod
def hostname(value):
return re.sub('[^a-zA-Z0-9.-]', '', InputSanitizer.trim(value)).strip('.')
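A behavior sketch of the three sanitizers (inputs made up):

InputSanitizer.trim('  value  ')                  # -> 'value'
InputSanitizer.filename('../../etc/passwd')      # -> 'passwd' (basename, then charset filter)
InputSanitizer.hostname(' web-01.example.com. ')  # -> 'web-01.example.com'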
'Set up the stack and remote yard'
def __init__(self, node, sock_dir=None, listen=True, opts=None):
self.node = node
self.sock_dir = sock_dir
if opts is None:
    opts = {}
self.opts = opts
self.stack = None
self.ryn = 'manor'  # remote yard name
self.connected = False
self.cpub = False
self.__prep_stack(listen)
'Prepare the stack objects'
def __prep_stack(self, listen):
if not self.stack:
    if hasattr(transport, 'jobber_stack') and transport.jobber_stack:
        self.stack = transport.jobber_stack
    else:
        self.stack = transport.jobber_stack = self._setup_stack(ryn=self.ryn)
log.debug('RAETEvent Using Jobber Stack at = {0}\n'.format(self.stack.ha))
if listen:
    self.subscribe()
'Included for compat with zeromq events, not required'
def subscribe(self, tag=None):
if not self.connected:
    self.connect_pub()
'Included for compat with zeromq events, not required'
def unsubscribe(self, tag=None):
return
'Establish the publish connection'
def connect_pub(self):
try:
    route = {'dst': (None, self.ryn, 'event_req'),
             'src': (None, self.stack.local.name, None)}
    msg = {'route': route}
    self.stack.transmit(msg, self.stack.nameRemotes[self.ryn].uid)
    self.stack.serviceAll()
    self.connected = True
    self.cpub = True
except Exception:
    pass
'Included for compat with zeromq events, not required'
def connect_pull(self, timeout=1000):
return
'Included for compat with zeromq events, not required'
@classmethod
def unpack(cls, raw, serial=None):
return raw
'Get a single publication. If no publication is available, block for up to wait seconds, then return the publication or None if none arrived. If wait is 0, block forever.'
def get_event(self, wait=5, tag='', match_type=None, full=False, no_block=None, auto_reconnect=False):
if not self.connected:
    self.connect_pub()
start = time.time()
while True:
    self.stack.serviceAll()
    if self.stack.rxMsgs:
        msg, sender = self.stack.rxMsgs.popleft()
        if 'tag' not in msg and 'data' not in msg:
            continue
        if not msg['tag'].startswith(tag):
            continue
        if full:
            return msg
        else:
            return msg['data']
    if (start + wait) < time.time():
        return None
    time.sleep(0.01)
'Get the raw event msg without blocking or any other niceties'
def get_event_noblock(self):
if not self.connected:
    self.connect_pub()
self.stack.serviceAll()
if self.stack.rxMsgs:
    msg, sender = self.stack.rxMsgs.popleft()
    if 'tag' not in msg and 'data' not in msg:
        return None
    return msg
'Creates a generator that continuously listens for events'
def iter_events(self, tag='', full=False, auto_reconnect=False):
while True:
    data = self.get_event(tag=tag, full=full, auto_reconnect=auto_reconnect)
    if data is None:
        continue
    yield data
'Send a single event into the publisher with payload dict "data" and event identifier "tag"'
def fire_event(self, data, tag, timeout=1000):
if not str(tag):
    raise ValueError('Empty tag.')
if not isinstance(data, MutableMapping):
    raise ValueError("Dict object expected, not '{0}'.".format(data))
route = {'dst': (None, self.ryn, 'event_fire'),
         'src': (None, self.stack.local.name, None)}
msg = {'route': route, 'tag': tag, 'data': data}
self.stack.transmit(msg, self.stack.nameRemotes[self.ryn].uid)
self.stack.serviceAll()
'Fire events based on information in the return load'
def fire_ret_load(self, load):
if load.get('retcode') and load.get('fun'):
    if load['fun'] in salt.utils.event.SUB_EVENT:
        try:
            for tag, data in six.iteritems(load.get('return', {})):
                data['retcode'] = load['retcode']
                tags = tag.split('_|-')
                if data.get('result') is False:
                    self.fire_event(data, '{0}.{1}'.format(tags[0], tags[-1]))
                    data['jid'] = load['jid']
                    data['id'] = load['id']
                    data['success'] = False
                    data['return'] = 'Error: {0}.{1}'.format(tags[0], tags[-1])
                    data['fun'] = load['fun']
                    data['user'] = load['user']
                    self.fire_event(data, salt.utils.event.tagify([load['jid'], 'sub', load['id'], 'error', load['fun']], 'job'))
        except Exception:
            pass
'Here for compatibility'
def close_pub(self):
return
'Establish the publish connection'
def connect_pub(self):
try:
    route = {'dst': (None, self.ryn, 'presence_req'),
             'src': (None, self.stack.local.name, None)}
    msg = {'route': route}
    if self.state:
        msg['data'] = {'state': self.state}
    self.stack.transmit(msg, self.stack.nameRemotes[self.ryn].uid)
    self.stack.serviceAll()
    self.connected = True
except Exception:
    pass
'Establish the publish connection'
def connect_pub(self):
try:
    route = {'dst': (self.estate, None, 'stats_req'),
             'src': (None, self.stack.local.name, None)}
    msg = {'route': route, 'tag': self.tag}
    self.stack.transmit(msg, self.stack.nameRemotes[self.ryn].uid)
    self.stack.serviceAll()
    self.connected = True
except Exception:
    pass
':param required: If the configuration item is required. Defaults to ``False``.'
def __init__(self, required=None, **extra):
if required is not None:
    self.required = required
self.extra = extra
'Run any validation checks you need on the instance attributes. ATTENTION: Don\'t call the parent class when overriding this method because it will just duplicate the executions. This class\'s metaclass will take care of that.'
def __validate_attributes__(self):
if self.required not in (True, False):
    raise RuntimeError("'required' can only be True/False")
'Return the argname value, looking it up on all possible attributes'
def _get_argname_value(self, argname):
argvalue = getattr(self, '__get_{0}__'.format(argname), None)
if argvalue is not None and callable(argvalue):
    argvalue = argvalue()
if argvalue is None:
    argvalue = getattr(self, argname, None)
if argvalue is None:
    argvalue = getattr(self, '__{0}__'.format(argname), None)
if argvalue is None:
    argvalue = self.extra.get(argname, None)
return argvalue
'Return a serializable form of the config instance'
def serialize(self):
raise NotImplementedError
':param required: If the configuration item is required. Defaults to ``False``.
:param title: A short explanation about the purpose of the data described by this item.
:param description: A detailed explanation about the purpose of the data described by this item.
:param default: The default value for this configuration item. May be :data:`.Null` (a special value to set the default value to null).
:param enum: A list (list, tuple, set) of valid choices.'
def __init__(self, title=None, description=None, default=None, enum=None, enumNames=None, **kwargs):
if title is not None:
    self.title = title
if description is not None:
    self.description = description
if default is not None:
    self.default = default
if enum is not None:
    self.enum = enum
if enumNames is not None:
    self.enumNames = enumNames
super(BaseSchemaItem, self).__init__(**kwargs)
'Return a serializable form of the config instance'
def serialize(self):
serialized = {'type': self.__type__}
for argname in self._attributes:
    if argname == 'required':
        continue
    argvalue = self._get_argname_value(argname)
    if argvalue is not None:
        if argvalue is Null:
            argvalue = None
        if self.__serialize_attr_aliases__ and argname in self.__serialize_attr_aliases__:
            argname = self.__serialize_attr_aliases__[argname]
        serialized[argname] = argvalue
return serialized
':param required: If the configuration item is required. Defaults to ``False``.
:param title: A short explanation about the purpose of the data described by this item.
:param description: A detailed explanation about the purpose of the data described by this item.
:param default: The default value for this configuration item. May be :data:`.Null` (a special value to set the default value to null).
:param enum: A list (list, tuple, set) of valid choices.
:param format: A semantic format of the string (for example, ``"date-time"``, ``"email"``, or ``"uri"``).
:param pattern: A regular expression (ECMA 262) that a string value must match.
:param min_length: The minimum length
:param max_length: The maximum length'
def __init__(self, format=None, pattern=None, min_length=None, max_length=None, **kwargs):
if format is not None:
    self.format = format
if pattern is not None:
    self.pattern = pattern
if min_length is not None:
    self.min_length = min_length
if max_length is not None:
    self.max_length = max_length
super(StringItem, self).__init__(**kwargs)
':param required: If the configuration item is required. Defaults to ``False``.
:param title: A short explanation about the purpose of the data described by this item.
:param description: A detailed explanation about the purpose of the data described by this item.
:param default: The default value for this configuration item. May be :data:`.Null` (a special value to set the default value to null).
:param enum: A list (list, tuple, set) of valid choices.
:param multiple_of: A value must be a multiple of this factor.
:param minimum: The minimum allowed value
:param exclusive_minimum: Whether a value is allowed to be exactly equal to the minimum
:param maximum: The maximum allowed value
:param exclusive_maximum: Whether a value is allowed to be exactly equal to the maximum'
def __init__(self, multiple_of=None, minimum=None, exclusive_minimum=None, maximum=None, exclusive_maximum=None, **kwargs):
if multiple_of is not None:
    self.multiple_of = multiple_of
if minimum is not None:
    self.minimum = minimum
if exclusive_minimum is not None:
    self.exclusive_minimum = exclusive_minimum
if maximum is not None:
    self.maximum = maximum
if exclusive_maximum is not None:
    self.exclusive_maximum = exclusive_maximum
super(NumberItem, self).__init__(**kwargs)
':param required: If the configuration item is required. Defaults to ``False``.
:param title: A short explanation about the purpose of the data described by this item.
:param description: A detailed explanation about the purpose of the data described by this item.
:param default: The default value for this configuration item. May be :data:`.Null` (a special value to set the default value to null).
:param enum: A list (list, tuple, set) of valid choices.
:param items: Either of the following:
    * :class:`BaseSchemaItem` -- all items of the array must match the field schema;
    * a list or a tuple of :class:`fields <.BaseSchemaItem>` -- all items of the array must be valid according to the field schema at the corresponding index (tuple typing);
:param min_items: Minimum length of the array
:param max_items: Maximum length of the array
:param unique_items: Whether all the values in the array must be distinct.
:param additional_items: If the value of ``items`` is a list or a tuple, and the array length is larger than the number of fields in ``items``, then the additional items are described by the :class:`.BaseField` passed using this argument.
:type additional_items: bool or :class:`.BaseSchemaItem`'
def __init__(self, items=None, min_items=None, max_items=None, unique_items=None, additional_items=None, **kwargs):
if items is not None:
    self.items = items
if min_items is not None:
    self.min_items = min_items
if max_items is not None:
    self.max_items = max_items
if unique_items is not None:
    self.unique_items = unique_items
if additional_items is not None:
    self.additional_items = additional_items
super(ArrayItem, self).__init__(**kwargs)
':param required: If the configuration item is required. Defaults to ``False``.
:type required: boolean
:param title: A short explanation about the purpose of the data described by this item.
:type title: str
:param description: A detailed explanation about the purpose of the data described by this item.
:param default: The default value for this configuration item. May be :data:`.Null` (a special value to set the default value to null).
:param enum: A list (list, tuple, set) of valid choices.
:param properties: A dictionary containing fields
:param pattern_properties: A dictionary whose keys are regular expressions (ECMA 262). Properties match against these regular expressions, and for any that match, the property is described by the corresponding field schema.
:type pattern_properties: dict[str -> :class:`.Schema` or :class:`.SchemaItem` or :class:`.BaseSchemaItem`]
:param additional_properties: Describes properties that are not described by the ``properties`` or ``pattern_properties``.
:type additional_properties: bool or :class:`.Schema` or :class:`.SchemaItem` or :class:`.BaseSchemaItem`
:param min_properties: A minimum number of properties.
:type min_properties: int
:param max_properties: A maximum number of properties
:type max_properties: int'
def __init__(self, properties=None, pattern_properties=None, additional_properties=None, min_properties=None, max_properties=None, **kwargs):
if properties is not None:
    self.properties = properties
if pattern_properties is not None:
    self.pattern_properties = pattern_properties
if additional_properties is not None:
    self.additional_properties = additional_properties
if min_properties is not None:
    self.min_properties = min_properties
if max_properties is not None:
    self.max_properties = max_properties
super(DictItem, self).__init__(**kwargs)
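A hedged composition sketch using the item classes whose constructors appear above (class names are taken from the super() calls; the exact keys emitted by serialize() depend on each class's serialization aliases, so the output shown is indicative only):

# Hypothetical sketch; constructor keywords mirror the __init__ signatures above.
host = StringItem(title='Host', description='The server hostname',
                  min_length=1, max_length=255, required=True)
port = NumberItem(title='Port', minimum=1, maximum=65535, default=443)
tags = ArrayItem(title='Tags', items=StringItem(), unique_items=True)
print(host.serialize())
# -> {'type': 'string', 'title': 'Host', ...}  # key names per the alias table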
'Adds any missed schema attributes to the _attributes list. The attributes can be class attributes, and they won\'t otherwise be included in the _attributes list automatically.'
def _add_missing_schema_attributes(self):
for attr in [attr for attr in dir(self) if not attr.startswith('__')]:
    attr_val = getattr(self, attr)
    if isinstance(attr_val, SchemaItem) and attr not in self._attributes:
        self._attributes.append(attr)
'The serialization of the complex item is a pointer to the item definition'
def serialize(self):
return {'$ref': '#/definitions/{0}'.format(self.definition_name)}
'Returns the definition of the complex item'
def get_definition(self):
serialized = super(ComplexSchemaItem, self).serialize()
del serialized['definition_name']
serialized['title'] = self.definition_name
properties = {}
required_attr_names = []
for attr_name in self._attributes:
    attr = getattr(self, attr_name)
    if attr and isinstance(attr, BaseSchemaItem):
        del serialized[attr_name]
        properties[attr_name] = attr.serialize()
        properties[attr_name]['type'] = attr.__type__
        if attr.required:
            required_attr_names.append(attr_name)
if serialized.get('properties') is None:
    serialized['properties'] = {}
serialized['properties'].update(properties)
if required_attr_names:
    serialized['required'] = required_attr_names
return serialized
'Returns a list of the complex attributes'
def get_complex_attrs(self):
return [getattr(self, attr_name) for attr_name in self._attributes if isinstance(getattr(self, attr_name), ComplexSchemaItem)]
'Return a file client. Instantiates on first call.'
def file_client(self):
if not self._file_client:
    self._file_client = salt.fileclient.get_file_client(self.opts, self.pillar_rend)
return self._file_client