Columns: desc (string, lengths 3 to 26.7k); decl (string, lengths 11 to 7.89k); bodies (string, lengths 8 to 553k)
'Return CORS headers for preflight requests'
def options(self, *args, **kwargs):
    request_headers = self.request.headers.get('Access-Control-Request-Headers')
    allowed_headers = request_headers.split(',')
    self.set_header('Access-Control-Allow-Headers', ','.join(allowed_headers))
    self.set_header('Access-Control-Expose-Headers', 'X-Auth-Token')
    self.set_header('Access-Control-Allow-Methods', 'OPTIONS, GET, POST')
    self.set_status(204)
    self.finish()
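A browser issues the preflight automatically, but the exchange can be reproduced by hand. A minimal sketch using the third-party requests library; the host, Origin, and requested header names below are placeholder assumptions, not part of the handler:

# Illustrative only: reproduce a CORS preflight against the handler above.
import requests

resp = requests.options(
    'http://localhost:8000/',
    headers={'Origin': 'http://example.com',
             'Access-Control-Request-Headers': 'X-Auth-Token,Content-Type'},
)
print(resp.status_code)                                  # handler sets 204
print(resp.headers.get('Access-Control-Allow-Headers'))  # echoes the requested headers
print(resp.headers.get('Access-Control-Allow-Methods'))  # OPTIONS, GET, POST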
'All logins are done over POST; this is a parked endpoint .. http:get:: /login :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/login .. code-block:: http GET /login HTTP/1.1 Host: localhost:8000 Accept: application/json **Example response:** .. code-block:: http HTTP/1.1 401 Unauthorized Content-Type: application/json Content-Length: 58 {"status": "401 Unauthorized", "return": "Please log in"}'
def get(self):
    self.set_status(401)
    self.set_header('WWW-Authenticate', 'Session')
    ret = {'status': '401 Unauthorized',
           'return': 'Please log in'}
    self.write(self.serialize(ret))
':ref:`Authenticate <rest_tornado-auth>` against Salt\'s eauth system .. http:post:: /login :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :form eauth: the eauth backend configured for the user :form username: username :form password: password :status 200: |200| :status 400: |400| :status 401: |401| :status 406: |406| :status 500: |500| **Example request:** .. code-block:: bash curl -si localhost:8000/login \ -H "Accept: application/json" \ -d username=\'saltuser\' \ -d password=\'saltpass\' \ -d eauth=\'pam\' .. code-block:: http POST / HTTP/1.1 Host: localhost:8000 Content-Length: 42 Content-Type: application/x-www-form-urlencoded Accept: application/json username=saltuser&password=saltpass&eauth=pam **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Type: application/json Content-Length: 206 X-Auth-Token: 6d1b722e Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/ {"return": { "token": "6d1b722e", "start": 1363805943.776223, "expire": 1363849143.776224, "user": "saltuser", "eauth": "pam", "perms": [ "grains.*", "status.*", "sys.*", "test.*"'
def post(self):
    try:
        request_payload = self.deserialize(self.request.body)
        if not isinstance(request_payload, dict):
            self.send_error(400)
            return
        creds = {'username': request_payload['username'],
                 'password': request_payload['password'],
                 'eauth': request_payload['eauth']}
    except KeyError:
        self.send_error(400)
        return
    token = self.application.auth.mk_token(creds)
    if 'token' not in token:
        self.send_error(401)
        return
    try:
        perms = self.application.opts['external_auth'][token['eauth']][token['name']]
    except KeyError:
        self.send_error(401)
        return
    except (AttributeError, IndexError):
        logging.debug("Configuration for external_auth malformed for eauth '{0}', "
                      "and user '{1}'.".format(token.get('eauth'), token.get('name')),
                      exc_info=True)
        self.send_error(500)
        return
    ret = {'return': [{'token': token['token'],
                       'expire': token['expire'],
                       'start': token['start'],
                       'user': token['name'],
                       'eauth': token['eauth'],
                       'perms': perms}]}
    self.write(self.serialize(ret))
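For reference, the login round-trip described in the docstring can be sketched from the client side. This is illustrative only: it assumes the requests library and placeholder host and credentials, and reads the token from the 'return' list built by the handler above.

# Illustrative sketch, not part of the handler: obtain a token from /login.
# Host, username, password, and eauth values are placeholders.
import requests

resp = requests.post(
    'http://localhost:8000/login',
    json={'username': 'saltuser', 'password': 'saltpass', 'eauth': 'pam'},
    headers={'Accept': 'application/json'},
)
resp.raise_for_status()
token = resp.json()['return'][0]['token']   # reuse later as the X-Auth-Token header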
'An endpoint to determine salt-api capabilities .. http:get:: / :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000 .. code-block:: http GET / HTTP/1.1 Host: localhost:8000 Accept: application/json **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Type: application/json Content-Length: 83 {"clients": ["local", "local_async", "runner", "runner_async"], "return": "Welcome"}'
def get(self):
    ret = {'clients': list(self.saltclients.keys()),
           'return': 'Welcome'}
    self.write(self.serialize(ret))
'Send one or more Salt commands (lowstates) in the request body .. http:post:: / :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. **Example request:** .. code-block:: bash curl -si https://localhost:8000 \ -H "Accept: application/x-yaml" \ -H "X-Auth-Token: d40d1e1e" \ -d client=local \ -d tgt=\'*\' \ -d fun=\'test.ping\' \ -d arg .. code-block:: http POST / HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml X-Auth-Token: d40d1e1e Content-Length: 36 Content-Type: application/x-www-form-urlencoded fun=test.ping&arg&client=local&tgt=* **Example response:** Responses are an in-order list of the lowstate\'s return data. In the event of an exception running a command, the return will be a string instead of a mapping. .. code-block:: http HTTP/1.1 200 OK Content-Length: 200 Allow: GET, HEAD, POST Content-Type: application/x-yaml return: - ms-0: true ms-1: true ms-2: true ms-3: true ms-4: true .. admonition:: multiple commands Note that if multiple :term:`lowstate` structures are sent, the Salt API will execute them in serial, and will not stop execution upon failure of a previous job. If you need to have commands executed in order and stop on failure please use compound-command-execution.'
@tornado.web.asynchronous
def post(self):
    if not self._verify_auth():
        self.redirect('/login')
        return
    self.disbatch()
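A matching client-side sketch for the lowstate POST documented above, reusing the token from the login sketch. Illustrative only: it assumes JSON lowstate bodies are accepted (the docstring's curl example uses the form-encoded equivalent), and the host and target are placeholders.

# Illustrative sketch: send one lowstate chunk to the root URL.
# Assumes `token` from the /login sketch above; host and target are placeholders.
import requests

lowstate = [{'client': 'local', 'tgt': '*', 'fun': 'test.ping'}]
resp = requests.post(
    'http://localhost:8000/',
    json=lowstate,
    headers={'Accept': 'application/json', 'X-Auth-Token': token},
)
print(resp.json()['return'])   # in-order list of each lowstate's return data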
'Disbatch all lowstates to the appropriate clients'
@tornado.gen.coroutine def disbatch(self):
ret = [] for low in self.lowstate: if (not self._verify_client(low)): return if ((self.token is not None) and ('token' not in low)): low['token'] = self.token if (not (('token' in low) or (('username' in low) and ('password' in low) and ('eauth' in low)))): ret.append('Failed to authenticate') break try: chunk_ret = (yield getattr(self, '_disbatch_{0}'.format(low['client']))(low)) ret.append(chunk_ret) except EauthAuthenticationError as exc: ret.append('Failed to authenticate') break except Exception as ex: ret.append('Unexpected exception while handling request: {0}'.format(ex)) logger.error('Unexpected exception while handling request:', exc_info=True) self.write(self.serialize({'return': ret})) self.finish()
'Dispatch local client commands'
@tornado.gen.coroutine def _disbatch_local(self, chunk):
chunk_ret = {} f_call = self._format_call_run_job_async(chunk) try: pub_data = (yield self.saltclients['local'](*f_call.get('args', ()), **f_call.get('kwargs', {}))) except EauthAuthenticationError: raise tornado.gen.Return('Not authorized to run this job') if ('jid' not in pub_data): raise tornado.gen.Return('No minions matched the target. No command was sent, no jid was assigned.') minions_remaining = pub_data['minions'] syndic_min_wait = None if self.application.opts['order_masters']: syndic_min_wait = tornado.gen.sleep(self.application.opts['syndic_wait']) job_not_running = self.job_not_running(pub_data['jid'], chunk['tgt'], f_call['kwargs']['tgt_type'], minions_remaining=minions_remaining) if (syndic_min_wait is not None): (yield syndic_min_wait) chunk_ret = (yield self.all_returns(pub_data['jid'], finish_futures=[job_not_running], minions_remaining=minions_remaining)) raise tornado.gen.Return(chunk_ret)
'Return a future which will complete once all returns are completed (according to minions_remaining), or one of the passed in "finish_futures" completes'
@tornado.gen.coroutine def all_returns(self, jid, finish_futures=None, minions_remaining=None):
if (finish_futures is None): finish_futures = [] if (minions_remaining is None): minions_remaining = [] ret_tag = tagify([jid, 'ret'], 'job') chunk_ret = {} while True: ret_event = self.application.event_listener.get_event(self, tag=ret_tag) f = (yield Any(([ret_event] + finish_futures))) if (f in finish_futures): raise tornado.gen.Return(chunk_ret) event = f.result() chunk_ret[event['data']['id']] = event['data']['return'] try: minions_remaining.remove(event['data']['id']) except ValueError: pass if (len(minions_remaining) == 0): raise tornado.gen.Return(chunk_ret)
'Return a future which will complete once jid (passed in) is no longer running on tgt'
@tornado.gen.coroutine def job_not_running(self, jid, tgt, tgt_type, minions_remaining=None):
if (minions_remaining is None): minions_remaining = [] ping_pub_data = (yield self.saltclients['local'](tgt, 'saltutil.find_job', [jid], tgt_type=tgt_type)) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False while True: try: event = (yield self.application.event_listener.get_event(self, tag=ping_tag, timeout=self.application.opts['gather_job_timeout'])) except TimeoutException: if (not minion_running): raise tornado.gen.Return(True) else: ping_pub_data = (yield self.saltclients['local'](tgt, 'saltutil.find_job', [jid], tgt_type=tgt_type)) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False continue if (event['data'].get('return', {}) == {}): continue minion_running = True id_ = event['data']['id'] if (id_ not in minions_remaining): minions_remaining.append(event['data']['id'])
'Disbatch local client_async commands'
@tornado.gen.coroutine
def _disbatch_local_async(self, chunk):
    f_call = self._format_call_run_job_async(chunk)
    pub_data = yield self.saltclients['local_async'](*f_call.get('args', ()),
                                                     **f_call.get('kwargs', {}))
    raise tornado.gen.Return(pub_data)
'Disbatch runner client commands'
@tornado.gen.coroutine
def _disbatch_runner(self, chunk):
    pub_data = self.saltclients['runner'](chunk)
    tag = pub_data['tag'] + '/ret'
    try:
        event = yield self.application.event_listener.get_event(self, tag=tag)
        raise tornado.gen.Return(event['data']['return'])
    except TimeoutException:
        raise tornado.gen.Return('Timeout waiting for runner to execute')
'Disbatch runner client_async commands'
@tornado.gen.coroutine
def _disbatch_runner_async(self, chunk):
    pub_data = self.saltclients['runner'](chunk)
    raise tornado.gen.Return(pub_data)
'A convenience URL for getting lists of minions or getting minion details .. http:get:: /minions/(mid) :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/minions/ms-3 .. code-block:: http GET /minions/ms-3 HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 129005 Content-Type: application/x-yaml return: - ms-3: grains.items:'
@tornado.web.asynchronous
def get(self, mid=None):
    if not self._verify_auth():
        self.redirect('/login')
        return
    self.lowstate = [{'client': 'local',
                      'tgt': mid or '*',
                      'fun': 'grains.items'}]
    self.disbatch()
'Start an execution command and immediately return the job id .. http:post:: /minions :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. The ``client`` option will be set to :py:meth:`~salt.client.LocalClient.local_async`. **Example request:** .. code-block:: bash curl -sSi localhost:8000/minions \ -H "Accept: application/x-yaml" \ -d tgt=\'*\' \ -d fun=\'status.diskusage\' .. code-block:: http POST /minions HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml Content-Length: 26 Content-Type: application/x-www-form-urlencoded tgt=*&fun=status.diskusage **Example response:** .. code-block:: http HTTP/1.1 202 Accepted Content-Length: 86 Content-Type: application/x-yaml return: - jid: \'20130603122505459265\' minions: [ms-4, ms-3, ms-2, ms-1, ms-0]'
@tornado.web.asynchronous
def post(self):
    if not self._verify_auth():
        self.redirect('/login')
        return
    for low in self.lowstate:
        if 'client' not in low:
            low['client'] = 'local_async'
            continue
        if low.get('client') != 'local_async':
            self.set_status(400)
            self.write("We don't serve your kind here")
            self.finish()
            return
    self.disbatch()
'A convenience URL for getting lists of previously run jobs or getting the return from a single job .. http:get:: /jobs/(jid) List jobs or show a single job from the job cache. :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/jobs .. code-block:: http GET /jobs HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 165 Content-Type: application/x-yaml return: - \'20121130104633606931\': Arguments: - \'3\' Function: test.fib Start Time: 2012, Nov 30 10:46:33.606931 Target: jerry Target-type: glob **Example request:** .. code-block:: bash curl -i localhost:8000/jobs/20121130104633606931 .. code-block:: http GET /jobs/20121130104633606931 HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 73 Content-Type: application/x-yaml info: - Arguments: - \'3\' Function: test.fib Minions: - jerry Start Time: 2012, Nov 30 10:46:33.606931 Target: \'*\' Target-type: glob User: saltdev jid: \'20121130104633606931\' return: - jerry: - - 0 - 1 - 1 - 2 - 6.9141387939453125e-06'
@tornado.web.asynchronous
def get(self, jid=None):
    if not self._verify_auth():
        self.redirect('/login')
        return
    if jid:
        self.lowstate = [{'fun': 'jobs.list_job',
                          'jid': jid,
                          'client': 'runner'}]
    else:
        self.lowstate = [{'fun': 'jobs.list_jobs',
                          'client': 'runner'}]
    self.disbatch()
'Run commands bypassing the :ref:`normal session handling <rest_cherrypy-auth>` .. http:post:: /run This entry point is primarily for "one-off" commands. Each request must pass full Salt authentication credentials. Otherwise this URL is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`. :term:`lowstate` data describing Salt commands must be sent in the request body. :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -sS localhost:8000/run \ -H \'Accept: application/x-yaml\' \ -d client=\'local\' \ -d tgt=\'*\' \ -d fun=\'test.ping\' \ -d username=\'saltdev\' \ -d password=\'saltdev\' \ -d eauth=\'pam\' .. code-block:: http POST /run HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml Content-Length: 75 Content-Type: application/x-www-form-urlencoded client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 73 Content-Type: application/x-yaml return: - ms-0: true ms-1: true ms-2: true ms-3: true ms-4: true'
@tornado.web.asynchronous
def post(self):
    self.disbatch()
'An HTTP stream of the Salt master event bus This stream is formatted per the Server Sent Events (SSE) spec. Each event is formatted as JSON. .. http:get:: /events :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -NsS localhost:8000/events .. code-block:: http GET /events HTTP/1.1 Host: localhost:8000 **Example response:** .. code-block:: http HTTP/1.1 200 OK Connection: keep-alive Cache-Control: no-cache Content-Type: text/event-stream;charset=utf-8 retry: 400 data: {\'tag\': \'\', \'data\': {\'minions\': [\'ms-4\', \'ms-3\', \'ms-2\', \'ms-1\', \'ms-0\']}} data: {\'tag\': \'20130802115730568475\', \'data\': {\'jid\': \'20130802115730568475\', \'return\': True, \'retcode\': 0, \'success\': True, \'cmd\': \'_return\', \'fun\': \'test.ping\', \'id\': \'ms-1\'}} The event stream can be easily consumed via JavaScript: .. code-block:: javascript # Note, you must be authenticated! var source = new EventSource(\'/events\'); source.onopen = function() { console.debug(\'opening\') }; source.onerror = function(e) { console.debug(\'error!\', e) }; source.onmessage = function(e) { console.debug(e.data) }; Or using CORS: .. code-block:: javascript var source = new EventSource(\'/events\', {withCredentials: true}); Some browser clients lack CORS support for the ``EventSource()`` API. Such clients may instead pass the :mailheader:`X-Auth-Token` value as an URL parameter: .. code-block:: bash curl -NsS localhost:8000/events/6d1b722e It is also possible to consume the stream via the shell. Records are separated by blank lines; the ``data:`` and ``tag:`` prefixes will need to be removed manually before attempting to unserialize the JSON. curl\'s ``-N`` flag turns off input buffering which is required to process the stream incrementally. Here is a basic example of printing each event as it comes in: .. code-block:: bash curl -NsS localhost:8000/events |\ while IFS= read -r line ; do echo $line done Here is an example of using awk to filter events based on tag: .. code-block:: bash curl -NsS localhost:8000/events |\ awk \' BEGIN { RS=""; FS="\\n" } $1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 } tag: salt/job/20140112010149808995/new data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}} tag: 20140112010149808995 data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}'
@tornado.gen.coroutine
def get(self):
    if not self._verify_auth():
        self.redirect('/login')
        return
    self.set_header('Content-Type', 'text/event-stream')
    self.set_header('Cache-Control', 'no-cache')
    self.set_header('Connection', 'keep-alive')
    self.write(u'retry: {0}\n'.format(400))
    self.flush()
    while True:
        try:
            event = yield self.application.event_listener.get_event(self)
            self.write(u'tag: {0}\n'.format(event.get('tag', '')))
            self.write(u'data: {0}\n\n'.format(json.dumps(event)))
            self.flush()
        except TimeoutException:
            break
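Beyond the curl and EventSource examples in the docstring, the stream can also be consumed from Python. A hedged sketch using requests and the /events/<token> URL form mentioned above; the host and token handling are placeholders.

# Illustrative sketch: read the SSE stream with requests (stream=True keeps the
# connection open). Uses the token-in-URL form from the docstring above.
import json
import requests

resp = requests.get('http://localhost:8000/events/{0}'.format(token), stream=True)
for line in resp.iter_lines(decode_unicode=True):
    if line and line.startswith('data: '):
        event = json.loads(line[len('data: '):])
        print(event.get('tag'), event.get('data'))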
'Fire an event in Salt with a custom event tag and data .. http:post:: /hook :status 200: |200| :status 401: |401| :status 406: |406| :status 413: request body is too large **Example request:** .. code-block:: bash curl -sS localhost:8000/hook -d foo=\'Foo!\' -d bar=\'Bar!\' .. code-block:: http POST /hook HTTP/1.1 Host: localhost:8000 Content-Length: 16 Content-Type: application/x-www-form-urlencoded foo=Foo&bar=Bar! **Example response**: .. code-block:: http HTTP/1.1 200 OK Content-Length: 14 Content-Type: application/json {"success": true} As a practical example, an internal continuous-integration build server could send an HTTP POST request to the URL ``http://localhost:8000/hook/mycompany/build/success`` which contains the result of a build and the SHA of the version that was built as JSON. That would then produce the following event in Salt that could be used to kick off a deployment via Salt\'s Reactor:: Event fired at Fri Feb 14 17:40:11 2014 Tag: salt/netapi/hook/mycompany/build/success Data: {\'_stamp\': \'2014-02-14_17:40:11.440996\', \'headers\': { \'X-My-Secret-Key\': \'F0fAgoQjIT@W\', \'Content-Length\': \'37\', \'Content-Type\': \'application/json\', \'Host\': \'localhost:8000\', \'Remote-Addr\': \'127.0.0.1\'}, \'post\': {\'revision\': \'aa22a3c4b2e7\', \'result\': True}} Salt\'s Reactor could listen for the event: .. code-block:: yaml reactor: - \'salt/netapi/hook/mycompany/build/*\': - /srv/reactor/react_ci_builds.sls And finally deploy the new build: .. code-block:: yaml {% set secret_key = data.get(\'headers\', {}).get(\'X-My-Secret-Key\') %} {% set build = data.get(\'post\', {}) %} {% if secret_key == \'F0fAgoQjIT@W\' and build.result == True %} deploy_my_app: cmd.state.sls: - tgt: \'application*\' - arg: - myapp.deploy - kwarg: pillar: revision: {{ revision }} {% endif %}'
def post(self, tag_suffix=None):
    disable_auth = self.application.mod_opts.get('webhook_disable_auth')
    if not disable_auth and not self._verify_auth():
        self.redirect('/login')
        return
    tag = 'salt/netapi/hook'
    if tag_suffix:
        tag += tag_suffix
    self.event = salt.utils.event.get_event(
        'master',
        self.application.opts['sock_dir'],
        self.application.opts['transport'],
        opts=self.application.opts,
        listen=False)
    arguments = {}
    for argname in self.request.query_arguments:
        value = self.get_arguments(argname)
        if len(value) == 1:
            value = value[0]
        arguments[argname] = value
    ret = self.event.fire_event({
        'post': self.raw_data,
        'get': arguments,
        'headers': dict(self.request.headers),
    }, tag)
    self.write(self.serialize({'success': ret}))
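For example, the CI scenario from the docstring could fire the hook from Python rather than curl. Illustrative only; the URL suffix, header, and payload values are taken from the example above and are placeholders.

# Illustrative sketch: fire the webhook from a build script, mirroring the
# docstring's CI example. All values below are placeholders from that example.
import requests

requests.post(
    'http://localhost:8000/hook/mycompany/build/success',
    json={'revision': 'aa22a3c4b2e7', 'result': True},
    headers={'X-My-Secret-Key': 'F0fAgoQjIT@W'},
)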
'handler is expected to be the server side end of a websocket connection.'
def __init__(self, handler):
    self.handler = handler
    self.jobs = {}
    self.minions = {}
'Publishes minions as a list of dicts.'
def publish_minions(self):
    logger.debug('in publish minions')
    minions = {}
    logger.debug('starting loop')
    for (minion, minion_info) in six.iteritems(self.minions):
        logger.debug(minion)
        curr_minion = {}
        curr_minion.update(minion_info)
        curr_minion.update({'id': minion})
        minions[minion] = curr_minion
    logger.debug('ended loop')
    ret = {'minions': minions}
    self.handler.write_message(u'{0}\n\n'.format(json.dumps(ret)))
'Publishes the data to the event stream.'
def publish(self, key, data):
    publish_data = {key: data}
    pub = u'{0}\n\n'.format(json.dumps(publish_data))
    self.handler.write_message(pub)
'Associate grains data with a minion and publish minion update'
def process_minion_update(self, event_data):
    tag = event_data['tag']
    event_info = event_data['data']
    (_, _, _, _, mid) = tag.split('/')
    if not self.minions.get(mid, None):
        self.minions[mid] = {}
    minion = self.minions[mid]
    minion.update({'grains': event_info['return']})
    logger.debug('In process minion grains update with minions={0}'.format(self.minions))
    self.publish_minions()
'Process a /ret event returned by Salt for a particular minion. These events contain the returned results from a particular execution.'
def process_ret_job_event(self, event_data):
    tag = event_data['tag']
    event_info = event_data['data']
    (_, _, jid, _, mid) = tag.split('/')
    job = self.jobs.setdefault(jid, {})
    minion = job.setdefault('minions', {}).setdefault(mid, {})
    minion.update({'return': event_info['return']})
    minion.update({'retcode': event_info['retcode']})
    minion.update({'success': event_info['success']})
    job_complete = all([minion['success'] for (mid, minion) in six.iteritems(job['minions'])])
    if job_complete:
        job['state'] = 'complete'
    self.publish('jobs', self.jobs)
'Creates a new job with properties from the event data like jid, function, args, timestamp. Also sets the initial state to started. Minions that are participating in this job are also noted.'
def process_new_job_event(self, event_data):
    job = None
    tag = event_data['tag']
    event_info = event_data['data']
    minions = {}
    for mid in event_info['minions']:
        minions[mid] = {'success': False}
    job = {'jid': event_info['jid'],
           'start_time': event_info['_stamp'],
           'minions': minions,
           'fun': event_info['fun'],
           'tgt': event_info['tgt'],
           'tgt_type': event_info['tgt_type'],
           'state': 'running'}
    self.jobs[event_info['jid']] = job
    self.publish('jobs', self.jobs)
'Tag: salt/key Data: {\'_stamp\': \'2014-05-20T22:45:04.345583\', \'act\': \'delete\', \'id\': \'compute.home\', \'result\': True}'
def process_key_event(self, event_data):
    tag = event_data['tag']
    event_info = event_data['data']
    if event_info['act'] == 'delete':
        self.minions.pop(event_info['id'], None)
    elif event_info['act'] == 'accept':
        self.minions.setdefault(event_info['id'], {})
    self.publish_minions()
'Check if any minions have connected or dropped. Send a message to the client if they have.'
def process_presence_events(self, salt_data, token, opts):
logger.debug('In presence') changed = False if set(salt_data['data'].get('lost', [])): dropped_minions = set(salt_data['data'].get('lost', [])) else: dropped_minions = (set(self.minions) - set(salt_data['data'].get('present', []))) for minion in dropped_minions: changed = True logger.debug('Popping {0}'.format(minion)) self.minions.pop(minion, None) if set(salt_data['data'].get('new', [])): logger.debug('got new minions') new_minions = set(salt_data['data'].get('new', [])) changed = True elif (set(salt_data['data'].get('present', [])) - set(self.minions)): logger.debug('detected new minions') new_minions = (set(salt_data['data'].get('present', [])) - set(self.minions)) changed = True else: new_minions = [] tgt = ','.join(new_minions) for mid in new_minions: logger.debug('Adding minion') self.minions[mid] = {} if tgt: changed = True client = salt.netapi.NetapiClient(opts) client.run({'fun': 'grains.items', 'tgt': tgt, 'expr_type': 'list', 'mode': 'client', 'client': 'local', 'async': 'local_async', 'token': token}) if changed: self.publish_minions()
'Process events and publish data'
def process(self, salt_data, token, opts):
logger.debug('In process {0}'.format(threading.current_thread())) logger.debug(salt_data['tag']) logger.debug(salt_data) parts = salt_data['tag'].split('/') if (len(parts) < 2): return if (parts[1] == 'job'): logger.debug('In job part 1') if (parts[3] == 'new'): logger.debug('In new job') self.process_new_job_event(salt_data) elif (parts[3] == 'ret'): logger.debug('In ret') self.process_ret_job_event(salt_data) if (salt_data['data']['fun'] == 'grains.items'): self.process_minion_update(salt_data) elif (parts[1] == 'key'): logger.debug('In key') self.process_key_event(salt_data) elif (parts[1] == 'presence'): self.process_presence_events(salt_data, token, opts)
'Check the token; return a 401 if the token is invalid, else open the websocket connection.'
def get(self, token):
    logger.debug('In the websocket get method')
    self.token = token
    if not self.application.auth.get_tok(token):
        logger.debug('Refusing websocket connection, bad token!')
        self.send_error(401)
        return
    super(AllEventsHandler, self).get(token)
'Return a websocket connection to Salt representing Salt\'s "real time" event stream.'
def open(self, token):
self.connected = False
'Listens for a "websocket client ready" message. Once that message is received, an asynchronous job is started that yields messages to the client. These messages make up Salt\'s "real time" event stream.'
@tornado.gen.coroutine def on_message(self, message):
logger.debug('Got websocket message {0}'.format(message)) if (message == 'websocket client ready'): if self.connected: logger.debug('Websocket already connected, returning') return self.connected = True while True: try: event = (yield self.application.event_listener.get_event(self)) self.write_message(json.dumps(event)) except Exception as err: logger.info('Error! Ending server side websocket connection. Reason = {0}'.format(str(err))) break self.close() else: pass
'Cleanup.'
def on_close(self, *args, **kwargs):
    logger.debug('In the websocket close method')
    self.close()
'If cors is enabled, check that the origin is allowed'
def check_origin(self, origin):
    mod_opts = self.application.mod_opts
    if mod_opts.get('cors_origin'):
        return bool(_check_cors_origin(origin, mod_opts['cors_origin']))
    else:
        return super(AllEventsHandler, self).check_origin(origin)
'Listens for a "websocket client ready" message. Once that message is received, an asynchronous job is started that yields messages to the client. These messages make up Salt\'s "real time" event stream.'
@tornado.gen.coroutine def on_message(self, message):
logger.debug('Got websocket message {0}'.format(message)) if (message == 'websocket client ready'): if self.connected: logger.debug('Websocket already connected, returning') return self.connected = True evt_processor = event_processor.SaltInfo(self) client = salt.netapi.NetapiClient(self.application.opts) client.run({'fun': 'grains.items', 'tgt': '*', 'token': self.token, 'mode': 'client', 'async': 'local_async', 'client': 'local'}) while True: try: event = (yield self.application.event_listener.get_event(self)) evt_processor.process(event, self.token, self.application.opts) except Exception as err: logger.debug('Error! Ending server side websocket connection. Reason = {0}'.format(str(err))) break self.close() else: pass
'Return the primary name associated with the load; if an empty string is returned then the load does not match the function'
def load_name(self, load):
    if 'eauth' not in load:
        return ''
    fstr = '{0}.auth'.format(load['eauth'])
    if fstr not in self.auth:
        return ''
    try:
        pname_arg = salt.utils.arg_lookup(self.auth[fstr])['args'][0]
        return load[pname_arg]
    except IndexError:
        return ''
'Return the token and set the cache data for use. Do not call this directly! Use the time_auth method to overcome timing attacks'
def __auth_call(self, load):
if ('eauth' not in load): return False fstr = '{0}.auth'.format(load['eauth']) if (fstr not in self.auth): return False fcall = salt.utils.format_call(self.auth[fstr], load, expected_extra_kws=AUTH_INTERNAL_KEYWORDS) try: if ('kwargs' in fcall): return self.auth[fstr](*fcall['args'], **fcall['kwargs']) else: return self.auth[fstr](*fcall['args']) except Exception as e: log.debug('Authentication module threw {0}'.format(e)) return False
'Make sure that all failures happen in the same amount of time'
def time_auth(self, load):
start = time.time() ret = self.__auth_call(load) if ret: return ret f_time = (time.time() - start) if (f_time > self.max_fail): self.max_fail = f_time deviation = (self.max_fail / 4) r_time = random.SystemRandom().uniform((self.max_fail - deviation), (self.max_fail + deviation)) while ((start + r_time) > time.time()): time.sleep(0.001) return False
'Returns the ACL for a specific user. Returns None if eauth doesn\'t provide any for the user, i.e. None means: use the ACL declared in the master config.'
def __get_acl(self, load):
if ('eauth' not in load): return None mod = self.opts['eauth_acl_module'] if (not mod): mod = load['eauth'] fstr = '{0}.acl'.format(mod) if (fstr not in self.auth): return None fcall = salt.utils.format_call(self.auth[fstr], load, expected_extra_kws=AUTH_INTERNAL_KEYWORDS) try: return self.auth[fstr](*fcall['args'], **fcall['kwargs']) except Exception as e: log.debug('Authentication module threw {0}'.format(e)) return None
'Allows the eauth module to modify the access list right before it is applied to the request. For example, the LDAP auth module expands entries'
def __process_acl(self, load, auth_list):
if ('eauth' not in load): return auth_list fstr = '{0}.process_acl'.format(load['eauth']) if (fstr not in self.auth): return auth_list try: return self.auth[fstr](auth_list, self.opts) except Exception as e: log.debug('Authentication module threw {0}'.format(e)) return auth_list
'Read in a load and return the groups a user is a member of by asking the appropriate provider'
def get_groups(self, load):
if ('eauth' not in load): return False fstr = '{0}.groups'.format(load['eauth']) if (fstr not in self.auth): return False fcall = salt.utils.format_call(self.auth[fstr], load, expected_extra_kws=AUTH_INTERNAL_KEYWORDS) try: return self.auth[fstr](*fcall['args'], **fcall['kwargs']) except IndexError: return False except Exception: return None
'Return bool if requesting user is allowed to set custom expire'
def _allow_custom_expire(self, load):
expire_override = self.opts.get('token_expire_user_override', False) if (expire_override is True): return True if isinstance(expire_override, collections.Mapping): expire_whitelist = expire_override.get(load['eauth'], []) if isinstance(expire_whitelist, collections.Iterable): if (load.get('username') in expire_whitelist): return True return False
'Run time_auth and create a token. Return False or the token'
def mk_token(self, load):
if (not self.authenticate_eauth(load)): return {} fstr = '{0}.auth'.format(load['eauth']) hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5')) tok = str(hash_type(os.urandom(512)).hexdigest()) t_path = os.path.join(self.opts['token_dir'], tok) while os.path.isfile(t_path): tok = str(hash_type(os.urandom(512)).hexdigest()) t_path = os.path.join(self.opts['token_dir'], tok) if self._allow_custom_expire(load): token_expire = load.pop('token_expire', self.opts['token_expire']) else: _ = load.pop('token_expire', None) token_expire = self.opts['token_expire'] tdata = {'start': time.time(), 'expire': (time.time() + token_expire), 'name': self.load_name(load), 'eauth': load['eauth'], 'token': tok} if self.opts['keep_acl_in_token']: acl_ret = self.__get_acl(load) tdata['auth_list'] = acl_ret if ('groups' in load): tdata['groups'] = load['groups'] try: with salt.utils.files.set_umask(127): with salt.utils.files.fopen(t_path, 'w+b') as fp_: fp_.write(self.serial.dumps(tdata)) except (IOError, OSError): log.warning('Authentication failure: can not write token file "{0}".'.format(t_path)) return {} return tdata
'Return the name associated with the token, or False if the token is not valid'
def get_tok(self, tok):
t_path = os.path.join(self.opts['token_dir'], tok) if (not os.path.isfile(t_path)): return {} try: with salt.utils.files.fopen(t_path, 'rb') as fp_: tdata = self.serial.loads(fp_.read()) except (IOError, OSError): log.warning('Authentication failure: can not read token file "{0}".'.format(t_path)) return {} rm_tok = False if ('expire' not in tdata): rm_tok = True if (tdata.get('expire', '0') < time.time()): rm_tok = True if rm_tok: try: os.remove(t_path) return {} except (IOError, OSError): pass return tdata
'Authenticate a user by the token specified in load. Return the token object or False if auth failed.'
def authenticate_token(self, load):
    token = self.get_tok(load['token'])
    if not token or token['eauth'] not in self.opts['external_auth']:
        log.warning('Authentication failure of type "token" occurred.')
        return False
    return token
'Authenticate a user by the external auth module specified in load. Return True on success or False on failure.'
def authenticate_eauth(self, load):
    if 'eauth' not in load:
        log.warning('Authentication failure of type "eauth" occurred.')
        return False
    if load['eauth'] not in self.opts['external_auth']:
        log.warning('Authentication failure of type "eauth" occurred.')
        return False
    if not self.time_auth(load):
        log.warning('Authentication failure of type "eauth" occurred.')
        return False
    return True
'Authenticate a user by the key passed in load. Return the effective user id (name) if it differs from the specified one (for sudo). If the effective user id is the same as the passed one, return True on success or False on failure.'
def authenticate_key(self, load, key):
auth_key = load.pop('key') if (not auth_key): log.warning('Authentication failure of type "user" occurred.') return False if ('user' in load): auth_user = AuthUser(load['user']) if auth_user.is_sudo(): if (auth_key != key[self.opts.get('user', 'root')]): log.warning('Authentication failure of type "user" occurred.') return False return auth_user.sudo_name() elif ((load['user'] == self.opts.get('user', 'root')) or (load['user'] == 'root')): if (auth_key != key[self.opts.get('user', 'root')]): log.warning('Authentication failure of type "user" occurred.') return False elif auth_user.is_running_user(): if (auth_key != key.get(load['user'])): log.warning('Authentication failure of type "user" occurred.') return False elif (auth_key == key.get('root')): pass elif (load['user'] in key): if (auth_key != key[load['user']]): log.warning('Authentication failure of type "user" occurred.') return False return load['user'] else: log.warning('Authentication failure of type "user" occurred.') return False elif (auth_key != key[salt.utils.get_user()]): log.warning('Authentication failure of type "other" occurred.') return False return True
'Retrieve the access list for the user specified in load. The list is built by the eauth module or from the master eauth configuration. Return None if the current configuration doesn\'t provide any ACL for the user. Return an empty list if the user has no rights to execute anything on this master, and a non-empty list if the user is allowed to execute particular functions.'
def get_auth_list(self, load):
auth_list = self.__get_acl(load) if (auth_list is not None): return auth_list if (load['eauth'] not in self.opts['external_auth']): log.warning('Authorization failure occurred.') return None name = self.load_name(load) groups = self.get_groups(load) eauth_config = self.opts['external_auth'][load['eauth']] if ((groups is None) or (groups is False)): groups = [] group_perm_keys = [item for item in eauth_config if item.endswith('%')] group_auth_match = False for group_config in group_perm_keys: group_config = group_config.rstrip('%') for group in groups: if (group == group_config): group_auth_match = True external_auth_in_db = False for entry in eauth_config: if entry.startswith('^'): external_auth_in_db = True break if (not ((((name in eauth_config) | ('*' in eauth_config)) | group_auth_match) | external_auth_in_db)): log.warning('Authorization failure occurred.') return None auth_list = [] if (name in eauth_config): auth_list = eauth_config[name] elif ('*' in eauth_config): auth_list = eauth_config['*'] if group_auth_match: auth_list = self.ckminions.fill_auth_list_from_groups(eauth_config, groups, auth_list) auth_list = self.__process_acl(load, auth_list) log.trace('Compiled auth_list: {0}'.format(auth_list)) return auth_list
'Gather and create the authorization data sets We\'re looking at several constructs here. Standard eauth: allow jsmith to auth via pam, and execute any command on server web1 external_auth: pam: jsmith: - web1: Django eauth: Import the django library, dynamically load the Django model called \'model\'. That model returns a data structure that matches the above for standard eauth. This is what determines who can do what to which machines django: ^model: <stuff returned from django> Active Directory Extended: Users in the AD group \'webadmins\' can run any command on server1 Users in the AD group \'webadmins\' can run test.ping and service.restart on machines that have a computer object in the AD \'webservers\' OU Users in the AD group \'webadmins\' can run commands defined in the custom attribute (custom attribute not implemented yet, this is for future use) ldap: webadmins%: <all users in the AD \'webadmins\' group> - server1: - ldap(OU=webservers,dc=int,dc=bigcompany,dc=com): - test.ping - service.restart - ldap(OU=Domain Controllers,dc=int,dc=bigcompany,dc=com): - allowed_fn_list_attribute^'
@property def auth_data(self):
auth_data = self.opts['external_auth'] merge_lists = self.opts['pillar_merge_lists'] if (('django' in auth_data) and ('^model' in auth_data['django'])): auth_from_django = salt.auth.django.retrieve_auth_entries() auth_data = salt.utils.dictupdate.merge(auth_data, auth_from_django, strategy='list', merge_lists=merge_lists) if (('ldap' in auth_data) and __opts__.get('auth.ldap.activedirectory', False)): auth_data['ldap'] = salt.auth.ldap.__expand_ldap_entries(auth_data['ldap']) log.debug(auth_data['ldap']) return auth_data
'Determine if token auth is valid and yield the adata'
def token(self, adata, load):
try: token = self.loadauth.get_tok(load['token']) except Exception as exc: log.error('Exception occurred when generating auth token: {0}'.format(exc)) (yield {}) if (not token): log.warning('Authentication failure of type "token" occurred.') (yield {}) for sub_auth in adata: for sub_adata in adata: if (token['eauth'] not in adata): continue if (not ((token['name'] in adata[token['eauth']]) | ('*' in adata[token['eauth']]))): continue (yield {'sub_auth': sub_auth, 'token': token}) (yield {})
'Determine if the given eauth is valid and yield the adata'
def eauth(self, adata, load):
for sub_auth in [adata]: if (load['eauth'] not in sub_auth): continue try: name = self.loadauth.load_name(load) if (not ((name in sub_auth[load['eauth']]) | ('*' in sub_auth[load['eauth']]))): continue if (not self.loadauth.time_auth(load)): continue except Exception as exc: log.error('Exception occurred while authenticating: {0}'.format(exc)) continue (yield {'sub_auth': sub_auth, 'name': name}) (yield {})
'Read in the access system to determine if the validated user has requested rights'
def rights_check(self, form, sub_auth, name, load, eauth=None):
if load.get('eauth'): sub_auth = sub_auth[load['eauth']] good = self.ckminions.any_auth(form, (sub_auth[name] if (name in sub_auth) else sub_auth['*']), load.get('fun', None), load.get('arg', None), load.get('tgt', None), load.get('tgt_type', 'glob')) if (not good): if (load.get('fun', '') != 'saltutil.find_job'): return good return good
'Determine what type of authentication is being requested and pass authorization. Note: this will check that the user has at least one right that will let them execute "load"; this does not deal with conflicting rules'
def rights(self, form, load):
adata = self.auth_data good = False if load.get('token', False): for sub_auth in self.token(self.auth_data, load): if sub_auth: if self.rights_check(form, self.auth_data[sub_auth['token']['eauth']], sub_auth['token']['name'], load, sub_auth['token']['eauth']): return True log.warning('Authentication failure of type "token" occurred.') elif load.get('eauth'): for sub_auth in self.eauth(self.auth_data, load): if sub_auth: if self.rights_check(form, sub_auth['sub_auth'], sub_auth['name'], load, load['eauth']): return True log.warning('Authentication failure of type "eauth" occurred.') return False
'Execute the CLI options to fill in the extra data needed for the defined eauth system'
def cli(self, eauth):
ret = {} if (not eauth): print('External authentication system has not been specified') return ret fstr = '{0}.auth'.format(eauth) if (fstr not in self.auth): print('The specified external authentication system "{0}" is not available'.format(eauth)) return ret args = salt.utils.arg_lookup(self.auth[fstr]) for arg in args['args']: if (arg in self.opts): ret[arg] = self.opts[arg] elif arg.startswith('pass'): ret[arg] = getpass.getpass('{0}: '.format(arg)) else: ret[arg] = input('{0}: '.format(arg)) for (kwarg, default) in list(args['kwargs'].items()): if (kwarg in self.opts): ret['kwarg'] = self.opts[kwarg] else: ret[kwarg] = input('{0} [{1}]: '.format(kwarg, default)) if (('username' in ret) and (not ret['username'])): ret['username'] = salt.utils.get_user() return ret
'Create the token from the CLI and request the correct data to authenticate via the passed authentication mechanism'
def token_cli(self, eauth, load):
load['cmd'] = 'mk_token' load['eauth'] = eauth tdata = self._send_token_request(load) if ('token' not in tdata): return tdata try: with salt.utils.files.set_umask(127): with salt.utils.files.fopen(self.opts['token_file'], 'w+') as fp_: fp_.write(tdata['token']) except (IOError, OSError): pass return tdata
'Request a token from the master'
def mk_token(self, load):
load['cmd'] = 'mk_token' tdata = self._send_token_request(load) return tdata
'Request a token from the master'
def get_token(self, token):
load = {} load['token'] = token load['cmd'] = 'get_token' tdata = self._send_token_request(load) return tdata
'Instantiate an AuthUser object. Takes a user to represent, as a string.'
def __init__(self, user):
self.user = user
'Determines if the user is running with sudo. Returns True if the user is running with sudo and False if not.'
def is_sudo(self):
return self.user.startswith('sudo_')
'Determines if the user is the same user as the one running this process. Returns True if so and False if not.'
def is_running_user(self):
return (self.user == salt.utils.get_user())
'Returns the username of the sudoer, i.e. self.user without the \'sudo_\' prefix.'
def sudo_name(self):
return self.user.split('_', 1)[(-1)]
'Bind to an LDAP directory using passed credentials.'
def __init__(self, uri, server, port, tls, no_verify, binddn, bindpw, anonymous, accountattributename, activedirectory=False):
self.uri = uri self.server = server self.port = port self.tls = tls schema = ('ldaps' if tls else 'ldap') self.binddn = binddn self.bindpw = bindpw if (not HAS_LDAP): raise CommandExecutionError('LDAP connection could not be made, the python-ldap module is not installed. Install python-ldap to use LDAP external auth.') if (self.uri == ''): self.uri = '{0}://{1}:{2}'.format(schema, self.server, self.port) try: if no_verify: ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) self.ldap = ldap.initialize('{0}'.format(self.uri)) self.ldap.protocol_version = 3 self.ldap.set_option(ldap.OPT_REFERRALS, 0) if (not anonymous): self.ldap.simple_bind_s(self.binddn, self.bindpw) except Exception as ldap_error: raise CommandExecutionError('Failed to bind to LDAP server {0} as {1}: {2}'.format(self.uri, self.binddn, ldap_error))
'Create a new Tornado IPC server :param str/int socket_path: Path on the filesystem for the socket to bind to. This socket does not need to exist prior to calling this method, but parent directories should. It may also be of type \'int\', in which case it is used as the port for a tcp localhost connection. :param IOLoop io_loop: A Tornado ioloop to handle scheduling :param func payload_handler: A function to customize handling of incoming data.'
def __init__(self, socket_path, io_loop=None, payload_handler=None):
self.socket_path = socket_path self._started = False self.payload_handler = payload_handler self.sock = None self.io_loop = (io_loop or IOLoop.current()) self._closing = False
'Perform the work necessary to start up a Tornado IPC server. Blocks until the socket is established.'
def start(self):
log.trace('IPCServer: binding to socket: {0}'.format(self.socket_path)) if isinstance(self.socket_path, int): self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.sock.setblocking(0) self.sock.bind(('127.0.0.1', self.socket_path)) self.sock.listen(128) else: self.sock = tornado.netutil.bind_unix_socket(self.socket_path) tornado.netutil.add_accept_handler(self.sock, self.handle_connection, io_loop=self.io_loop) self._started = True
'Override this to handle the streams as they arrive :param IOStream stream: An IOStream for processing See https://tornado.readthedocs.io/en/latest/iostream.html#tornado.iostream.IOStream for additional details.'
@tornado.gen.coroutine def handle_stream(self, stream):
@tornado.gen.coroutine def _null(msg): raise tornado.gen.Return(None) def write_callback(stream, header): if header.get('mid'): @tornado.gen.coroutine def return_message(msg): pack = salt.transport.frame.frame_msg_ipc(msg, header={'mid': header['mid']}, raw_body=True) (yield stream.write(pack)) return return_message else: return _null if six.PY2: encoding = None else: encoding = 'utf-8' unpacker = msgpack.Unpacker(encoding=encoding) while (not stream.closed()): try: wire_bytes = (yield stream.read_bytes(4096, partial=True)) unpacker.feed(wire_bytes) for framed_msg in unpacker: body = framed_msg['body'] self.io_loop.spawn_callback(self.payload_handler, body, write_callback(stream, framed_msg['head'])) except tornado.iostream.StreamClosedError: log.trace('Client disconnected from IPC {0}'.format(self.socket_path)) break except socket.error as exc: if (exc.errno == 0): log.trace('Exception occured with error number 0, spurious exception: {0}'.format(exc)) else: log.error('Exception occurred while handling stream: {0}'.format(exc)) except Exception as exc: log.error('Exception occurred while handling stream: {0}'.format(exc))
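To make the calling convention concrete: handle_stream invokes payload_handler with the decoded body and a per-message reply callback. Below is a minimal, hedged sketch of standing up the server, relying only on the constructor signature documented above; the socket path and handler are placeholders.

# Illustrative sketch of wiring up the IPC server described above.
# The socket path is a placeholder; the handler signature (body, reply callback)
# follows handle_stream's spawn_callback(self.payload_handler, body, ...) above.
import tornado.ioloop
import salt.transport.ipc

io_loop = tornado.ioloop.IOLoop.current()

def handle_payload(body, reply):
    # `reply` is the write_callback produced per message in handle_stream
    print('received:', body)

server = salt.transport.ipc.IPCServer('/tmp/example-ipc.sock',
                                      io_loop=io_loop,
                                      payload_handler=handle_payload)
server.start()
io_loop.start()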
'Routines to handle any cleanup before the instance shuts down. Sockets and filehandles should be closed explicitly, to prevent leaks.'
def close(self):
    if self._closing:
        return
    self._closing = True
    if hasattr(self.sock, 'close'):
        self.sock.close()
'Create a new IPC client IPC clients cannot bind to ports, but must connect to existing IPC servers. Clients can then send messages to the server.'
def __singleton_init__(self, socket_path, io_loop=None):
self.io_loop = (io_loop or tornado.ioloop.IOLoop.current()) self.socket_path = socket_path self._closing = False self.stream = None if six.PY2: encoding = None else: encoding = 'utf-8' self.unpacker = msgpack.Unpacker(encoding=encoding)
'Connect to the IPC socket'
def connect(self, callback=None, timeout=None):
if (hasattr(self, '_connecting_future') and (not self._connecting_future.done())): future = self._connecting_future else: if hasattr(self, '_connecting_future'): self._connecting_future.exc_info() future = tornado.concurrent.Future() self._connecting_future = future self._connect(timeout=timeout) if (callback is not None): def handle_future(future): response = future.result() self.io_loop.add_callback(callback, response) future.add_done_callback(handle_future) return future
'Connect to a running IPCServer'
@tornado.gen.coroutine def _connect(self, timeout=None):
if isinstance(self.socket_path, int): sock_type = socket.AF_INET sock_addr = ('127.0.0.1', self.socket_path) else: sock_type = socket.AF_UNIX sock_addr = self.socket_path self.stream = None if (timeout is not None): timeout_at = (time.time() + timeout) while True: if self._closing: break if (self.stream is None): self.stream = IOStream(socket.socket(sock_type, socket.SOCK_STREAM), io_loop=self.io_loop) try: log.trace('IPCClient: Connecting to socket: {0}'.format(self.socket_path)) (yield self.stream.connect(sock_addr)) self._connecting_future.set_result(True) break except Exception as e: if self.stream.closed(): self.stream = None if ((timeout is None) or (time.time() > timeout_at)): if (self.stream is not None): self.stream.close() self.stream = None self._connecting_future.set_exception(e) break (yield tornado.gen.sleep(1))
'Routines to handle any cleanup before the instance shuts down. Sockets and filehandles should be closed explicitly, to prevent leaks.'
def close(self):
if self._closing: return self._closing = True if ((self.stream is not None) and (not self.stream.closed())): self.stream.close() if (self.io_loop in IPCClient.instance_map): loop_instance_map = IPCClient.instance_map[self.io_loop] key = str(self.socket_path) if (key in loop_instance_map): del loop_instance_map[key]
'Send a message to an IPC socket If the socket is not currently connected, a connection will be established. :param dict msg: The message to be sent :param int timeout: Timeout when sending message (Currently unimplemented)'
@tornado.gen.coroutine def send(self, msg, timeout=None, tries=None):
if (not self.connected()): (yield self.connect()) pack = salt.transport.frame.frame_msg_ipc(msg, raw_body=True) (yield self.stream.write(pack))
'Create a new Tornado IPC server :param dict opts: Salt options :param str/int socket_path: Path on the filesystem for the socket to bind to. This socket does not need to exist prior to calling this method, but parent directories should. It may also be of type \'int\', in which case it is used as the port for a tcp localhost connection. :param IOLoop io_loop: A Tornado ioloop to handle scheduling'
def __init__(self, opts, socket_path, io_loop=None):
self.opts = opts self.socket_path = socket_path self._started = False self.sock = None self.io_loop = (io_loop or IOLoop.current()) self._closing = False self.streams = set()
'Perform the work necessary to start up a Tornado IPC server. Blocks until the socket is established.'
def start(self):
log.trace('IPCMessagePublisher: binding to socket: {0}'.format(self.socket_path)) if isinstance(self.socket_path, int): self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.sock.setblocking(0) self.sock.bind(('127.0.0.1', self.socket_path)) self.sock.listen(128) else: self.sock = tornado.netutil.bind_unix_socket(self.socket_path) tornado.netutil.add_accept_handler(self.sock, self.handle_connection, io_loop=self.io_loop) self._started = True
'Send message to all connected sockets'
def publish(self, msg):
if (not len(self.streams)): return pack = salt.transport.frame.frame_msg_ipc(msg, raw_body=True) for stream in self.streams: self.io_loop.spawn_callback(self._write, stream, pack)
'Routines to handle any cleanup before the instance shuts down. Sockets and filehandles should be closed explicitly, to prevent leaks.'
def close(self):
if self._closing: return self._closing = True for stream in self.streams: stream.close() self.streams.clear() if hasattr(self.sock, 'close'): self.sock.close()
'Read a message from an IPC socket The socket must already be connected. The associated IO Loop must NOT be running. :param int timeout: Timeout when receiving message :return: message data if successful. None if timed out. Will raise an exception for all other error conditions.'
def read_sync(self, timeout=None):
if self.saved_data: return self.saved_data.pop(0) self._sync_ioloop_running = True self._read_sync_future = self._read_sync(timeout) self.io_loop.start() self._sync_ioloop_running = False ret_future = self._read_sync_future self._read_sync_future = None return ret_future.result()
'Asynchronously read messages and invoke a callback when they are ready. :param callback: A callback with the received data'
@tornado.gen.coroutine def read_async(self, callback):
while (not self.connected()): try: (yield self.connect(timeout=5)) except tornado.iostream.StreamClosedError: log.trace('Subscriber closed stream on IPC {0} before connect'.format(self.socket_path)) (yield tornado.gen.sleep(1)) except Exception as exc: log.error('Exception occurred while Subscriber connecting: {0}'.format(exc)) (yield tornado.gen.sleep(1)) (yield self._read_async(callback))
'Routines to handle any cleanup before the instance shuts down. Sockets and filehandles should be closed explicitly, to prevent leaks.'
def close(self):
if (not self._closing): IPCClient.close(self) if (self._read_sync_future is not None): self._read_sync_future.exc_info() if (self._read_stream_future is not None): self._read_stream_future.exc_info()
'Prepare the stack objects'
def __prep_stack(self):
global jobber_stack if (not self.stack): if jobber_stack: self.stack = jobber_stack else: self.stack = jobber_stack = self._setup_stack(ryn=self.ryn) log.debug('RAETReqChannel Using Jobber Stack at = {0}\n'.format(self.stack.ha))
'Set up and return the LaneStack and Yard used by the channel when the global stack is not already set up (such as in salt-call) to communicate to/from the minion'
def _setup_stack(self, ryn='manor'):
role = self.opts.get('id') if (not role): emsg = "Missing role('id') required to setup RAETReqChannel." log.error((emsg + '\n')) raise ValueError(emsg) kind = self.opts.get('__role') if (kind not in kinds.APPL_KINDS): emsg = "Invalid application kind = '{0}' for RAETReqChannel.".format(kind) log.error((emsg + '\n')) raise ValueError(emsg) if (kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.master], kinds.APPL_KIND_NAMES[kinds.applKinds.syndic]]): lanename = 'master' elif (kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.minion], kinds.APPL_KIND_NAMES[kinds.applKinds.caller]]): lanename = '{0}_{1}'.format(role, kind) else: emsg = "Unsupported application kind '{0}' for RAETReqChannel.".format(kind) log.error((emsg + '\n')) raise ValueError(emsg) name = ('channel' + nacling.uuid(size=18)) stack = LaneStack(name=name, lanename=lanename, sockdirpath=self.opts['sock_dir']) stack.Pk = raeting.PackKind.pack stack.addRemote(RemoteYard(stack=stack, name=ryn, lanename=lanename, dirpath=self.opts['sock_dir'])) log.debug('Created Channel Jobber Stack {0}\n'.format(stack.name)) return stack
'We don\'t need to do the crypted_transfer_decode_dictentry routine for raet, just wrap send.'
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
return self.send(load, tries, timeout)
'Send a message load and wait for a relative reply One shot wonder'
def send(self, load, tries=3, timeout=60, raw=False):
self.__prep_stack() tried = 1 start = time.time() track = nacling.uuid(18) src = (None, self.stack.local.name, track) self.route = {'src': src, 'dst': self.dst} msg = {'route': self.route, 'load': load} self.stack.transmit(msg, self.stack.nameRemotes[self.ryn].uid) while (track not in jobber_rxMsgs): self.stack.serviceAll() while self.stack.rxMsgs: (msg, sender) = self.stack.rxMsgs.popleft() jobber_rxMsgs[msg['route']['dst'][2]] = msg continue if (track in jobber_rxMsgs): break if ((time.time() - start) > timeout): if (tried >= tries): raise ValueError("Message send timed out after '{0} * {1}' secs. route = {2} track = {3} load={4}".format(tries, timeout, self.route, track, load)) self.stack.transmit(msg, self.stack.nameRemotes['manor'].uid) tried += 1 time.sleep(0.01) return jobber_rxMsgs.pop(track).get('return', {})
'Do anything necessary pre-fork. Since this is on the master side this will primarily be bind and listen (or the equivalent for your network library)'
def pre_fork(self, process_manager):
pass
'Do anything you need post-fork. This should handle all incoming payloads and call payload_handler. You will also be passed io_loop, for all of your async needs'
def post_fork(self, payload_handler, io_loop):
pass
'Do anything necessary pre-fork. Since this is on the master side this will primarily be used to create IPC channels and create our daemon process to do the actual publishing'
def pre_fork(self, process_manager):
pass
'Publish "load" to minions'
def publish(self, load):
raise NotImplementedError()
'Send "load" to the master.'
def send(self, load, tries=3, timeout=60, raw=False):
raise NotImplementedError()
'Send "load" to the master in a way that the load is only readable by the minion and the master (not other minions etc.)'
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
raise NotImplementedError()
'Send load across IPC push'
def send(self, load, tries=3, timeout=60):
raise NotImplementedError()
'Send "load" to the master.'
def send(self, load, tries=3, timeout=60, raw=False):
raise NotImplementedError()
'Send "load" to the master in a way that the load is only readable by the minion and the master (not other minions etc.)'
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
raise NotImplementedError()
'Return a future which completes when connected to the remote publisher'
def connect(self):
raise NotImplementedError()
'When jobs are received pass them (decoded) to callback'
def on_recv(self, callback):
raise NotImplementedError()
'If we have additional IPC transports other than UxD and TCP, add them here'
@staticmethod
def factory(opts, **kwargs):
    import salt.transport.ipc
    return salt.transport.ipc.IPCMessageClient(opts, **kwargs)
'If we have additional IPC transports other than UXD and TCP, add them here'
@staticmethod
def factory(opts, **kwargs):
    import salt.transport.ipc
    return salt.transport.ipc.IPCMessageServer(opts, **kwargs)
'Only create one instance of channel per __key()'
def __new__(cls, opts, **kwargs):
io_loop = (kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()) if (io_loop not in cls.instance_map): cls.instance_map[io_loop] = weakref.WeakValueDictionary() loop_instance_map = cls.instance_map[io_loop] key = cls.__key(opts, **kwargs) obj = loop_instance_map.get(key) if (obj is None): log.debug('Initializing new AsyncTCPReqChannel for {0}'.format(key)) obj = object.__new__(cls) obj.__singleton_init__(opts, **kwargs) loop_instance_map[key] = obj else: log.debug('Re-using AsyncTCPReqChannel for {0}'.format(key)) return obj
'In case of authentication errors, try to renegotiate authentication and retry the method. Indeed, we can fail too early in case of a master restart during a minion state execution call'
@tornado.gen.coroutine def _crypted_transfer(self, load, tries=3, timeout=60):
@tornado.gen.coroutine def _do_transfer(): data = (yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout)) if data: data = self.auth.crypticle.loads(data) if six.PY3: data = salt.transport.frame.decode_embedded_strs(data) raise tornado.gen.Return(data) if (not self.auth.authenticated): (yield self.auth.authenticate()) try: ret = (yield _do_transfer()) raise tornado.gen.Return(ret) except salt.crypt.AuthenticationError: (yield self.auth.authenticate()) ret = (yield _do_transfer()) raise tornado.gen.Return(ret)
'Send a request, return a future which will complete when we send the message'
@tornado.gen.coroutine def send(self, load, tries=3, timeout=60, raw=False):
try: if (self.crypt == 'clear'): ret = (yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)) else: ret = (yield self._crypted_transfer(load, tries=tries, timeout=timeout)) except tornado.iostream.StreamClosedError: raise SaltClientError('Connection to master lost') raise tornado.gen.Return(ret)
'Send the minion id to the master so that the master may better track the connection state of the minion. In case of authentication errors, try to renegotiate authentication and retry the method.'
@tornado.gen.coroutine def send_id(self, tok, force_auth):
load = {'id': self.opts['id'], 'tok': tok} @tornado.gen.coroutine def _do_transfer(): msg = self._package_load(self.auth.crypticle.dumps(load)) package = salt.transport.frame.frame_msg(msg, header=None) (yield self.message_client.write_to_stream(package)) raise tornado.gen.Return(True) if (force_auth or (not self.auth.authenticated)): count = 0 while ((count <= self.opts['tcp_authentication_retries']) or (self.opts['tcp_authentication_retries'] < 0)): try: (yield self.auth.authenticate()) break except SaltClientError as exc: log.debug(exc) count += 1 try: ret = (yield _do_transfer()) raise tornado.gen.Return(ret) except salt.crypt.AuthenticationError: (yield self.auth.authenticate()) ret = (yield _do_transfer()) raise tornado.gen.Return(ret)
'Register an on_recv callback'
def on_recv(self, callback):
if (callback is None): return self.message_client.on_recv(callback) @tornado.gen.coroutine def wrap_callback(body): if (not isinstance(body, dict)): body = msgpack.loads(body) if six.PY3: body = salt.transport.frame.decode_embedded_strs(body) ret = (yield self._decode_payload(body)) callback(ret) return self.message_client.on_recv(wrap_callback)
'Pre-fork we need to create the zmq router device'
def pre_fork(self, process_manager):
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager) if USE_LOAD_BALANCER: self.socket_queue = multiprocessing.Queue() process_manager.add_process(LoadBalancerServer, args=(self.opts, self.socket_queue)) elif (not salt.utils.platform.is_windows()): self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) _set_tcp_keepalive(self._socket, self.opts) self._socket.setblocking(0) self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
'After forking we need to create all of the local sockets to listen to the router. payload_handler: function to call with your payloads'
def post_fork(self, payload_handler, io_loop):
self.payload_handler = payload_handler self.io_loop = io_loop self.serial = salt.payload.Serial(self.opts) if USE_LOAD_BALANCER: self.req_server = LoadBalancerWorker(self.socket_queue, self.handle_message, io_loop=self.io_loop, ssl_options=self.opts.get('ssl')) else: if salt.utils.platform.is_windows(): self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) _set_tcp_keepalive(self._socket, self.opts) self._socket.setblocking(0) self._socket.bind((self.opts['interface'], int(self.opts['ret_port']))) self.req_server = SaltMessageServer(self.handle_message, io_loop=self.io_loop, ssl_options=self.opts.get('ssl')) self.req_server.add_socket(self._socket) self._socket.listen(self.backlog) salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)