'Assign .ushers by parsing opts'
| def action(self):
| masters = 'master'
port = None
if self.opts.value.get('cluster_mode', False):
masters = 'cluster_masters'
self.ushers.value = daemons.extract_masters(self.opts.value, masters=masters, port=port)
|
'Assign .ushers by parsing opts'
| def action(self):
| masters = 'cluster_masters'
port = 'raet_port'
self.ushers.value = daemons.extract_masters(self.opts.value, masters=masters, port=port, raise_if_empty=False)
|
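A minimal sketch, not salt.daemons.extract_masters itself: the join action later in this section reads each usher's 'external' entry as a (host, port) pair, so a plausible usher list built from opts could look like the following. The key names and the default port are assumptions for illustration only.

```python
def sketch_extract_masters(opts, masters='master', port=None):
    # Collect one or more master hosts from opts and pair each with a port.
    hosts = opts.get(masters, [])
    if not isinstance(hosts, list):
        hosts = [hosts]
    port_key = port if port is not None else 'raet_port'   # assumed default key
    default_port = opts.get(port_key, 4506)                 # assumed default port
    return [{'external': (host, default_port)} for host in hosts]

# sketch_extract_masters({'master': '10.0.0.1', 'raet_port': 4506})
# -> [{'external': ('10.0.0.1', 4506)}]
```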
'Populate loads from masters in stack.remotes'
| def action(self, **kwa):
| if self.opts.value.get('cluster_mode'):
for remote in list(self.stack.value.remotes.values()):
if (remote.kind == kinds.applKinds.master):
self.masters.value[remote.name] = odict(load=0.0, expire=self.store.stamp)
|
'Assign class defaults'
| def _prepare(self):
| RoadStack.Bk = raeting.BodyKind.msgpack.value
RoadStack.JoinentTimeout = 0.0
|
'Enter action: should only run once, to set up the road stack.
Moved from _prepare so cleanup can happen before the stack is initialized.
Do the Salt RAET road stack setup at enter.'
| def action(self):
| kind = self.opts.value['__role']
if (kind not in kinds.APPL_KINDS):
emsg = "Invalid application kind = '{0}'.".format(kind)
log.error((emsg + '\n'))
raise ValueError(emsg)
role = self.opts.value.get('id', '')
if (not role):
emsg = 'Missing role required to setup RoadStack.'
log.error((emsg + '\n'))
raise ValueError(emsg)
name = '{0}_{1}'.format(role, kind)
main = self.opts.value.get('raet_main', self.local.data.main)
mutable = self.opts.value.get('raet_mutable', self.local.data.mutable)
always = self.opts.value.get('open_mode', False)
mutable = (mutable or always)
uid = self.local.data.uid
if (kind == kinds.APPL_KIND_NAMES[kinds.applKinds.caller]):
ha = (self.opts.value['interface'], self.opts.value['raet_alt_port'])
else:
ha = (self.opts.value['interface'], self.opts.value['raet_port'])
basedirpath = os.path.abspath(os.path.join(self.opts.value['cachedir'], 'raet'))
txMsgs = self.txmsgs.value
rxMsgs = self.rxmsgs.value
keep = salting.SaltKeep(opts=self.opts.value, basedirpath=basedirpath, stackname=name)
roledata = keep.loadLocalRoleData()
sighex = (roledata['sighex'] or self.local.data.sighex)
prihex = (roledata['prihex'] or self.local.data.prihex)
bufcnt = self.opts.value.get('raet_road_bufcnt', self.local.data.bufcnt)
self.stack.value = RoadStack(store=self.store, keep=keep, name=name, uid=uid, ha=ha, role=role, sigkey=sighex, prikey=prihex, main=main, kind=kinds.APPL_KINDS[kind], mutable=mutable, txMsgs=txMsgs, rxMsgs=rxMsgs, period=3.0, offset=0.5, bufcnt=bufcnt)
if self.opts.value.get('raet_clear_remotes'):
for remote in list(self.stack.value.remotes.values()):
self.stack.value.removeRemote(remote, clear=True)
self.stack.puid = self.stack.value.Uid
|
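A small illustration of the naming rule used above: the RoadStack name is '<role>_<kind>', with role taken from opts['id'] and kind from opts['__role']. The example values are hypothetical.

```python
def road_stack_name(opts):
    # kind is the application role, e.g. 'minion', 'master', 'caller'.
    kind = opts['__role']
    role = opts.get('id', '')
    if not role:
        raise ValueError('Missing role required to setup RoadStack.')
    return '{0}_{1}'.format(role, kind)

# road_stack_name({'__role': 'minion', 'id': 'silver'}) -> 'silver_minion'
```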
'Close udp socket'
| def action(self, **kwa):
| if (self.stack.value and isinstance(self.stack.value, RoadStack)):
self.stack.value.server.close()
|
'Join with all masters'
| def action(self, **kwa):
| stack = self.stack.value
if (stack and isinstance(stack, RoadStack)):
refresh_masters = (self.opts.value.get('raet_clear_remote_masters', True) or (not stack.remotes))
refresh_all = (self.opts.value.get('raet_clear_remotes', True) or (not stack.remotes))
if refresh_masters:
for remote in list(stack.remotes.values()):
if (remote.kind == kinds.applKinds.master):
stack.removeRemote(remote, clear=True)
if refresh_all:
for remote in list(stack.remotes.values()):
stack.removeRemote(remote, clear=True)
if (refresh_all or refresh_masters):
stack.puid = stack.Uid
ex = SaltException('Unable to connect to any master')
for master in self.ushers.value:
try:
mha = master['external']
stack.addRemote(RemoteEstate(stack=stack, fuid=0, sid=0, ha=mha, kind=kinds.applKinds.master))
except gaierror as ex:
log.warning('Unable to connect to master {0}: {1}'.format(mha, ex))
if (self.opts.value.get('master_type') != 'failover'):
raise ex
if (not stack.remotes):
raise ex
for remote in list(stack.remotes.values()):
if (remote.kind == kinds.applKinds.master):
stack.join(uid=remote.uid, timeout=0.0)
|
'Update .status share'
| def action(self, **kwa):
| stack = self.stack.value
joined = False
if (stack and isinstance(stack, RoadStack)):
if stack.remotes:
joined = any([remote.joined for remote in list(stack.remotes.values()) if (remote.kind == kinds.applKinds.master)])
self.status.update(joined=joined)
|
'Update .status share'
| def action(self, **kwa):
| stack = self.stack.value
rejected = False
if (stack and isinstance(stack, RoadStack)):
if stack.remotes:
rejected = all([(remote.acceptance == raeting.Acceptance.rejected.value) for remote in stack.remotes.values() if (remote.kind == kinds.applKinds.master)])
else:
rejected = True
self.status.update(rejected=rejected)
|
'Allow with all masters'
| def action(self, **kwa):
| stack = self.stack.value
if (stack and isinstance(stack, RoadStack)):
for remote in stack.remotes.values():
if (remote.kind == kinds.applKinds.master):
stack.allow(uid=remote.uid, timeout=0.0)
|
'Update .status share'
| def action(self, **kwa):
| stack = self.stack.value
allowed = False
if (stack and isinstance(stack, RoadStack)):
if stack.remotes:
allowed = any([remote.allowed for remote in list(stack.remotes.values()) if (remote.kind == kinds.applKinds.master)])
self.status.update(allowed=allowed)
|
'Manage the presence of any remotes
availables is set of names of alive remotes which are also allowed
changeds is a share with two fields:
plus is set of names of newly available remotes
minus is set of names of newly unavailable remotes
alloweds is dict of allowed remotes keyed by name
aliveds is dict of alived remotes keyed by name
reapeds is dict of reaped remotes keyed by name'
| def action(self, **kwa):
| stack = self.stack.value
if (stack and isinstance(stack, RoadStack)):
stack.manage(cascade=True)
self.availables.value = set(self.stack.value.availables)
self.changeds.update(plus=set(self.stack.value.changeds['plus']))
self.changeds.update(minus=set(self.stack.value.changeds['minus']))
self.alloweds.value = odict(self.stack.value.alloweds)
self.aliveds.value = odict(self.stack.value.aliveds)
self.reapeds.value = odict(self.stack.value.reapeds)
console.concise(' Manage {0}.\nAvailables: {1}\nChangeds:\nPlus: {2}\nMinus: {3}\nAlloweds: {4}\nAliveds: {5}\nReapeds: {6}\n'.format(stack.name, self.availables.value, self.changeds.data.plus, self.changeds.data.minus, self.alloweds.value, self.aliveds.value, self.reapeds.value))
self._fire_events()
|
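A minimal sketch of the bookkeeping the docstring describes, independent of RoadStack.manage: 'plus' is the set of names that just became available, 'minus' the set that just dropped out.

```python
def diff_availables(previous, current):
    # Compare the previous and current sets of available remote names.
    previous, current = set(previous), set(current)
    return {'plus': current - previous, 'minus': previous - current}

# diff_availables({'a', 'b'}, {'b', 'c'}) -> {'plus': {'c'}, 'minus': {'a'}}
```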
'Queue up message'
| def action(self, **kwa):
| rxMsgs = self.rxmsgs.value
while rxMsgs:
(msg, name) = rxMsgs.popleft()
console.terse('\nReceived....\n{0}\n'.format(msg))
|
'Return the functions and the returners loaded up from the loader
module'
| def _load_modules(self):
| if (self.grain_time.value is None):
self.grain_time.value = 0.0
modules_max_memory = False
if ((self.opts.value.get('modules_max_memory', (-1)) > 0) and HAS_PSUTIL and HAS_RESOURCE):
log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts.value['modules_max_memory']))
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
(rss, vms) = psutil.Process(os.getpid()).memory_info()
mem_limit = ((rss + vms) + self.opts.value['modules_max_memory'])
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif (self.opts.value.get('modules_max_memory', (-1)) > 0):
if (not HAS_PSUTIL):
log.error('Unable to enforce modules_max_memory because psutil is missing')
if (not HAS_RESOURCE):
log.error('Unable to enforce modules_max_memory because resource is missing')
if (((time.time() - self.grain_time.value) > 300.0) or self.module_refresh.value):
self.opts.value['grains'] = salt.loader.grains(self.opts.value)
self.grain_time.value = time.time()
self.grains.value = self.opts.value['grains']
self.utils.value = salt.loader.utils(self.opts.value)
self.modules.value = salt.loader.minion_mods(self.opts.value, utils=self.utils.value)
self.returners.value = salt.loader.returners(self.opts.value, self.modules.value)
self.module_executors.value = salt.loader.executors(self.opts.value, self.modules.value)
self.utils.value.clear()
self.modules.value.clear()
self.returners.value.clear()
self.module_executors.value.clear()
if (modules_max_memory is True):
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
self.module_refresh.value = False
|
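A standalone sketch (Unix only, assuming psutil is installed and the existing hard limit permits the new cap) of the address-space cap applied while loading modules above: the soft RLIMIT_AS is capped at current usage plus a headroom while a callable runs, then restored.

```python
import os
import resource

import psutil


def run_with_memory_cap(headroom_bytes, func, *args, **kwargs):
    # Cap the soft address-space limit at current usage plus headroom,
    # leaving the hard limit untouched so the restore always succeeds.
    old_soft, old_hard = resource.getrlimit(resource.RLIMIT_AS)
    mem = psutil.Process(os.getpid()).memory_info()
    cap = mem.rss + mem.vms + headroom_bytes
    resource.setrlimit(resource.RLIMIT_AS, (cap, old_hard))
    try:
        return func(*args, **kwargs)
    finally:
        resource.setrlimit(resource.RLIMIT_AS, (old_soft, old_hard))
```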
'Initial pillar'
| def action(self):
| available_masters = [remote for remote in list(self.road_stack.value.remotes.values()) if remote.allowed]
while (not available_masters):
available_masters = [remote for remote in self.road_stack.value.remotes.values() if remote.allowed]
time.sleep(0.1)
random_master = self.opts.value.get('random_master')
if random_master:
master = available_masters[random.randint(0, (len(available_masters) - 1))]
else:
master = available_masters[0]
self.master_estate_name.value = master.name
route = {'src': (self.road_stack.value.local.name, None, None), 'dst': (master.name, None, 'remote_cmd')}
load = {'id': self.opts.value['id'], 'grains': self.grains.value, 'saltenv': self.opts.value['environment'], 'ver': '2', 'cmd': '_pillar'}
self.road_stack.value.transmit({'route': route, 'load': load}, uid=master.uid)
self.road_stack.value.serviceAll()
while True:
time.sleep(0.1)
while self.road_stack.value.rxMsgs:
(msg, sender) = self.road_stack.value.rxMsgs.popleft()
self.pillar.value = msg.get('return', {})
if (self.pillar.value is None):
continue
self.opts.value['pillar'] = self.pillar.value
self.pillar_refresh.value = False
return
self.road_stack.value.serviceAll()
|
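A sketch of the pillar request envelope assembled above; the estate, yard, and grain values are placeholders, and only the shape and the '_pillar' command mirror the code.

```python
route = {
    'src': ('silver_minion', None, None),          # (estate, yard, share) of the sender
    'dst': ('master_master', None, 'remote_cmd'),  # routed to the master's remote_cmd share
}
load = {
    'id': 'silver',     # minion id
    'grains': {},       # current grains
    'saltenv': 'base',
    'ver': '2',
    'cmd': '_pillar',   # master-side handler to invoke
}
msg = {'route': route, 'load': load}
```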
'Map opts and make the schedule object'
| def _prepare(self):
| self.utils.value = salt.loader.utils(self.opts.value)
self.modules.value = salt.loader.minion_mods(self.opts.value, utils=self.utils.value)
self.returners.value = salt.loader.returners(self.opts.value, self.modules.value)
self.schedule = salt.utils.schedule.Schedule(self.opts.value, self.modules.value, self.returners.value)
|
'Eval the schedule'
| def action(self):
| self.schedule.eval()
|
'Set up required objects and queues'
| def _prepare(self):
| pass
|
'Run once at enter'
| def action(self):
| kind = self.opts.value['__role']
if (kind not in kinds.APPL_KINDS):
emsg = "Invalid application kind = '{0}' for manor lane.".format(kind)
log.error((emsg + '\n'))
raise ValueError(emsg)
if (kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.master], kinds.APPL_KIND_NAMES[kinds.applKinds.syndic]]):
lanename = 'master'
elif (kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.minion], kinds.APPL_KIND_NAMES[kinds.applKinds.caller]]):
role = self.opts.value.get('id', '')
if (not role):
emsg = 'Missing role required to setup manor Lane.'
log.error((emsg + '\n'))
raise ValueError(emsg)
lanename = '{0}_{1}'.format(role, kind)
else:
emsg = "Unsupported application kind = '{0}' for manor Lane.".format(kind)
log.error((emsg + '\n'))
raise ValueError(emsg)
bufcnt = self.opts.value.get('raet_lane_bufcnt', self.local.data.bufcnt)
name = 'manor'
self.stack.value = LaneStack(name=name, lanename=lanename, sockdirpath=self.opts.value['sock_dir'], bufcnt=bufcnt)
self.stack.value.Pk = raeting.PackKind.pack.value
self.event_yards.value = set()
self.local_cmd.value = deque()
self.remote_cmd.value = deque()
self.fun.value = deque()
self.event.value = deque()
self.event_req.value = deque()
self.presence_req.value = deque()
self.stats_req.value = deque()
self.publish.value = deque()
self.worker_verify.value = salt.utils.stringutils.random()
if self.opts.value.get('worker_threads'):
worker_seed = []
for index in range(self.opts.value['worker_threads']):
worker_seed.append('worker{0}'.format((index + 1)))
self.workers.value = itertools.cycle(worker_seed)
|
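A small illustration of the lane-naming rule above, using plain strings in place of kinds.APPL_KIND_NAMES: masters and syndics share the 'master' lane, while minions and callers get a per-role '<role>_<kind>' lane.

```python
def manor_lanename(opts):
    kind = opts['__role']
    if kind in ('master', 'syndic'):
        return 'master'
    if kind in ('minion', 'caller'):
        role = opts.get('id', '')
        if not role:
            raise ValueError('Missing role required to setup manor Lane.')
        return '{0}_{1}'.format(role, kind)
    raise ValueError("Unsupported application kind = '{0}' for manor Lane.".format(kind))

# manor_lanename({'__role': 'minion', 'id': 'silver'}) -> 'silver_minion'
```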
'Close uxd socket'
| def action(self, **kwa):
| if (self.stack.value and isinstance(self.stack.value, LaneStack)):
self.stack.value.server.close()
|
'Process inbound queues'
| def action(self):
| self.road_stack.value.serviceAll()
|
'Process inbound queues'
| def action(self):
| self.road_stack.value.serviceAllRx()
|
'Process outbound queues'
| def action(self):
| self.road_stack.value.serviceAllTx()
|
'Process inbound queues'
| def action(self):
| self.lane_stack.value.serviceAllRx()
|
'Process outbound queues'
| def action(self):
| self.lane_stack.value.serviceAllTx()
|
'Send to the right queue
msg is the message body dict
sender is the unique name of the remote estate that sent the message'
| def _process_road_rxmsg(self, msg, sender):
| pass
|
'Send uxd messages to the right queue or forward them to the correct
yard etc.
msg is message body dict
sender is unique name of remote that sent the message'
| def _process_lane_rxmsg(self, msg, sender):
| pass
|
'Assign and return the name of the estate for the default master or empty if none
If the default master is no longer available then selects one of the available
masters
If clustered is True then use load balancing algorithm to select master'
| def _get_master_estate_name(self, clustered=False):
| opts = self.opts.value
master = self.road_stack.value.nameRemotes.get(self.master_estate_name.value)
if ((not master) or (not master.alived)):
available_masters = [remote for remote in list(self.road_stack.value.remotes.values()) if remote.alived]
if available_masters:
random_master = opts.get('random_master')
if random_master:
master = available_masters[random.randint(0, (len(available_masters) - 1))]
else:
master = available_masters[0]
else:
master = None
self.master_estate_name.value = (master.name if master else '')
return self.master_estate_name.value
|
'Return set that is intersection of associated minion estates for
roles in minions and the set of available minion estates.'
| def _availablize(self, minions):
| suffix = '_{0}'.format(kinds.APPL_KIND_NAMES[kinds.applKinds.minion])
return list((set(minions) & set((name.rstrip(suffix) for name in self.availables.value))))
|
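A standalone sketch of the intersection the docstring describes: available estates are named '<role>_minion', so the suffix is stripped before intersecting with the requested minion roles. This uses an explicit suffix strip; note that str.rstrip in the method above strips a character set rather than a suffix.

```python
def availablize(minions, availables, suffix='_minion'):
    # Map estate names like 'silver_minion' back to plain roles before intersecting.
    roles = {name[:-len(suffix)] if name.endswith(suffix) else name
             for name in availables}
    return sorted(set(minions) & roles)

# availablize(['silver', 'gold'], ['silver_minion', 'copper_minion']) -> ['silver']
```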
'Process the messages!'
| def action(self):
| while self.road_stack.value.rxMsgs:
(msg, sender) = self.road_stack.value.rxMsgs.popleft()
self._process_road_rxmsg(msg=msg, sender=sender)
while self.laters.value:
(msg, sender) = self.laters.value.popleft()
self.lane_stack.value.rxMsgs.append((msg, sender))
while self.lane_stack.value.rxMsgs:
(msg, sender) = self.lane_stack.value.rxMsgs.popleft()
self._process_lane_rxmsg(msg=msg, sender=sender)
|
'Send to the right queue
msg is the message body dict
sender is the unique name of the remote estate that sent the message'
| def _process_road_rxmsg(self, msg, sender):
| try:
(s_estate, s_yard, s_share) = msg['route']['src']
(d_estate, d_yard, d_share) = msg['route']['dst']
except (ValueError, IndexError):
log.error('Received invalid message: {0}'.format(msg))
return
if (s_estate is None):
return
log.debug('**** Road Router rxMsg **** id={0} estate={1} yard={2}\n msg= {3}\n'.format(self.opts.value['id'], self.road_stack.value.local.name, self.lane_stack.value.local.name, msg))
if ((d_estate is not None) and (d_estate != self.road_stack.value.local.name)):
log.error('Road Router Received message for wrong estate: {0}'.format(d_estate))
return
if (d_yard is not None):
if (d_yard in self.lane_stack.value.nameRemotes):
self.lane_stack.value.transmit(msg, self.lane_stack.value.nameRemotes[d_yard].uid)
return
if (d_share is None):
log.error('Received message without share: {0}'.format(msg))
return
elif (d_share == 'event_fire'):
self.event.value.append(msg)
return
elif (d_share == 'local_cmd'):
log.error('Received local command remotely! Ignoring: {0}'.format(msg))
return
elif (d_share == 'remote_cmd'):
if ('load' in msg):
role = self.road_stack.value.nameRemotes[sender].role
msg['load']['id'] = role
self.lane_stack.value.transmit(msg, self.lane_stack.value.fetchUidByName(next(self.workers.value)))
|
'Send uxd messages to the right queue or forward them to the correct
yard etc.
msg is message body dict
sender is unique name of remote that sent the message'
| def _process_lane_rxmsg(self, msg, sender):
| try:
(s_estate, s_yard, s_share) = msg['route']['src']
(d_estate, d_yard, d_share) = msg['route']['dst']
except (ValueError, IndexError):
log.error('Lane Router Received invalid message: {0}'.format(msg))
return
if (s_yard is None):
return
if (s_estate is None):
s_estate = self.road_stack.value.local.name
msg['route']['src'] = (s_estate, s_yard, s_share)
log.debug('**** Lane Router rxMsg **** id={0} estate={1} yard={2}\n msg={3}\n'.format(self.opts.value['id'], self.road_stack.value.local.name, self.lane_stack.value.local.name, msg))
if (d_estate is None):
pass
elif (d_estate != self.road_stack.value.local.name):
if (d_estate in self.road_stack.value.nameRemotes):
self.road_stack.value.message(msg, self.road_stack.value.nameRemotes[d_estate].uid)
return
if (d_share == 'pub_ret'):
msg['return']['ret']['minions'] = self._availablize(msg['return']['ret']['minions'])
if (msg.get('__worker_verify') == self.worker_verify.value):
self.publish.value.append(msg)
if (d_yard is None):
pass
elif (d_yard != self.lane_stack.value.local.name):
if (d_yard in self.lane_stack.value.nameRemotes):
self.lane_stack.value.transmit(msg, self.lane_stack.value.nameRemotes[d_yard].uid)
return
if (d_share is None):
log.error('Lane Router Received message without share: {0}'.format(msg))
return
elif (d_share == 'local_cmd'):
self.lane_stack.value.transmit(msg, self.lane_stack.value.fetchUidByName(next(self.workers.value)))
elif (d_share == 'event_req'):
self.event_req.value.append(msg)
elif (d_share == 'event_fire'):
self.event.value.append(msg)
elif (d_share == 'presence_req'):
self.presence_req.value.append(msg)
elif (d_share == 'stats_req'):
self.stats_req.value.append(msg)
|
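For orientation, a minimal sketch of the route envelope these routers unpack: 'src' and 'dst' are (estate, yard, share) triples, any element of which may be None. All values below are placeholders.

```python
msg = {
    'route': {
        'src': ('silver_minion', 'manor', None),
        'dst': ('silver_master_master', None, 'remote_cmd'),
    },
    'load': {'cmd': '_return', 'id': 'silver'},
}

# Unpack the triples the same way the routers above do.
s_estate, s_yard, s_share = msg['route']['src']
d_estate, d_yard, d_share = msg['route']['dst']
```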
'Send to the right queue
msg is the message body dict
sender is the unique name of the remote estate that sent the message'
| def _process_road_rxmsg(self, msg, sender):
| try:
(s_estate, s_yard, s_share) = msg['route']['src']
(d_estate, d_yard, d_share) = msg['route']['dst']
except (ValueError, IndexError):
log.error('Received invalid message: {0}'.format(msg))
return
if (s_estate is None):
return
log.debug('**** Road Router rxMsg **** id={0} estate={1} yard={2}\n msg= {3}\n'.format(self.opts.value['id'], self.road_stack.value.local.name, self.lane_stack.value.local.name, msg))
if ((d_estate is not None) and (d_estate != self.road_stack.value.local.name)):
log.error('Road Router Received message for wrong estate: {0}'.format(d_estate))
return
if (d_yard is not None):
if (d_yard in self.lane_stack.value.nameRemotes):
self.lane_stack.value.transmit(msg, self.lane_stack.value.nameRemotes[d_yard].uid)
return
return
if (d_share is None):
log.error('Received message without share: {0}'.format(msg))
return
elif (d_share == 'fun'):
if (self.road_stack.value.kind == kinds.applKinds.minion):
self.fun.value.append(msg)
elif (d_share == 'stats_req'):
self.stats_req.value.append(msg)
|
'Send uxd messages to the right queue or forward them to the correct
yard etc.
msg is message body dict
sender is unique name of remote that sent the message'
| def _process_lane_rxmsg(self, msg, sender):
| try:
(s_estate, s_yard, s_share) = msg['route']['src']
(d_estate, d_yard, d_share) = msg['route']['dst']
except (ValueError, IndexError):
log.error('Lane Router Received invalid message: {0}'.format(msg))
return
if (s_yard is None):
return
if (s_estate is None):
s_estate = self.road_stack.value.local.name
msg['route']['src'] = (s_estate, s_yard, s_share)
log.debug('**** Lane Router rxMsg **** id={0} estate={1} yard={2}\n msg={3}\n'.format(self.opts.value['id'], self.road_stack.value.local.name, self.lane_stack.value.local.name, msg))
if (d_estate is None):
pass
elif (d_estate != self.road_stack.value.local.name):
if (d_estate in self.road_stack.value.nameRemotes):
self.road_stack.value.message(msg, self.road_stack.value.nameRemotes[d_estate].uid)
return
if (d_yard is None):
pass
elif (d_yard != self.lane_stack.value.local.name):
if (d_yard in self.lane_stack.value.nameRemotes):
self.lane_stack.value.transmit(msg, self.lane_stack.value.nameRemotes[d_yard].uid)
return
return
if (d_share is None):
log.error('Lane Router Received message without share: {0}'.format(msg))
return
elif (d_share == 'event_req'):
self.event_req.value.append(msg)
elif (d_share == 'event_fire'):
self.event.value.append(msg)
elif (d_share == 'remote_cmd'):
if (not self.road_stack.value.remotes):
log.error('**** Lane Router: Missing joined master. Unable to route remote_cmd. Requeuing'.format())
self.laters.value.append((msg, sender))
return
d_estate = self._get_master_estate_name(clustered=self.opts.value.get('cluster_mode', False))
if (not d_estate):
log.error("**** Lane Router: No available destination estate for 'remote_cmd'.Unable to route. Requeuing".format())
self.laters.value.append((msg, sender))
return
msg['route']['dst'] = (d_estate, d_yard, d_share)
log.debug("**** Lane Router: Missing destination estate for 'remote_cmd'. Using default route={0}.".format(msg['route']['dst']))
self.road_stack.value.message(msg, self.road_stack.value.nameRemotes[d_estate].uid)
|
'Assign and return the name of the estate for the default master or empty if none
If the default master is no longer available then selects one of the available
masters'
| def _get_master_estate_name(self, clustered=False):
| opts = self.opts.value
master = self.road_stack.value.nameRemotes.get(self.master_estate_name.value)
if ((not master) or (not master.alived)):
available_masters = [remote for remote in list(self.road_stack.value.remotes.values()) if remote.alived]
if available_masters:
random_master = opts.get('random_master')
if random_master:
master = available_masters[random.randint(0, (len(available_masters) - 1))]
else:
master = available_masters[0]
else:
master = None
self.master_estate_name.value = (master.name if master else '')
return self.master_estate_name.value
|
'Return set that is intersection of associated minion estates for
roles in minions and the set of available minion estates.'
| def _availablize(self, minions):
| suffix = '_{0}'.format(kinds.APPL_KIND_NAMES[kinds.applKinds.minion])
return list((set(minions) & set((name.rstrip(suffix) for name in self.availables.value))))
|
'Register an incoming event request with the requesting yard id'
| def _register_event_yard(self, msg):
| self.event_yards.value.add(msg['route']['src'][1])
|
'Forward an event message to all subscribed yards
Event message has a route'
| def _forward_event(self, msg):
| rm_ = []
if (msg.get('tag') == 'pillar_refresh'):
self.pillar_refresh.value = True
if (msg.get('tag') == 'module_refresh'):
self.module_refresh.value = True
for y_name in self.event_yards.value:
if (y_name not in self.lane_stack.value.nameRemotes):
rm_.append(y_name)
continue
self.lane_stack.value.transmit(msg, self.lane_stack.value.fetchUidByName(y_name))
self.lane_stack.value.serviceAll()
for y_name in rm_:
self.event_yards.value.remove(y_name)
|
'Register event requests
Iterate over the registered event yards and fire!'
| def action(self):
| while self.event_req.value:
self._register_event_yard(self.event_req.value.popleft())
while self.event.value:
self._forward_event(self.event.value.popleft())
|
'Forward an event message to all subscribed yards
Event message has a route
Also rebroadcast to all masters in cluster'
| def _forward_event(self, msg):
| super(SaltRaetEventerMaster, self)._forward_event(msg)
if self.opts.value.get('cluster_mode'):
if (msg.get('origin') is None):
masters = (self.availables.value & set((remote.name for remote in list(self.road_stack.value.remotes.values()) if (remote.kind == kinds.applKinds.master))))
for name in masters:
remote = self.road_stack.value.nameRemotes[name]
msg['origin'] = self.road_stack.value.name
(s_estate, s_yard, s_share) = msg['route']['src']
msg['route']['src'] = (self.road_stack.value.name, s_yard, s_share)
msg['route']['dst'] = (remote.name, None, 'event_fire')
self.road_stack.value.message(msg, remote.uid)
|
'Forward a presence message to all subscribed yards
Presence message has a route'
| def _send_presence(self, msg):
| y_name = msg['route']['src'][1]
if (y_name not in self.lane_stack.value.nameRemotes):
pass
else:
if (('data' in msg) and ('state' in msg['data'])):
state = msg['data']['state']
else:
state = None
if (state in [None, 'available', 'present']):
present = odict()
for name in self.availables.value:
minion = self.aliveds.value.get(name, None)
present[name] = (minion.ha[0] if minion else None)
data = {'present': present}
else:
states = {'joined': self.alloweds, 'allowed': self.alloweds, 'alived': self.aliveds, 'reaped': self.reapeds}
try:
minions = states[state].value
except KeyError:
log.error('Lane Router Received invalid message: {0}'.format(msg))
return
result = odict()
for name in minions:
result[name] = minions[name].ha[0]
data = {state: result}
tag = tagify('present', 'presence')
route = {'dst': (None, None, 'event_fire'), 'src': (None, self.lane_stack.value.local.name, None)}
msg = {'route': route, 'tag': tag, 'data': data}
self.lane_stack.value.transmit(msg, self.lane_stack.value.fetchUidByName(y_name))
self.lane_stack.value.serviceAll()
|
'Register presence requests
Iterate over the registered presence yards and fire!'
| def action(self):
| while self.presence_req.value:
self._send_presence(self.presence_req.value.popleft())
|
'Forward a stats message to all subscribed yards
Stats message has a route'
| def _send_stats(self, msg):
| pass
|
'Iterate over the registered stats requests and fire!'
| def action(self):
| while self.stats_req.value:
self._send_stats(self.stats_req.value.popleft())
|
'Forward a stats message to all subscribed yards
Stats message has a route'
| def _send_stats(self, msg):
| y_name = msg['route']['src'][1]
if (y_name not in self.lane_stack.value.nameRemotes):
return
stats = self._get_stats(msg.get('tag'))
if (stats is None):
return
route = {'dst': (None, None, 'event_fire'), 'src': (None, self.lane_stack.value.local.name, None)}
repl = {'route': route, 'tag': msg.get('tag'), 'data': stats}
self.lane_stack.value.transmit(repl, self.lane_stack.value.fetchUidByName(y_name))
self.lane_stack.value.serviceAll()
|
'Forward a stats message to all subscribed yards
Stats message has a route'
| def _send_stats(self, msg):
| (s_estate, s_yard, s_share) = msg['route']['src']
if (s_estate not in self.road_stack.value.nameRemotes):
return
stats = self._get_stats(msg.get('tag'))
if (stats is None):
return
route = {'dst': (s_estate, s_yard, 'event_fire'), 'src': (self.road_stack.value.name, self.lane_stack.value.name, None)}
repl = {'route': route, 'tag': msg.get('tag'), 'data': stats}
self.road_stack.value.transmit(repl, self.road_stack.value.fetchUidByName(s_estate))
self.road_stack.value.serviceAll()
|
'Publish the message out to the targeted minions'
| def _publish(self, pub_msg):
| stack = self.stack.value
pub_data = pub_msg['return']
minions = (self.availables.value & set((remote.name for remote in list(stack.remotes.values()) if (remote.kind in [kinds.applKinds.minion, kinds.applKinds.syndic]))))
for minion in minions:
uid = self.stack.value.fetchUidByName(minion)
if uid:
route = {'dst': (minion, None, 'fun'), 'src': (self.stack.value.local.name, None, None)}
msg = {'route': route, 'pub': pub_data['pub']}
self.stack.value.message(msg, uid)
|
'Pop the publish queue and publish the requests!'
| def action(self):
| while self.publish.value:
self._publish(self.publish.value.popleft())
|
'Only call once, this will start the engine processes'
| def action(self):
| salt.engines.start_engines(self.opts.value, self.proc_mgr.value)
|
'Set up the beacons'
| def action(self):
| self.beacon.value = salt.beacons.Beacon(self.opts.value, self.modules.value)
|
'Run the beacons'
| def action(self):
| if ('config.merge' in self.modules.value):
b_conf = self.modules.value['config.merge']('beacons')
if b_conf:
try:
events = self.beacon.value.process(b_conf)
self.master_events.value.extend(events)
self.event.value.extend(events)
except Exception:
log.error('Error in the beacon system: ', exc_info=True)
return []
|
'Takes a template and a job and fills the template with
fake return data associated with the job'
| def _fill_tmpl(self, pub):
| msg = {'load': {'fun_args': [], 'jid': pub['return']['pub']['jid'], 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': u'test.ping', 'id': 'silver'}, 'route': {'src': (u'silver_minion', u'jobber50e73ccefd052167c7', 'jid_ret'), 'dst': (u'silver_master_master', None, 'remote_cmd')}}
log.debug('Dummy publisher faking return with: {0}'.format(msg))
return msg
|
'Map opts for convenience'
| def _prepare(self):
| self.opts = self.opts_store.value
self.proc_dir = salt.minion.get_proc_dir(self.opts['cachedir'])
self.serial = salt.payload.Serial(self.opts)
self.executors.value = {}
|
'Setup and return the LaneStack and Yard used by the jobber yard
to communicate with the minion manor yard'
| def _setup_jobber_stack(self):
| role = self.opts.get('id', '')
if (not role):
emsg = 'Missing role required to setup Jobber Lane.'
log.error((emsg + '\n'))
raise ValueError(emsg)
kind = self.opts['__role']
if (kind not in kinds.APPL_KINDS):
emsg = "Invalid application kind = '{0}' for Jobber lane.".format(kind)
log.error((emsg + '\n'))
raise ValueError(emsg)
if (kind == 'minion'):
lanename = '{0}_{1}'.format(role, kind)
else:
emsg = "Unsupported application kind = '{0}' for Jobber Lane.".format(kind)
log.error((emsg + '\n'))
raise ValueError(emsg)
sockdirpath = self.opts['sock_dir']
name = ('jobber' + nacling.uuid(size=18))
stack = LaneStack(name=name, lanename=lanename, sockdirpath=sockdirpath)
stack.Pk = raeting.PackKind.pack.value
stack.addRemote(RemoteYard(stack=stack, name='manor', lanename=lanename, dirpath=sockdirpath))
console.concise('Created Jobber Stack {0}\n'.format(stack.name))
return stack
|
'Send the return data back via the uxd socket'
| def _return_pub(self, msg, ret, stack):
| route = {'src': (self.road_stack.value.local.name, stack.local.name, 'jid_ret'), 'dst': (msg['route']['src'][0], None, 'remote_cmd')}
mid = self.opts['id']
ret['cmd'] = '_return'
ret['id'] = mid
try:
oput = self.modules.value[ret['fun']].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
ret['out'] = oput
msg = {'route': route, 'load': ret}
stack.transmit(msg, stack.fetchUidByName('manor'))
stack.serviceAll()
|
'Pull the queue for functions to execute'
| def action(self):
| while self.fun.value:
msg = self.fun.value.popleft()
data = msg.get('pub')
match = getattr(self.matcher.value, '{0}_match'.format(data.get('tgt_type', 'glob')))(data['tgt'])
if (not match):
continue
if ('user' in data):
log.info('User {0[user]} Executing command {0[fun]} with jid {0[jid]}'.format(data))
else:
log.info('Executing command {0[fun]} with jid {0[jid]}'.format(data))
log.debug('Command details {0}'.format(data))
if is_windows():
try:
self.proc_run(msg)
except Exception as exc:
log.error('Exception caught by jobber: {0}'.format(exc), exc_info=True)
else:
process = multiprocessing.Process(target=self.proc_run, kwargs={'msg': msg})
process.start()
process.join()
|
'Execute the run in a dedicated process'
| def proc_run(self, msg):
| data = msg['pub']
fn_ = os.path.join(self.proc_dir, data['jid'])
self.opts['__ex_id'] = data['jid']
salt.utils.daemonize_if(self.opts)
salt.transport.jobber_stack = stack = self._setup_jobber_stack()
(src_estate, src_yard, src_share) = msg['route']['src']
salt.transport.jobber_estate_name = src_estate
salt.transport.jobber_yard_name = src_yard
sdata = {'pid': os.getpid()}
sdata.update(data)
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(self.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if (function_name in self.modules.value):
try:
func = self.modules.value[data['fun']]
(args, kwargs) = salt.minion.load_args_and_kwargs(func, salt.utils.args.parse_input(data['arg'], no_parse=data.get('no_parse', [])), data)
sys.modules[func.__module__].__context__['retcode'] = 0
executors = (data.get('module_executors') or self.opts.get('module_executors', ['direct_call']))
if isinstance(executors, six.string_types):
executors = [executors]
elif ((not isinstance(executors, list)) or (not executors)):
raise SaltInvocationError('Wrong executors specification: {0}. String or non-empty list expected'.format(executors))
if (self.opts.get('sudo_user', '') and (executors[(-1)] != 'sudo')):
executors[(-1)] = 'sudo.get'
log.trace('Executors list {0}'.format(executors))
for name in executors:
if (name not in self.module_executors.value):
raise SaltInvocationError("Executor '{0}' is not available".format(name))
return_data = self.module_executors.value[name].execute(self.opts, data, func, args, kwargs)
if (return_data is not None):
break
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if (isinstance(single, dict) and isinstance(iret, list)):
iret.update(single)
else:
if (not iret):
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', self.opts['id'], str(ind)], 'job')
event_data = {'return': single}
self._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = sys.modules[func.__module__].__context__.get('retcode', 0)
ret['success'] = True
except CommandNotFoundError as exc:
msg = "Command required for '{0}' not found".format(function_name)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
except CommandExecutionError as exc:
log.error("A command in '{0}' had a problem: {1}".format(function_name, exc), exc_info_on_loglevel=logging.DEBUG)
ret['return'] = 'ERROR: {0}'.format(exc)
except SaltInvocationError as exc:
log.error("Problem executing '{0}': {1}".format(function_name, exc), exc_info_on_loglevel=logging.DEBUG)
ret['return'] = "ERROR executing '{0}': {1}".format(function_name, exc)
except TypeError as exc:
msg = 'TypeError encountered executing {0}: {1}. See debug log for more info.'.format(function_name, exc)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
else:
ret['return'] = "'{0}' is not available.".format(function_name)
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
self._return_pub(msg, ret, stack)
if data['ret']:
ret['id'] = self.opts['id']
for returner in set(data['ret'].split(',')):
try:
self.returners.value['{0}.returner'.format(returner)](ret)
except Exception as exc:
log.error('The return failed for job {0} {1}'.format(data['jid'], exc))
console.concise('Closing Jobber Stack {0}\n'.format(stack.name))
stack.server.close()
salt.transport.jobber_stack = None
|
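A standalone sketch of the executor-list normalization buried in proc_run above: a bare string becomes a one-element list, anything else must be a non-empty list, and a configured sudo_user swaps the final executor for 'sudo.get'. The ValueError here stands in for SaltInvocationError.

```python
def normalize_executors(executors, sudo_user=''):
    if isinstance(executors, str):
        executors = [executors]
    elif not isinstance(executors, list) or not executors:
        raise ValueError(
            'Wrong executors specification: {0}. '
            'String or non-empty list expected'.format(executors))
    if sudo_user and executors[-1] != 'sudo':
        # Mirror the sudo_user branch above: replace the final executor.
        executors = executors[:-1] + ['sudo.get']
    return executors

# normalize_executors('direct_call') -> ['direct_call']
```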
'Assign self.opts'
| def __init__(self, opts):
| self.opts = opts
self.preloads = explode_opts(self.opts)
self.access_keys = salt.daemons.masterapi.access_keys(self.opts)
self.preloads.append(('.salt.access_keys', dict(value=self.access_keys)))
|
'Start up ioflo
port = self.opts[\'raet_port\']'
| def start(self, behaviors=None):
| if (behaviors is None):
behaviors = []
behaviors.extend(['salt.daemons.flo'])
console_logdir = self.opts.get('ioflo_console_logdir', '')
if console_logdir:
consolepath = os.path.join(console_logdir, 'master.log')
else:
consolepath = ''
ioflo.app.run.start(name='master', period=float(self.opts['ioflo_period']), stamp=0.0, real=self.opts['ioflo_realtime'], filepath=self.opts['master_floscript'], behaviors=behaviors, username='', password='', mode=None, houses=None, metas=None, preloads=self.preloads, verbose=int(self.opts['ioflo_verbose']), consolepath=consolepath)
|
'Assign self.opts'
| def __init__(self, opts):
| self.opts = opts
|
'Start up ioflo
port = self.opts[\'raet_port\']'
| def tune_in(self, behaviors=None):
| if (behaviors is None):
behaviors = []
behaviors.extend(['salt.daemons.flo'])
preloads = explode_opts(self.opts)
console_logdir = self.opts.get('ioflo_console_logdir', '')
if console_logdir:
consolepath = os.path.join(console_logdir, 'minion.log')
else:
consolepath = ''
ioflo.app.run.start(name=self.opts['id'], period=float(self.opts['ioflo_period']), stamp=0.0, real=self.opts['ioflo_realtime'], filepath=self.opts['minion_floscript'], behaviors=behaviors, username='', password='', mode=None, houses=None, metas=None, preloads=preloads, verbose=int(self.opts['ioflo_verbose']), consolepath=consolepath)
|
'Start up caller minion for salt-call when there is no local minion'
| def call_in(self, behaviors=None):
| if (behaviors is None):
behaviors = []
behaviors.extend(['salt.daemons.flo'])
preloads = explode_opts(self.opts)
console_logdir = self.opts.get('ioflo_console_logdir', '')
if console_logdir:
consolepath = os.path.join(console_logdir, 'caller.log')
else:
consolepath = ''
ioflo.app.run.start(name=self.opts['id'], period=float(self.opts['ioflo_period']), stamp=0.0, real=self.opts['ioflo_realtime'], filepath=self.opts['caller_floscript'], behaviors=behaviors, username='', password='', mode=None, houses=None, metas=None, preloads=preloads, verbose=int(self.opts['ioflo_verbose']), consolepath=consolepath)
|
'Spin up a worker, do this in a multiprocess'
| def run(self):
| behaviors = ['salt.daemons.flo']
preloads = [('.salt.opts', dict(value=self.opts))]
console_logdir = self.opts.get('ioflo_console_logdir', '')
if console_logdir:
consolepath = os.path.join(console_logdir, 'maintenance.log')
else:
consolepath = ''
ioflo.app.run.start(name='maintenance', period=float(self.opts['loop_interval']), stamp=0.0, real=self.opts['ioflo_realtime'], filepath=self.opts['maintenance_floscript'], behaviors=behaviors, username='', password='', mode=None, houses=None, metas=None, preloads=preloads, verbose=int(self.opts['ioflo_verbose']), consolepath=consolepath)
|
'Set up the objects used in the maint process'
| def action(self):
| self.fileserver.value = salt.fileserver.Fileserver(self.opts.value)
self.runners.value = salt.loader.runner(self.opts.value)
self.ckminions.value = salt.utils.minions.CkMinions(self.opts.value)
self.pillargitfs.value = salt.daemons.masterapi.init_git_pillar(self.opts.value)
|
'Clean!'
| def action(self):
| salt.daemons.masterapi.clean_fsbackend(self.opts.value)
|
'Clear out the old jobs cache'
| def action(self):
| salt.daemons.masterapi.clean_old_jobs(self.opts.value)
|
'Update!'
| def action(self):
| for pillargit in self.pillargitfs.value:
pillargit.update()
salt.daemons.masterapi.fileserver_update(self.fileserver.value)
|
'Spin up a worker, do this in a multiprocess
windex is worker index'
| def run(self):
| self.opts['__worker'] = True
behaviors = ['salt.daemons.flo']
preloads = [('.salt.opts', dict(value=self.opts)), ('.salt.var.worker_verify', dict(value=self.worker_verify))]
preloads.append(('.salt.var.fork.worker.windex', dict(value=self.windex)))
preloads.append(('.salt.var.zmq.master_key', dict(value=self.mkey)))
preloads.append(('.salt.var.zmq.aes', dict(value=self.aes)))
preloads.append(('.salt.access_keys', dict(value=self.access_keys)))
preloads.extend(salt.daemons.flo.explode_opts(self.opts))
console_logdir = self.opts.get('ioflo_console_logdir', '')
if console_logdir:
consolepath = os.path.join(console_logdir, 'worker_{0}.log'.format(self.windex))
else:
consolepath = ''
ioflo.app.run.start(name='worker{0}'.format(self.windex), period=float(self.opts['ioflo_period']), stamp=0.0, real=self.opts['ioflo_realtime'], filepath=self.opts['worker_floscript'], behaviors=behaviors, username='', password='', mode=None, houses=None, metas=None, preloads=preloads, verbose=int(self.opts['ioflo_verbose']), consolepath=consolepath)
|
'Set up the uxd stack and behaviors'
| def action(self):
| name = 'worker{0}'.format(self.windex.value)
kind = self.opts.value['__role']
if (kind not in kinds.APPL_KINDS):
emsg = "Invalid application kind = '{0}' for Master Worker.".format(kind)
log.error((emsg + '\n'))
raise ValueError(emsg)
if (kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.master], kinds.APPL_KIND_NAMES[kinds.applKinds.syndic]]):
lanename = 'master'
else:
emsg = "Invalid application kind '{0}' for Master Worker.".format(kind)
log.error((emsg + '\n'))
raise ValueError(emsg)
sockdirpath = self.opts.value['sock_dir']
self.stack.value = LaneStack(name=name, lanename=lanename, sockdirpath=sockdirpath)
self.stack.value.Pk = raeting.PackKind.pack.value
manor_yard = RemoteYard(stack=self.stack.value, name='manor', lanename=lanename, dirpath=sockdirpath)
self.stack.value.addRemote(manor_yard)
self.remote_loader.value = salt.daemons.masterapi.RemoteFuncs(self.opts.value)
self.local_loader.value = salt.daemons.masterapi.LocalFuncs(self.opts.value, self.access_keys.value)
init = {}
init['route'] = {'src': (None, self.stack.value.local.name, None), 'dst': (None, manor_yard.name, 'worker_req')}
self.stack.value.transmit(init, self.stack.value.fetchUidByName(manor_yard.name))
self.stack.value.serviceAll()
|
'Read in a command and execute it, send the return back up to the
main master process'
| def action(self):
| self.lane_stack.value.serviceAll()
while self.lane_stack.value.rxMsgs:
(msg, sender) = self.lane_stack.value.rxMsgs.popleft()
try:
(s_estate, s_yard, s_share) = msg['route']['src']
(d_estate, d_yard, d_share) = msg['route']['dst']
except (ValueError, IndexError):
log.error('Received invalid message: {0}'.format(msg))
return
log.debug('**** Worker Router rxMsg\n msg= {0}\n'.format(msg))
if ('load' in msg):
cmd = msg['load'].get('cmd')
if (not cmd):
continue
elif cmd.startswith('__'):
continue
ret = {}
if (d_share == 'remote_cmd'):
if hasattr(self.remote_loader.value, cmd):
ret['return'] = getattr(self.remote_loader.value, cmd)(msg['load'])
elif (d_share == 'local_cmd'):
if hasattr(self.local_loader.value, cmd):
ret['return'] = getattr(self.local_loader.value, cmd)(msg['load'])
else:
ret = {'error': 'Invalid request'}
if ((cmd == 'publish') and ('pub' in ret.get('return', {}))):
r_share = 'pub_ret'
ret['__worker_verify'] = self.worker_verify.value
else:
r_share = s_share
if (cmd not in INHIBIT_RETURN):
ret['route'] = {'src': (None, self.lane_stack.value.local.name, None), 'dst': (s_estate, s_yard, r_share)}
self.lane_stack.value.transmit(ret, self.lane_stack.value.fetchUidByName('manor'))
self.lane_stack.value.serviceAll()
|
'Assign master key to .salt.var.zmq.master_key
Copy opts[\'aes\'] to .salt.var.zmq.aes'
| def action(self):
| self.mkey.value = salt.crypt.MasterKeys(self.opts.value)
self.aes.value = self.opts.value['aes']
|
'Start the ret port binding'
| def run(self):
| self.context = zmq.Context(self.opts['worker_threads'])
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
log.info('ZMQ Ret port binding to {0}'.format(self.uri))
self.clients = self.context.socket(zmq.ROUTER)
if ((self.opts['ipv6'] is True) and hasattr(zmq, 'IPV4ONLY')):
self.clients.setsockopt(zmq.IPV4ONLY, 0)
try:
self.clients.setsockopt(zmq.HWM, self.opts['rep_hwm'])
except AttributeError:
self.clients.setsockopt(zmq.SNDHWM, self.opts['rep_hwm'])
self.clients.setsockopt(zmq.RCVHWM, self.opts['rep_hwm'])
self.clients.setsockopt(zmq.BACKLOG, self.opts['zmq_backlog'])
self.workers = self.context.socket(zmq.DEALER)
self.w_uri = 'ipc://{0}'.format(os.path.join(self.opts['sock_dir'], 'workers.ipc'))
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.workers.bind(self.w_uri)
while True:
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if (exc.errno == errno.EINTR):
continue
raise exc
|
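For context, a minimal sketch of what a worker on the far side of this ROUTER/DEALER proxy could look like: a REP socket connected to the same workers.ipc endpoint, answering whatever the proxy forwards. The socket path mirrors the code above; the reply content is a placeholder.

```python
import os

import zmq


def run_worker(sock_dir):
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    # Connect to the backend endpoint the DEALER socket above is bound to.
    socket.connect('ipc://{0}'.format(os.path.join(sock_dir, 'workers.ipc')))
    while True:
        request = socket.recv()          # raw request bytes from the proxy
        socket.send(b'ack: ' + request)  # placeholder reply
```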
'Initializes zmq
Put here so initialization only runs if we want a multi-headed master'
| def action(self):
| self.crypticle.value = salt.crypt.Crypticle(self.opts.value, self.opts.value.get('aes'))
|
'Set up tracking value(s)'
| def _prepare(self):
| if (not HAS_ZMQ):
return
self.created = False
self.serial = salt.payload.Serial(self.opts.value)
|
'Create the publish port if it is not available and then publish the
messages on it'
| def action(self):
| if (not self.zmq_behavior):
return
if (not self.created):
self.context = zmq.Context(1)
self.pub_sock = self.context.socket(zmq.PUB)
try:
self.pub_sock.setsockopt(zmq.HWM, self.opts.value.get('pub_hwm', 1000))
except AttributeError:
self.pub_sock.setsockopt(zmq.SNDHWM, self.opts.value.get('pub_hwm', 1000))
self.pub_sock.setsockopt(zmq.RCVHWM, self.opts.value.get('pub_hwm', 1000))
if ((self.opts.value['ipv6'] is True) and hasattr(zmq, 'IPV4ONLY')):
self.pub_sock.setsockopt(zmq.IPV4ONLY, 0)
self.pub_sock.setsockopt(zmq.BACKLOG, self.opts.value.get('zmq_backlog', 1000))
self.pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts.value)
log.info('Starting the Salt ZeroMQ Publisher on {0}'.format(self.pub_uri))
self.pub_sock.bind(self.pub_uri)
self.created = True
try:
for package in self.publish.value:
payload = {'enc': 'aes'}
payload['load'] = self.crypticle.value.dumps(package['return']['pub'])
if self.opts.value['sign_pub_messages']:
master_pem_path = os.path.join(self.opts.value['pki_dir'], 'master.pem')
log.debug('Signing data packet for publish')
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
send_payload = self.serial.dumps(payload)
if self.opts.value['zmq_filtering']:
if (package['return']['pub']['tgt_type'] == 'list'):
for topic in package['return']['pub']['tgt']:
htopic = hashlib.sha1(topic).hexdigest()
self.pub_sock.send(htopic, flags=zmq.SNDMORE)
self.pub_sock.send(send_payload)
else:
self.pub_sock.send('broadcast', flags=zmq.SNDMORE)
self.pub_sock.send(send_payload)
else:
self.pub_sock.send(send_payload)
except zmq.ZMQError as exc:
if (exc.errno == errno.EINTR):
return
raise exc
|
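A hedged sketch of the subscriber side implied by the filtering branch above: when zmq_filtering is on, each payload is preceded by a topic frame that is either 'broadcast' or the SHA-1 hex digest of a targeted minion id, so a receiving minion would subscribe to both. The URI and minion id are placeholders.

```python
import hashlib

import zmq


def subscribe(minion_id, pub_uri='tcp://127.0.0.1:4505'):
    context = zmq.Context()
    sock = context.socket(zmq.SUB)
    # Subscribe to the broadcast topic and to this minion's hashed-id topic.
    sock.setsockopt(zmq.SUBSCRIBE, b'broadcast')
    sock.setsockopt(zmq.SUBSCRIBE,
                    hashlib.sha1(minion_id.encode()).hexdigest().encode())
    sock.connect(pub_uri)
    topic, payload = sock.recv_multipart()  # topic frame, then serialized payload
    return topic, payload
```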
'Create the initial setting value for the worker'
| def _prepare(self):
| self.created = False
|
'Create the master MWorker if it is not present, then iterate over the
connection with the ioflo sequence'
| def action(self):
| if (not self.created):
crypticle = salt.crypt.Crypticle(self.opts.value, self.aes.value)
self.worker = salt.master.FloMWorker(self.opts.value, self.key.value)
self.worker.setup()
self.created = True
log.info('Started ZMQ worker')
self.worker.handle_request()
|
'Check if the specified filename has correct permissions'
| def check_permissions(self, filename):
| if salt.utils.platform.is_windows():
return True
try:
user = self.opts['user']
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
groups = salt.utils.get_gid_list(user, include_default=False)
except KeyError:
log.error('Failed to determine groups for user {0}. The user is not available.\n'.format(user))
return False
fmode = os.stat(filename)
if (os.getuid() == 0):
if ((fmode.st_uid == uid) or (fmode.st_gid != gid)):
return True
elif (self.opts.get('permissive_pki_access', False) and (fmode.st_gid in groups)):
return True
else:
if (stat.S_IWOTH & fmode.st_mode):
return False
if (self.opts.get('permissive_pki_access', False) and (stat.S_IWGRP & fmode.st_mode)):
return True
elif (stat.S_IWGRP & fmode.st_mode):
return False
if (not ((stat.S_IWGRP & fmode.st_mode) or (stat.S_IWOTH & fmode.st_mode))):
return True
return False
|
'Check a keyid for membership in a signing file'
| def check_signing_file(self, keyid, signing_file):
| if ((not signing_file) or (not os.path.exists(signing_file))):
return False
if (not self.check_permissions(signing_file)):
message = 'Wrong permissions for {0}, ignoring content'
log.warning(message.format(signing_file))
return False
with salt.utils.files.fopen(signing_file, 'r') as fp_:
for line in fp_:
line = line.strip()
if line.startswith('#'):
continue
elif salt.utils.expr_match(keyid, line):
return True
return False
|
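A standalone sketch of the signing-file check above, assuming the file holds one pattern per line with '#' comments; fnmatch stands in here for salt.utils.expr_match.

```python
import fnmatch
import os


def keyid_in_signing_file(keyid, signing_file):
    if not signing_file or not os.path.exists(signing_file):
        return False
    with open(signing_file) as fp_:
        for line in fp_:
            line = line.strip()
            if not line or line.startswith('#'):
                continue            # skip blanks and comments
            if fnmatch.fnmatch(keyid, line):
                return True
    return False
```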
'Check a keyid for membership in an autosign directory.'
| def check_autosign_dir(self, keyid):
| autosign_dir = os.path.join(self.opts['pki_dir'], 'minions_autosign')
expire_minutes = self.opts.get('autosign_timeout', 120)
if (expire_minutes > 0):
min_time = (time.time() - (60 * int(expire_minutes)))
for (root, dirs, filenames) in os.walk(autosign_dir):
for f in filenames:
stub_file = os.path.join(autosign_dir, f)
mtime = os.path.getmtime(stub_file)
if (mtime < min_time):
log.warning('Autosign keyid expired {0}'.format(stub_file))
os.remove(stub_file)
stub_file = os.path.join(autosign_dir, keyid)
if (not os.path.exists(stub_file)):
return False
os.remove(stub_file)
return True
|
'Checks if the specified keyid should automatically be rejected.'
| def check_autoreject(self, keyid):
| return self.check_signing_file(keyid, self.opts.get('autoreject_file', None))
|
'Checks if the specified keyid should automatically be signed.'
| def check_autosign(self, keyid):
| if self.opts['auto_accept']:
return True
if self.check_signing_file(keyid, self.opts.get('autosign_file', None)):
return True
if self.check_autosign_dir(keyid):
return True
return False
|
'Set the local file objects from the file server interface'
| def __setup_fileserver(self):
| fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = fs_.serve_file
self._file_find = fs_._find_file
self._file_hash = fs_.file_hash
self._file_list = fs_.file_list
self._file_list_emptydirs = fs_.file_list_emptydirs
self._dir_list = fs_.dir_list
self._symlink_list = fs_.symlink_list
self._file_envs = fs_.envs
|
'Verify that the passed information authorizes a minion to execute'
| def __verify_minion_publish(self, load):
| if ('peer' not in self.opts):
return False
if (not isinstance(self.opts['peer'], dict)):
return False
if any(((key not in load) for key in ('fun', 'arg', 'tgt', 'ret', 'id'))):
return False
if re.match('publish.*', load['fun']):
return False
perms = []
for match in self.opts['peer']:
if re.match(match, load['id']):
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if (',' in load['fun']):
load['fun'] = load['fun'].split(',')
arg_ = []
for arg in load['arg']:
arg_.append(arg.split())
load['arg'] = arg_
return self.ckminions.auth_check(perms, load['fun'], load['arg'], load['tgt'], load.get('tgt_type', 'glob'), publish_validate=True)
|
'Return the master options to the minion'
| def _master_opts(self, load):
| mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if (saltenv not in file_roots):
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
|
'Return the results from master_tops if configured'
| def _master_tops(self, load, skip_verify=False):
| if (not skip_verify):
if ('id' not in load):
log.error('Received call for external nodes without an id')
return {}
if (not salt.utils.verify.valid_id(self.opts, load['id'])):
return {}
opts = {}
grains = {}
ret = {}
if ('opts' in load):
opts = load['opts']
if ('grains' in load['opts']):
grains = load['opts']['grains']
for fun in self.tops:
if (fun not in self.opts.get('master_tops', {})):
continue
try:
ret = salt.utils.dictupdate.merge(ret, self.tops[fun](opts=opts, grains=grains), merge_lists=True)
except Exception as exc:
log.error('Top function {0} failed with error {1} for minion {2}'.format(fun, exc, load['id']))
return ret
|
'Gathers the data from the specified minions\' mine'
| def _mine_get(self, load, skip_verify=False):
| if (not skip_verify):
if any(((key not in load) for key in ('id', 'tgt', 'fun'))):
return {}
if ('mine_get' in self.opts):
if (not isinstance(self.opts['mine_get'], dict)):
return {}
perms = set()
for match in self.opts['mine_get']:
if re.match(match, load['id']):
if isinstance(self.opts['mine_get'][match], list):
perms.update(self.opts['mine_get'][match])
if (not any((re.match(perm, load['fun']) for perm in perms))):
return {}
ret = {}
if (not salt.utils.verify.valid_id(self.opts, load['id'])):
return ret
match_type = load.get('tgt_type', 'glob')
if (match_type.lower() == 'pillar'):
match_type = 'pillar_exact'
if (match_type.lower() == 'compound'):
match_type = 'compound_pillar_exact'
checker = salt.utils.minions.CkMinions(self.opts)
minions = checker.check_minions(load['tgt'], match_type, greedy=False)
for minion in minions:
fdata = self.cache.fetch('minions/{0}'.format(minion), 'mine')
if isinstance(fdata, dict):
fdata = fdata.get(load['fun'])
if fdata:
ret[minion] = fdata
return ret
|
'Return the mine data'
| def _mine(self, load, skip_verify=False):
| if (not skip_verify):
if (('id' not in load) or ('data' not in load)):
return False
if (self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False)):
cbank = 'minions/{0}'.format(load['id'])
ckey = 'mine'
if (not load.get('clear', False)):
data = self.cache.fetch(cbank, ckey)
if isinstance(data, dict):
data.update(load['data'])
load['data'] = data
self.cache.store(cbank, ckey, load['data'])
return True
|
'Allow the minion to delete a specific function from its own mine'
| def _mine_delete(self, load):
| if (('id' not in load) or ('fun' not in load)):
return False
if (self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False)):
cbank = 'minions/{0}'.format(load['id'])
ckey = 'mine'
try:
data = self.cache.fetch(cbank, ckey)
if (not isinstance(data, dict)):
return False
if (load['fun'] in data):
del data[load['fun']]
self.cache.store(cbank, ckey, data)
except OSError:
return False
return True
|
'Allow the minion to delete all of its own mine contents'
| def _mine_flush(self, load, skip_verify=False):
| if ((not skip_verify) and ('id' not in load)):
return False
if (self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False)):
return self.cache.flush('minions/{0}'.format(load['id']), 'mine')
return True
|
'Allows minions to send files to the master, files are sent to the
master file cache'
| def _file_recv(self, load):
| if any(((key not in load) for key in ('id', 'path', 'loc'))):
return False
if ((not self.opts['file_recv']) or os.path.isabs(load['path'])):
return False
if (os.path.isabs(load['path']) or ('../' in load['path'])):
return False
if (not salt.utils.verify.valid_id(self.opts, load['id'])):
return False
file_recv_max_size = ((1024 * 1024) * self.opts['file_recv_max_size'])
if (('loc' in load) and (load['loc'] < 0)):
log.error('Invalid file pointer: load[loc] < 0')
return False
if ((len(load['data']) + load.get('loc', 0)) > file_recv_max_size):
log.error('Exceeding file_recv_max_size limit: {0}'.format(file_recv_max_size))
return False
normpath = load['path']
if (':' in normpath):
normpath = normpath.replace('\\', '/')
normpath = os.path.normpath(normpath)
cpath = os.path.join(self.opts['cachedir'], 'minions', load['id'], 'files', normpath)
cdir = os.path.dirname(cpath)
if (not os.path.isdir(cdir)):
try:
os.makedirs(cdir)
except os.error:
pass
if (os.path.isfile(cpath) and (load['loc'] != 0)):
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(load['data'])
return True
|
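A standalone sketch of the path handling in _file_recv above: reject absolute or traversal paths, normalize Windows-style separators when a drive colon appears, and anchor the result under the per-minion file cache. The cache layout mirrors the join in the code; the values in the usage note are placeholders.

```python
import os


def minion_cache_path(cachedir, minion_id, path):
    if os.path.isabs(path) or '../' in path:
        raise ValueError('Invalid path: {0}'.format(path))
    if ':' in path:
        # A drive colon suggests a Windows path; normalize the separators.
        path = path.replace('\\', '/')
    path = os.path.normpath(path)
    return os.path.join(cachedir, 'minions', minion_id, 'files', path)

# minion_cache_path('/var/cache/salt/master', 'silver', 'etc/hosts')
# -> '/var/cache/salt/master/minions/silver/files/etc/hosts'
```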
'Return the pillar data for the minion'
| def _pillar(self, load):
| if any(((key not in load) for key in ('id', 'grains'))):
return False
log.debug('Master _pillar using ext: {0}'.format(load.get('ext')))
pillar = salt.pillar.get_pillar(self.opts, load['grains'], load['id'], load.get('saltenv', load.get('env')), load.get('ext'), self.mminion.functions, pillar_override=load.get('pillar_override', {}))
pillar_dirs = {}
data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
if self.opts.get('minion_data_cache', False):
self.cache.store('minions/{0}'.format(load['id']), 'data', {'grains': load['grains'], 'pillar': data})
self.event.fire_event('Minion data cache refresh', salt.utils.event.tagify(load['id'], 'refresh', 'minion'))
return data
|
'Receive an event from the minion and fire it on the master event
interface'
| def _minion_event(self, load):
| if ('id' not in load):
return False
if (('events' not in load) and (('tag' not in load) or ('data' not in load))):
return False
if ('events' in load):
for event in load['events']:
if ('data' in event):
event_data = event['data']
else:
event_data = event
self.event.fire_event(event_data, event['tag'])
if (load.get('pretag') is not None):
self.event.fire_event(event_data, salt.utils.event.tagify(event['tag'], base=load['pretag']))
else:
tag = load['tag']
self.event.fire_event(load, tag)
return True
|
'Handle the return data sent from the minions'
| def _return(self, load):
| endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid())
if any(((key not in load) for key in ('return', 'jid', 'id'))):
return False
if (load['jid'] == 'req'):
prep_fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
load['jid'] = self.mminion.returners[prep_fstr](nocache=load.get('nocache', False))
saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[saveload_fstr](load['jid'], load)
log.info('Got return from {id} for job {jid}'.format(**load))
self.event.fire_event(load, load['jid'])
self.event.fire_event(load, salt.utils.event.tagify([load['jid'], 'ret', load['id']], 'job'))
self.event.fire_ret_load(load)
if ((not self.opts['job_cache']) or self.opts.get('ext_job_cache')):
return
fstr = '{0}.update_endtime'.format(self.opts['master_job_cache'])
if (self.opts.get('job_cache_store_endtime') and (fstr in self.mminion.returners)):
self.mminion.returners[fstr](load['jid'], endtime)
fstr = '{0}.returner'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load)
|
'Receive a syndic minion return and format it to look like returns from
individual minions.'
| def _syndic_return(self, load):
| if any(((key not in load) for key in ('return', 'jid', 'id'))):
return None
if ('load' in load):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
for (key, item) in six.iteritems(load['return']):
ret = {'jid': load['jid'], 'id': key, 'return': item}
if ('out' in load):
ret['out'] = load['out']
self._return(ret)
|
'Execute a runner from a minion, return the runner\'s function data'
| def minion_runner(self, load):
| if ('peer_run' not in self.opts):
return {}
if (not isinstance(self.opts['peer_run'], dict)):
return {}
if any(((key not in load) for key in ('fun', 'arg', 'id'))):
return {}
perms = set()
for match in self.opts['peer_run']:
if re.match(match, load['id']):
if isinstance(self.opts['peer_run'][match], list):
perms.update(self.opts['peer_run'][match])
good = False
for perm in perms:
if re.match(perm, load['fun']):
good = True
if (not good):
log.warning('Minion id {0} is not who it says it is!'.format(load['id']))
return {}
opts = {}
opts.update(self.opts)
opts.update({'fun': load['fun'], 'arg': salt.utils.args.parse_input(load['arg'], no_parse=load.get('no_parse', [])), 'id': load['id'], 'doc': False, 'conf_file': self.opts['conf_file']})
runner = salt.runner.Runner(opts)
return runner.run()
|
'Request the return data from a specific jid, only allowed
if the requesting minion also initiated the execution.'
| def pub_ret(self, load, skip_verify=False):
| if ((not skip_verify) and any(((key not in load) for key in ('jid', 'id')))):
return {}
else:
auth_cache = os.path.join(self.opts['cachedir'], 'publish_auth')
if (not os.path.isdir(auth_cache)):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, load['jid'])
with salt.utils.files.fopen(jid_fn, 'r') as fp_:
if (not (load['id'] == fp_.read())):
return {}
return self.local.get_cache_returns(load['jid'])
|
'Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
peer:
    .*:
        - .*
This configuration will enable all minions to execute all commands.
peer:
    foo.example.com:
        - test.*
This configuration will only allow the minion foo.example.com to
execute commands from the test module'
| def minion_pub(self, load):
| if (not self.__verify_minion_publish(load)):
return {}
pub_load = {'fun': load['fun'], 'arg': salt.utils.args.parse_input(load['arg'], no_parse=load.get('no_parse', [])), 'tgt_type': load.get('tgt_type', 'glob'), 'tgt': load['tgt'], 'ret': load['ret'], 'id': load['id']}
if ('tgt_type' in load):
if load['tgt_type'].startswith('node'):
if (load['tgt'] in self.opts['nodegroups']):
pub_load['tgt'] = self.opts['nodegroups'][load['tgt']]
pub_load['tgt_type'] = 'compound'
else:
return {}
else:
pub_load['tgt_type'] = load['tgt_type']
ret = {}
ret['jid'] = self.local.cmd_async(**pub_load)
ret['minions'] = self.ckminions.check_minions(load['tgt'], pub_load['tgt_type'])
auth_cache = os.path.join(self.opts['cachedir'], 'publish_auth')
if (not os.path.isdir(auth_cache)):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, str(ret['jid']))
with salt.utils.files.fopen(jid_fn, 'w+') as fp_:
fp_.write(load['id'])
return ret
|
'Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
peer:
    .*:
        - .*
This configuration will enable all minions to execute all commands.
peer:
    foo.example.com:
        - test.*
This configuration will only allow the minion foo.example.com to
execute commands from the test module'
| def minion_publish(self, load):
| if (not self.__verify_minion_publish(load)):
return {}
pub_load = {'fun': load['fun'], 'arg': salt.utils.args.parse_input(load['arg'], no_parse=load.get('no_parse', [])), 'tgt_type': load.get('tgt_type', 'glob'), 'tgt': load['tgt'], 'ret': load['ret'], 'id': load['id']}
if ('tmo' in load):
try:
pub_load['timeout'] = int(load['tmo'])
except ValueError:
msg = 'Failed to parse timeout value: {0}'.format(load['tmo'])
log.warning(msg)
return {}
if ('timeout' in load):
try:
pub_load['timeout'] = int(load['timeout'])
except ValueError:
msg = 'Failed to parse timeout value: {0}'.format(load['timeout'])
log.warning(msg)
return {}
if ('tgt_type' in load):
if load['tgt_type'].startswith('node'):
if (load['tgt'] in self.opts['nodegroups']):
pub_load['tgt'] = self.opts['nodegroups'][load['tgt']]
pub_load['tgt_type'] = 'compound'
else:
return {}
else:
pub_load['tgt_type'] = load['tgt_type']
pub_load['raw'] = True
ret = {}
for minion in self.local.cmd_iter(**pub_load):
if (load.get('form', '') == 'full'):
data = minion
if ('jid' in minion):
ret['__jid__'] = minion['jid']
data['ret'] = data.pop('return')
ret[minion['id']] = data
else:
ret[minion['id']] = minion['return']
if ('jid' in minion):
ret['__jid__'] = minion['jid']
for (key, val) in six.iteritems(self.local.get_cache_returns(ret['__jid__'])):
if (key not in ret):
ret[key] = val
if (load.get('form', '') != 'full'):
ret.pop('__jid__')
return ret
|
'Allow a minion to request revocation of its own key'
| def revoke_auth(self, load):
| if ('id' not in load):
return False
keyapi = salt.key.Key(self.opts)
keyapi.delete_key(load['id'], preserve_minions=load.get('preserve_minion_cache', False))
return True
|