Dataset columns: text_prompt (string, 100 to 17.7k chars), code_prompt (string, 7 to 9.86k chars)
<SYSTEM_TASK:> Create a TCP socket connection <END_TASK> <USER_TASK:> Description: def _connect(self): """ Create a TCP socket connection """
# we want to mimic what socket.create_connection does to support
# ipv4/ipv6, but we want to set options prior to calling
# socket.connect()
err = None
for res in socket.getaddrinfo(self.host, self.port, 0,
                              socket.SOCK_STREAM):
    family, socktype, proto, canonname, socket_address = res
    sock = None
    try:
        sock = socket.socket(family, socktype, proto)
        # TCP_NODELAY
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        # TCP_KEEPALIVE
        if self.socket_keepalive:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            for k, v in iteritems(self.socket_keepalive_options):
                sock.setsockopt(socket.SOL_TCP, k, v)
        # set the socket_connect_timeout before we connect
        sock.settimeout(self.socket_connect_timeout)
        # connect
        sock.connect(socket_address)
        # set the socket_timeout now that we're connected
        sock.settimeout(self.socket_timeout)
        return sock
    except socket.error as _:
        err = _
        if sock is not None:
            sock.close()
if err is not None:
    raise err
raise socket.error("socket.getaddrinfo returned an empty list")
<SYSTEM_TASK:> Disconnects from the SSDB server <END_TASK> <USER_TASK:> Description: def disconnect(self): """ Disconnects from the SSDB server """
self._parser.on_disconnect() if self._sock is None: return try: self._sock.shutdown(socket.SHUT_RDWR) self._sock.close() except socket.error: pass self._sock = None
<SYSTEM_TASK:> Pack a series of arguments into a valid SSDB command <END_TASK> <USER_TASK:> Description: def pack_command(self, *args): """ Pack a series of arguments into a valid SSDB command """
# the client might have included 1 or more literal arguments in
# the command name, e.g., 'CONFIG GET'. The SSDB server expects
# these arguments to be sent separately, so split the first
# argument manually. All of these arguments get wrapped
# in the Token class to prevent them from being encoded.
command = args[0]
if ' ' in command:
    args = tuple([Token(s) for s in command.split(' ')]) + args[1:]
else:
    args = (Token(command),) + args[1:]

args_output = SYM_EMPTY.join([
    SYM_EMPTY.join((
        b(str(len(k))), SYM_LF, k, SYM_LF
    )) for k in imap(self.encode, args)
])
output = "%s%s" % (args_output, SYM_LF)
return output
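For reference, a minimal self-contained sketch of the framing this produces, assuming SYM_LF is b'\n' and SYM_EMPTY is b'' (hypothetical stand-ins for the module-level constants used above): each argument is sent as its byte length, a newline, the argument bytes, a newline, and a final blank line terminates the command.

# Self-contained sketch of the SSDB wire framing under the above assumptions.
def pack_command_sketch(*args):
    out = []
    for arg in args:
        data = arg if isinstance(arg, bytes) else str(arg).encode('utf-8')
        out.append(str(len(data)).encode('utf-8'))  # byte length of argument
        out.append(b'\n')
        out.append(data)
        out.append(b'\n')
    out.append(b'\n')  # blank line terminates the command
    return b''.join(out)

# pack_command_sketch('set', 'key', 'value')
# => b'3\nset\n3\nkey\n5\nvalue\n\n'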
<SYSTEM_TASK:> Substitutes a variable read from a YAML node with the value stored in Fabric's ``env`` dictionary. Creates an <END_TASK> <USER_TASK:> Description: def expand_env_lazy(loader, node): """ Substitutes a variable read from a YAML node with the value stored in Fabric's ``env`` dictionary. Creates an object for late resolution. :param loader: YAML loader. :type loader: yaml.loader.SafeLoader :param node: Document node. :type node: ScalarNode :return: Corresponding value stored in the ``env`` dictionary. :rtype: any """
val = loader.construct_scalar(node) return lazy_once(env_get, val)
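A hedged registration sketch: constructors like this one are normally attached to a YAML tag before loading. The tag name `!env_lazy` below is hypothetical; the real project may register a different tag.

import yaml

# Hypothetical tag name; register the constructor on the safe loader.
yaml.SafeLoader.add_constructor('!env_lazy', expand_env_lazy)

# config = yaml.safe_load('host: !env_lazy deploy_host')
# config['host'] then resolves env['deploy_host'] lazily, on first use.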
<SYSTEM_TASK:> This generator function compares a record, slot by slot, and yields <END_TASK> <USER_TASK:> Description: def compare_record_iter(a, b, fs_a=None, fs_b=None, options=None): """This generator function compares a record, slot by slot, and yields differences found as ``DiffInfo`` objects. args: ``a=``\ *Record* The base object ``b=``\ *Record*\ \|\ *object* The 'other' object, which must be the same type as ``a``, unless ``options.duck_type`` is set. ``fs_a=``\ *FieldSelector\* The current diff context, prefixed to any returned ``base`` field in yielded ``DiffInfo`` objects. Defaults to an empty FieldSelector. ``fs_b=``\ *FieldSelector\* The ``other`` object context. This will differ from ``fs_a`` in the case of collections, where a value has moved slots. Defaults to an empty FieldSelector. ``options=``\ *DiffOptions\* A constructed ``DiffOptions`` object; a default one is created if not passed in. """
if not options: options = DiffOptions() if not options.duck_type and type(a) != type(b) and not ( a is _nothing or b is _nothing ): raise TypeError( "cannot compare %s with %s" % (type(a).__name__, type(b).__name__) ) if fs_a is None: fs_a = FieldSelector(tuple()) fs_b = FieldSelector(tuple()) properties = ( type(a).properties if a is not _nothing else type(b).properties ) for propname in sorted(properties): prop = properties[propname] if options.is_filtered(prop, fs_a + propname): continue propval_a = options.normalize_object_slot( getattr(a, propname, _nothing), prop, a, ) propval_b = options.normalize_object_slot( getattr(b, propname, _nothing), prop, b, ) if propval_a is _nothing and propval_b is _nothing: # don't yield NO_CHANGE for fields missing on both sides continue one_side_nothing = (propval_a is _nothing) != (propval_b is _nothing) types_match = type(propval_a) == type(propval_b) comparable = ( isinstance(propval_a, COMPARABLE) or isinstance(propval_b, COMPARABLE) ) prop_fs_a = fs_a + [propname] prop_fs_b = fs_b + [propname] if comparable and ( types_match or options.duck_type or ( options.ignore_empty_slots and one_side_nothing ) ): if one_side_nothing: diff_types_found = set() for diff in _diff_iter( propval_a, propval_b, prop_fs_a, prop_fs_b, options, ): if one_side_nothing: if diff.diff_type != DiffTypes.NO_CHANGE: diff_types_found.add(diff.diff_type) else: yield diff if one_side_nothing: net_diff = None if diff_types_found: assert(len(diff_types_found) == 1) net_diff = tuple(diff_types_found)[0] elif options.unchanged: net_diff = DiffTypes.NO_CHANGE if net_diff: yield DiffInfo( diff_type=net_diff, base=prop_fs_a, other=prop_fs_b, ) elif one_side_nothing: yield DiffInfo( diff_type=( DiffTypes.ADDED if propval_a is _nothing else DiffTypes.REMOVED ), base=fs_a + [propname], other=fs_b + [propname], ) elif not options.items_equal(propval_a, propval_b): yield DiffInfo( diff_type=DiffTypes.MODIFIED, base=fs_a + [propname], other=fs_b + [propname], ) elif options.unchanged: yield DiffInfo( diff_type=DiffTypes.NO_CHANGE, base=fs_a + [propname], other=fs_b + [propname], )
<SYSTEM_TASK:> Normalizes whitespace; called if ``ignore_ws`` is true. <END_TASK> <USER_TASK:> Description: def normalize_whitespace(self, value): """Normalizes whitespace; called if ``ignore_ws`` is true."""
if isinstance(value, unicode): return u" ".join( x for x in re.split(r'\s+', value, flags=re.UNICODE) if len(x) ) else: return " ".join(value.split())
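A quick standalone demonstration of the same normalization rule (no class needed):

import re

def _normalize_ws(value):
    # Collapse any run of whitespace to a single space and trim the ends.
    return u" ".join(x for x in re.split(r'\s+', value, flags=re.UNICODE) if x)

assert _normalize_ws(u"  foo\t bar\nbaz ") == u"foo bar baz"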
<SYSTEM_TASK:> Hook which is called on every value before comparison, and should <END_TASK> <USER_TASK:> Description: def normalize_val(self, value=_nothing): """Hook which is called on every value before comparison, and should return the scrubbed value or ``self._nothing`` to indicate that the value is not set. """
if isinstance(value, basestring): value = self.normalize_text(value) if self.ignore_empty_slots and self.value_is_empty(value): value = _nothing return value
<SYSTEM_TASK:> This hook wraps ``normalize_slot``, and performs clean-ups which <END_TASK> <USER_TASK:> Description: def normalize_object_slot(self, value=_nothing, prop=None, obj=None): """This hook wraps ``normalize_slot``, and performs clean-ups which require access to the object the slot is in as well as the value. """
if value is not _nothing and hasattr(prop, "compare_as"): method, nargs = getattr(prop, "compare_as_info", (False, 1)) args = [] if method: args.append(obj) if nargs: args.append(value) value = prop.compare_as(*args) return self.normalize_slot(value, prop)
<SYSTEM_TASK:> Retrieve an object identifier from the given record; if it is an <END_TASK> <USER_TASK:> Description: def record_id(self, record, type_=None, selector=None): """Retrieve an object identifier from the given record; if it is an alien class, and the type is provided, then use duck typing to get the corresponding fields of the alien class."""
pk = record_id(record, type_, selector, self.normalize_object_slot) return pk
<SYSTEM_TASK:> Check consistency of model reactions. <END_TASK> <USER_TASK:> Description: def fastcc(model, epsilon, solver): """Check consistency of model reactions. Yield all reactions in the model that are not part of the consistent subset. Args: model: :class:`MetabolicModel` to solve. epsilon: Flux threshold value. solver: LP solver instance to use. """
reaction_set = set(model.reactions)
subset = set(reaction_id for reaction_id in reaction_set
             if model.limits[reaction_id].lower >= 0)

logger.info('Checking {} irreversible reactions...'.format(len(subset)))
logger.debug('|J| = {}, J = {}'.format(len(subset), subset))

p = FastcoreProblem(model, solver, epsilon=epsilon)
p.lp7(subset)

consistent_subset = set(
    reaction_id for reaction_id in model.reactions
    if abs(p.get_flux(reaction_id)) >= 0.999 * epsilon)

logger.debug('|A| = {}, A = {}'.format(
    len(consistent_subset), consistent_subset))

for reaction in subset - consistent_subset:
    # Inconsistent reaction
    yield reaction

# Check remaining reactions
subset = (reaction_set - subset) - consistent_subset

logger.info('Checking reversible reactions...')
logger.debug('|J| = {}, J = {}'.format(len(subset), subset))

flipped = False
singleton = False
while len(subset) > 0:
    logger.info('{} reversible reactions left to check...'.format(
        len(subset)))
    if singleton:
        reaction = next(iter(subset))
        subset_i = {reaction}

        logger.debug('LP3 on {}'.format(subset_i))
        p.maximize({reaction: -1 if p.is_flipped(reaction) else 1})
    else:
        subset_i = subset
        logger.debug('LP7 on {}'.format(subset_i))
        p.lp7(subset_i)

    # abs() must wrap the flux value itself, not the comparison result
    consistent_subset.update(
        reaction_id for reaction_id in subset
        if abs(p.get_flux(reaction_id)) >= 0.999 * epsilon)

    logger.debug('|A| = {}, A = {}'.format(
        len(consistent_subset), consistent_subset))

    if not subset.isdisjoint(consistent_subset):
        subset -= consistent_subset
        logger.debug('|J| = {}, J = {}'.format(len(subset), subset))
        flipped = False
    else:
        # TODO: irreversible reactions are taken care of before the
        # loop so at this point all reactions in subset_i are
        # reversible(?).
        subset_rev_i = subset_i & model.reversible
        if flipped or len(subset_rev_i) == 0:
            flipped = False
            if singleton:
                subset -= subset_rev_i
                for reaction in subset_rev_i:
                    logger.info('Inconsistent: {}'.format(reaction))
                    yield reaction
            else:
                singleton = True
        else:
            p.flip(subset_rev_i)
            flipped = True
            logger.info('Flipped {} reactions'.format(len(subset_rev_i)))
<SYSTEM_TASK:> Quickly check whether model is consistent. <END_TASK> <USER_TASK:> Description: def fastcc_is_consistent(model, epsilon, solver): """Quickly check whether model is consistent. Return True if the model is consistent. If it is only necessary to know whether a model is consistent, this function is fast as it will return the result as soon as it finds a single inconsistent reaction. Args: model: :class:`MetabolicModel` to solve. epsilon: Flux threshold value. solver: LP solver instance to use. """
for reaction in fastcc(model, epsilon, solver): return False return True
<SYSTEM_TASK:> Return consistent subset of model. <END_TASK> <USER_TASK:> Description: def fastcc_consistent_subset(model, epsilon, solver): """Return consistent subset of model. The largest consistent subset is returned as a set of reaction names. Args: model: :class:`MetabolicModel` to solve. epsilon: Flux threshold value. solver: LP solver instance to use. Returns: Set of reaction IDs in the consistent reaction subset. """
reaction_set = set(model.reactions) return reaction_set.difference(fastcc(model, epsilon, solver))
<SYSTEM_TASK:> Find a flux consistent subnetwork containing the core subset. <END_TASK> <USER_TASK:> Description: def fastcore(model, core, epsilon, solver, scaling=1e5, weights={}): """Find a flux consistent subnetwork containing the core subset. The result will contain the core subset and as few of the additional reactions as possible. Args: model: :class:`MetabolicModel` to solve. core: Set of core reaction IDs. epsilon: Flux threshold value. solver: LP solver instance to use. scaling: Scaling value to apply (see [Vlassis14]_ for more information on this parameter). weights: Dictionary with reaction IDs as keys and values as weights. Weights specify the cost of adding a reaction to the consistent subnetwork. Default value is 1. Returns: Set of reaction IDs in the consistent reaction subset. """
consistent_subset = set() reaction_set = set(model.reactions) subset = core - model.reversible logger.debug('|J| = {}, J = {}'.format(len(subset), subset)) penalty_set = reaction_set - core logger.debug('|P| = {}, P = {}'.format(len(penalty_set), penalty_set)) p = FastcoreProblem(model, solver, epsilon=epsilon) mode = set(p.find_sparse_mode(subset, penalty_set, scaling, weights)) if not subset.issubset(mode): raise FastcoreError('Inconsistent irreversible core reactions:' ' {}'.format(subset - mode)) consistent_subset |= mode logger.debug('|A| = {}, A = {}'.format( len(consistent_subset), consistent_subset)) subset = core - mode logger.debug('|J| = {}, J = {}'.format(len(subset), subset)) flipped = False singleton = False while len(subset) > 0: penalty_set -= consistent_subset if singleton: subset_i = set((next(iter(subset)),)) else: subset_i = subset mode = set(p.find_sparse_mode(subset_i, penalty_set, scaling, weights)) consistent_subset.update(mode) logger.debug('|A| = {}, A = {}'.format( len(consistent_subset), consistent_subset)) if not subset.isdisjoint(consistent_subset): logger.debug('Subset improved {} -> {}'.format( len(subset), len(subset - consistent_subset))) subset -= consistent_subset logger.debug('|J| = {}, J = {}'.format(len(subset), subset)) flipped = False else: logger.debug('Nothing found, changing state...') subset_rev_i = subset_i & model.reversible if flipped or len(subset_rev_i) == 0: if singleton: raise FastcoreError('Global network inconsistent:' ' {}'.format(subset_rev_i)) logger.debug('Going to non-flipped, singleton state...') singleton = True flipped = False else: p.flip(subset_rev_i) flipped = True logger.debug('Flipped {} reactions'.format( len(subset_rev_i))) return consistent_subset
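A hedged usage sketch of `fastcore`; the model loading and solver construction below are assumptions about the surrounding psamm API, not part of this module:

# Hypothetical wiring; exact import paths and constructors are assumptions.
# from psamm.datasource import native
# from psamm.lpsolver import generic
#
# model = native.ModelReader.reader_from_path('model.yaml').create_model()
# mm = model.create_metabolic_model()
# solver = generic.Solver()
#
# core = {'rxn_biomass', 'rxn_atp'}  # reactions that must carry flux
# subnetwork = fastcore(mm, core, epsilon=1e-5, solver=solver)
# print(sorted(subnetwork))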
<SYSTEM_TASK:> Approximately maximize the number of reactions with flux. <END_TASK> <USER_TASK:> Description: def lp7(self, reaction_subset): """Approximately maximize the number of reactions with flux. This is similar to FBA but approximately maximizing the number of reactions in subset with flux > epsilon, instead of just maximizing the flux of one particular reaction. LP7 prefers "flux splitting" over "flux concentrating". """
if self._zl is None: self._add_maximization_vars() positive = set(reaction_subset) - self._flipped negative = set(reaction_subset) & self._flipped v = self._v.set(positive) zl = self._zl.set(positive) cs = self._prob.add_linear_constraints(v >= zl) self._temp_constr.extend(cs) v = self._v.set(negative) zl = self._zl.set(negative) cs = self._prob.add_linear_constraints(v <= -zl) self._temp_constr.extend(cs) self._prob.set_objective(self._zl.sum(reaction_subset)) self._solve()
<SYSTEM_TASK:> Force reactions in K above epsilon while minimizing support of P. <END_TASK> <USER_TASK:> Description: def lp10(self, subset_k, subset_p, weights={}): """Force reactions in K above epsilon while minimizing support of P. This program forces reactions in subset K to attain flux > epsilon while minimizing the sum of absolute flux values for reactions in subset P (L1-regularization). """
if self._z is None: self._add_minimization_vars() positive = set(subset_k) - self._flipped negative = set(subset_k) & self._flipped v = self._v.set(positive) cs = self._prob.add_linear_constraints(v >= self._epsilon) self._temp_constr.extend(cs) v = self._v.set(negative) cs = self._prob.add_linear_constraints(v <= -self._epsilon) self._temp_constr.extend(cs) self._prob.set_objective(self._z.expr( (rxnid, -weights.get(rxnid, 1)) for rxnid in subset_p)) self._solve()
<SYSTEM_TASK:> Find a sparse mode containing reactions of the core subset. <END_TASK> <USER_TASK:> Description: def find_sparse_mode(self, core, additional, scaling, weights={}): """Find a sparse mode containing reactions of the core subset. Return an iterator of the support of a sparse mode that contains as many reactions from core as possible, and as few reactions from additional as possible (approximately). A dictionary of weights can be supplied which gives further penalties for including specific additional reactions. """
if len(core) == 0: return self.lp7(core) k = set() for reaction_id in core: flux = self.get_flux(reaction_id) if self.is_flipped(reaction_id): flux *= -1 if flux >= self._epsilon: k.add(reaction_id) if len(k) == 0: return self.lp10(k, additional, weights) for reaction_id in self._model.reactions: flux = self.get_flux(reaction_id) if abs(flux) >= self._epsilon / scaling: yield reaction_id
<SYSTEM_TASK:> Flip the specified reactions. <END_TASK> <USER_TASK:> Description: def flip(self, reactions): """Flip the specified reactions."""
for reaction in reactions: if reaction in self._flipped: self._flipped.remove(reaction) else: self._flipped.add(reaction)
<SYSTEM_TASK:> Calculate generalized Jaccard similarity of formulas. <END_TASK> <USER_TASK:> Description: def _jaccard_similarity(f1, f2, weight_func): """Calculate generalized Jaccard similarity of formulas. Returns the weighted similarity value or None if there is no overlap at all. If the union of the formulas has a weight of zero (i.e. the denominator in the Jaccard similarity is zero), a value of zero is returned. """
elements = set(f1) elements.update(f2) count, w_count, w_total = 0, 0, 0 for element in elements: mi = min(f1.get(element, 0), f2.get(element, 0)) mx = max(f1.get(element, 0), f2.get(element, 0)) count += mi w = weight_func(element) w_count += w * mi w_total += w * mx if count == 0: return None return 0.0 if w_total == 0.0 else w_count / w_total
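A self-contained worked example of the weighted Jaccard computation, using plain dicts for formulas and a toy weight function mirroring the default weights mentioned below (H=0, C=1, others 0.82):

# Toy reproduction of the weighted Jaccard similarity on plain dicts.
def jaccard(f1, f2, weight):
    elements = set(f1) | set(f2)
    count = w_count = w_total = 0
    for e in elements:
        mi = min(f1.get(e, 0), f2.get(e, 0))
        mx = max(f1.get(e, 0), f2.get(e, 0))
        count += mi
        w_count += weight(e) * mi
        w_total += weight(e) * mx
    if count == 0:
        return None
    return 0.0 if w_total == 0.0 else w_count / float(w_total)

weight = lambda e: {'H': 0.0, 'C': 1.0}.get(e, 0.82)
# Glucose-like vs pyruvate-like element counts:
print(jaccard({'C': 6, 'H': 12, 'O': 6}, {'C': 3, 'H': 4, 'O': 3}, weight))
# => (3*1 + 3*0.82) / (6*1 + 6*0.82) = 5.46 / 10.92 = 0.5  (H has weight 0)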
<SYSTEM_TASK:> Predict reaction pairs using iterated method. <END_TASK> <USER_TASK:> Description: def predict_compound_pairs_iterated( reactions, formulas, prior=(1, 43), max_iterations=None, element_weight=element_weight): """Predict reaction pairs using iterated method. Returns a tuple containing a dictionary of predictions keyed by the reaction IDs, and the final number of iterations. Each reaction prediction entry contains a tuple with a dictionary of transfers and a dictionary of unbalanced compounds. The dictionary of unbalanced compounds is empty only if the reaction is balanced. Args: reactions: Dictionary or pair-iterable of (id, equation) pairs. IDs must be any hashable reaction identifier (e.g. string) and equation must be :class:`psamm.reaction.Reaction` objects. formulas: Dictionary mapping compound IDs to :class:`psamm.formula.Formula`. Formulas must be flattened. prior: Tuple of (alpha, beta) parameters for the MAP inference. If not provided, the default parameters will be used: (1, 43). max_iterations: Maximum iterations to run before stopping. If the stopping condition is reached before this number of iterations, the procedure also stops. If None, the procedure only stops when the stopping condition is reached. element_weight: A function returning a weight value for the given :class:`psamm.formula.Atom` or :class:`psamm.formula.Radical`. If not provided, the default weight will be used (H=0, C=1, *=0.82) """
prior_alpha, prior_beta = prior reactions = dict(reactions) pair_reactions = {} possible_pairs = Counter() for reaction_id, equation in iteritems(reactions): for (c1, _), (c2, _) in product(equation.left, equation.right): spair = tuple(sorted([c1.name, c2.name])) possible_pairs[spair] += 1 pair_reactions.setdefault(spair, set()).add(reaction_id) next_reactions = set(reactions) pairs_predicted = None prediction = {} weights = {} iteration = 0 while len(next_reactions) > 0: iteration += 1 if max_iterations is not None and iteration > max_iterations: break logger.info('Iteration {}: {} reactions...'.format( iteration, len(next_reactions))) for reaction_id in next_reactions: result = predict_compound_pairs( reactions[reaction_id], formulas, weights, element_weight) if result is None: continue transfer, balance = result rpairs = {} for ((c1, _), (c2, _)), form in iteritems(transfer): rpairs.setdefault((c1, c2), []).append(form) prediction[reaction_id] = rpairs, balance pairs_predicted = Counter() for reaction_id, (rpairs, _) in iteritems(prediction): for c1, c2 in rpairs: spair = tuple(sorted([c1.name, c2.name])) pairs_predicted[spair] += 1 next_reactions = set() for spair, total in sorted(iteritems(possible_pairs)): pred = pairs_predicted[spair] # The weight is set to the maximum a posteriori (MAP) estimate # of the primary pair probability distribution. posterior_alpha = prior_alpha + pred posterior_beta = prior_beta + total - pred pair_weight = ((posterior_alpha - 1) / (posterior_alpha + posterior_beta - 2)) if (spair not in weights or abs(pair_weight - weights[spair]) > 1e-5): next_reactions.update(pair_reactions[spair]) c1, c2 = spair weights[c1, c2] = pair_weight weights[c2, c1] = pair_weight return prediction, iteration
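The pair weight computed in the loop above is the mode (MAP estimate) of a Beta posterior. A quick numeric check with the default prior (1, 43) and illustrative counts:

# MAP estimate of the pair probability under a Beta(alpha, beta) prior.
prior_alpha, prior_beta = 1, 43
pred, total = 5, 10  # pair predicted as primary in 5 of 10 candidate reactions

posterior_alpha = prior_alpha + pred           # 6
posterior_beta = prior_beta + total - pred     # 48
weight = (posterior_alpha - 1.0) / (posterior_alpha + posterior_beta - 2)
print(weight)  # => 5/52 ~= 0.0962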
<SYSTEM_TASK:> Predict compound pairs for a single reaction. <END_TASK> <USER_TASK:> Description: def predict_compound_pairs(reaction, compound_formula, pair_weights={}, weight_func=element_weight): """Predict compound pairs for a single reaction. Performs greedy matching on reaction compounds using a scoring function that uses generalized Jaccard similarity corrected by the weights in the given dictionary. Returns a tuple of a transfer dictionary and a dictionary of unbalanced compounds. The dictionary of unbalanced compounds is empty only if the reaction is balanced. Args: reaction: :class:`psamm.reaction.Reaction`. compound_formula: Dictionary mapping compound IDs to :class:`psamm.formula.Formula`. Formulas must be flattened. pair_weights: Dictionary mapping pairs of compound IDs to correction values. This value is multiplied by the calculated Jaccard similarity. If a pair is not in the dictionary, the value 1 is used. Pairs are looked up in the weights dictionary as a tuple of compound names (``c1``, ``c2``) where ``c1`` is the left-hand side and ``c2`` is the right-hand side. weight_func: Weight function for calculating the generalized Jaccard similarity. This function will be given an :class:`psamm.formula.Atom` or :class:`psamm.formula.Radical` and should return a corresponding weight. """
def score_func(inst1, inst2): score = _jaccard_similarity( inst1.formula, inst2.formula, weight_func) if score is None: return None pair = inst1.compound.name, inst2.compound.name pair_weight = pair_weights.get(pair, 1.0) return pair_weight * score return _match_greedily(reaction, compound_formula, score_func)
<SYSTEM_TASK:> Helper to access a Juju config option when charmhelpers is not available. <END_TASK> <USER_TASK:> Description: def config_get(option_name): """ Helper to access a Juju config option when charmhelpers is not available. :param str option_name: Name of the config option to get the value of """
try:
    raw = subprocess.check_output(['config-get', option_name,
                                   '--format=yaml'])
    # safe_load avoids constructing arbitrary Python objects from YAML
    return yaml.safe_load(raw.decode('UTF-8'))
except ValueError:
    return None
<SYSTEM_TASK:> Attempt to fetch all resources for a charm. <END_TASK> <USER_TASK:> Description: def fetch(which=None, mirror_url=None, resources_yaml='resources.yaml', force=False, reporthook=None): """ Attempt to fetch all resources for a charm. :param list which: A name, or a list of one or more resource names, to fetch. If omitted, all non-optional resources are fetched. You can also pass ``jujuresources.ALL`` to fetch all optional *and* required resources. :param str mirror_url: Fetch resources from the given mirror. :param str resources_yaml: Location of the yaml file containing the resource descriptions (default: ``./resources.yaml``). Can be a local file name or a remote URL. :param bool force: Force re-downloading of valid resources. :param func reporthook: Callback for reporting download progress. Will be called once for each resource, just prior to fetching, and will be passed the resource name. :return: True or False indicating whether the resources were successfully downloaded. """
resources = _load(resources_yaml, None) if reporthook is None: reporthook = lambda r: juju_log('Fetching %s' % r, level='INFO') _fetch(resources, which, mirror_url, force, reporthook) failed = _invalid(resources, which) if failed: juju_log('Failed to fetch resource%s: %s' % ( 's' if len(failed) > 1 else '', ', '.join(failed) ), level='WARNING') else: juju_log('All resources successfully fetched', level='INFO') return not failed
<SYSTEM_TASK:> Rate limit a command function. <END_TASK> <USER_TASK:> Description: def rate(wait=MIN_WAIT, reps=REPS): """ Rate limit a command function. :param wait: How long to wait between commands. :param reps: How many times to send a command. :returns: Decorator. """
def decorator(function): """ Decorator function. :returns: Wrapper. """ def wrapper(self, *args, **kwargs): """ Wrapper. :param args: Passthrough positional arguments. :param kwargs: Passthrough keyword arguments. """ saved_wait = self.wait saved_reps = self.reps self.wait = wait self.reps = reps function(self, *args, **kwargs) self.wait = saved_wait self.reps = saved_reps return wrapper return decorator
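Typical usage of the decorator on a command method; the class and method body below are hypothetical, and MIN_WAIT/REPS are assumed to be the module constants referenced in the decorator's signature:

# Hypothetical command class; the decorator temporarily overrides the
# instance's wait/reps while the wrapped method runs, then restores them.
class Light(object):
    def __init__(self):
        self.wait = MIN_WAIT
        self.reps = REPS

    @rate(wait=0.05, reps=2)
    def flash_once(self):
        pass  # commands sent here would use wait=0.05 and reps=2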
<SYSTEM_TASK:> Turn on or off. <END_TASK> <USER_TASK:> Description: def on(self, state): """ Turn on or off. :param state: True (on) or False (off). """
self._on = state cmd = self.command_set.off() if state: cmd = self.command_set.on() self.send(cmd)
<SYSTEM_TASK:> Send a command to the bridge. <END_TASK> <USER_TASK:> Description: def send(self, cmd): """ Send a command to the bridge. :param cmd: List of command bytes. """
self._bridge.send(cmd, wait=self.wait, reps=self.reps)
<SYSTEM_TASK:> Start a pipeline. <END_TASK> <USER_TASK:> Description: def enqueue(self, pipeline): """ Start a pipeline. :param pipeline: Start this pipeline. """
copied = Pipeline().append(pipeline) copied.group = self self._queue.put(copied)
<SYSTEM_TASK:> Compute wait time. <END_TASK> <USER_TASK:> Description: def _wait(self, duration, steps, commands): """ Compute wait time. :param duration: Total time (in seconds). :param steps: Number of steps. :param commands: Number of commands. :returns: Wait in seconds. """
wait = ((duration - self.wait * self.reps * commands) / steps) - \ (self.wait * self.reps * self._bridge.active) return max(0, wait)
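A worked example of the formula with illustrative values (per-command wait 0.01 s, 1 repeat, one active bridge pipeline):

# Worked example of the wait computation above.
duration, steps_n, commands = 2.0, 20, 20
wait, reps, active = 0.01, 1, 1

result = ((duration - wait * reps * commands) / steps_n) \
    - (wait * reps * active)
print(max(0, result))  # => (1.8 / 20) - 0.01 = 0.08 seconds between steps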
<SYSTEM_TASK:> Dispatch to the right subclass based on the definition. <END_TASK> <USER_TASK:> Description: def get(cls, name, definition, output_dir): """ Dispatch to the right subclass based on the definition. """
if 'url' in definition: return URLResource(name, definition, output_dir) elif 'pypi' in definition: return PyPIResource(name, definition, output_dir) else: return Resource(name, definition, output_dir)
<SYSTEM_TASK:> Run the pipeline queue. <END_TASK> <USER_TASK:> Description: def run(self): """ Run the pipeline queue. The pipeline queue will run forever. """
while True: self._event.clear() self._queue.get().run(self._event)
<SYSTEM_TASK:> Run the pipeline. <END_TASK> <USER_TASK:> Description: def run(self, stop): """ Run the pipeline. :param stop: Stop event """
_LOGGER.info("Starting a new pipeline on group %s", self._group) self._group.bridge.incr_active() for i, stage in enumerate(self._pipe): self._execute_stage(i, stage, stop) _LOGGER.info("Finished pipeline on group %s", self._group) self._group.bridge.decr_active()
<SYSTEM_TASK:> Append a pipeline to this pipeline. <END_TASK> <USER_TASK:> Description: def append(self, pipeline): """ Append a pipeline to this pipeline. :param pipeline: Pipeline to append. :returns: This pipeline. """
for stage in pipeline.pipe: self._pipe.append(stage) return self
<SYSTEM_TASK:> Add stage methods at runtime. <END_TASK> <USER_TASK:> Description: def _add_stage(self, name): """ Add stage methods at runtime. Stage methods all follow the same pattern. :param name: Stage name. """
def stage_func(self, *args, **kwargs): """ Stage function. :param args: Positional arguments. :param kwargs: Keyword arguments. :return: Pipeline (for method chaining). """ self._pipe.append(Stage(name, args, kwargs)) return self setattr(Pipeline, name, stage_func)
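Because `_add_stage` attaches stage methods at runtime, pipelines are built by chaining; a hedged sketch of typical use, with stage names matching those dispatched in `_execute_stage` below (the group wiring is assumed):

# Hedged sketch: stage methods are generated at runtime, so a pipeline
# is assembled by chaining calls and then enqueued on a group.
# pipeline = (Pipeline()
#             .on()
#             .hue(0.5)
#             .brightness(0.8)
#             .wait(1)
#             .off())
# group.enqueue(pipeline)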
<SYSTEM_TASK:> Execute a pipeline stage. <END_TASK> <USER_TASK:> Description: def _execute_stage(self, index, stage, stop): """ Execute a pipeline stage. :param index: Stage index. :param stage: Stage object. :param stop: Stop event. """
if stop.is_set(): _LOGGER.info("Stopped pipeline on group %s", self._group) return _LOGGER.info(" -> Running stage '%s' on group %s", stage, self._group) if stage.name == 'on': self._group.on = True elif stage.name == 'off': self._group.on = False elif stage.name == 'hue': self._group.hue = stage.args[0] elif stage.name == 'saturation': self._group.saturation = stage.args[0] elif stage.name == 'color': self._group.color = Color(*stage.args) elif stage.name == 'brightness': self._group.brightness = stage.args[0] elif stage.name == 'temperature': self._group.temperature = stage.args[0] elif stage.name == 'transition': self._group.transition(*stage.args, **stage.kwargs) elif stage.name == 'white': self._group.white() elif stage.name == 'white_up': self._group.white_up() elif stage.name == 'white_down': self._group.white_down() elif stage.name == 'red_up': self._group.red_up() elif stage.name == 'red_down': self._group.red_down() elif stage.name == 'green_up': self._group.green_up() elif stage.name == 'green_down': self._group.green_down() elif stage.name == 'blue_up': self._group.blue_up() elif stage.name == 'blue_down': self._group.blue_down() elif stage.name == 'night_light': self._group.night_light() elif stage.name == 'flash': self._group.flash(**stage.kwargs) elif stage.name == 'repeat': self._repeat(index, stage, stop) elif stage.name == 'wait': time.sleep(*stage.args) elif stage.name == 'callback': stage.args[0](*stage.args[1:], **stage.kwargs)
<SYSTEM_TASK:> Repeat a stage. <END_TASK> <USER_TASK:> Description: def _repeat(self, index, stage, stop): """ Repeat a stage. :param index: Stage index. :param stage: Stage object to repeat. :param iterations: Number of iterations (default infinite). :param stages: Stages back to repeat (default 1). """
times = None if 'iterations' in stage.kwargs: times = stage.kwargs['iterations'] - 1 stages_back = 1 if 'stages' in stage.kwargs: stages_back = stage.kwargs['stages'] i = 0 while i != times: if stop.is_set(): break for forward in range(stages_back): if stop.is_set(): break stage_index = index - stages_back + forward self._execute_stage(stage_index, self._pipe[stage_index], stop) i += 1
<SYSTEM_TASK:> Set the brightness. <END_TASK> <USER_TASK:> Description: def brightness(self, brightness): """ Set the brightness. :param brightness: Value to set (0.0-1.0). """
try: cmd = self.command_set.brightness(brightness) self.send(cmd) self._brightness = brightness except AttributeError: self._setter('_brightness', brightness, self._dimmest, self._brightest, self._to_brightness)
<SYSTEM_TASK:> Step to a given brightness. <END_TASK> <USER_TASK:> Description: def _to_brightness(self, brightness): """ Step to a given brightness. :param brightness: Get to this brightness. """
self._to_value(self._brightness, brightness, self.command_set.brightness_steps, self._dimmer, self._brighter)
<SYSTEM_TASK:> Step to a value <END_TASK> <USER_TASK:> Description: def _to_value(self, current, target, max_steps, step_down, step_up): """ Step to a value :param current: Current value. :param target: Target value. :param max_steps: Maximum number of steps. :param step_down: Down function. :param step_up: Up function. """
for _ in range(steps(current, target, max_steps)): if (current - target) > 0: step_down() else: step_up()
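`steps` is an external helper not shown in this section; a plausible sketch consistent with how it is called here (an assumption, not the library's actual implementation):

# Assumed behavior of the external `steps` helper: the number of discrete
# steps needed to move between two 0.0-1.0 values on a max_steps scale.
def steps(current, target, max_steps):
    return int(round(abs(current - target) * max_steps))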
<SYSTEM_TASK:> Group brightness as dim as possible. <END_TASK> <USER_TASK:> Description: def _dimmest(self): """ Group brightness as dim as possible. """
for _ in range(steps(self.brightness, 0.0, self.command_set.brightness_steps)): self._dimmer()
<SYSTEM_TASK:> Build up repos <END_TASK> <USER_TASK:> Description: def add_repo(self, repo_name, items): """ Build up repos. `repo_name` - Name of this repo. `items` - List of paths to RPMs. """
juicer.utils.Log.log_debug("[CART:%s] Adding %s items to repo '%s'" % \ (self.cart_name, len(items), repo_name)) # We can't just straight-away add all of `items` to the # repo. `items` may be composed of a mix of local files, local # directories, remote files, and remote directories. We need # to filter and validate each item. items = juicer.utils.filter_package_list(items) cart_items = [] for item in items: juicer.utils.Log.log_debug("Creating CartObject for %s" % item) i = juicer.common.CartItem.CartItem(item) cart_items.append(i) self.repo_items_hash[repo_name] = cart_items
<SYSTEM_TASK:> Sign the items in the cart with a GPG key. <END_TASK> <USER_TASK:> Description: def sign_items(self, sign_with): """ Sign the items in the cart with a GPG key. After everything is collected and signed all the cart items are issued a refresh() to sync their is_signed attributes. `sign_with` is a reference to the method that implements juicer.common.RpmSignPlugin. """
cart_items = self.items() item_paths = [item.path for item in cart_items] sign_with(item_paths) for item in cart_items: item.refresh()
<SYSTEM_TASK:> Pull down all non-local items and save them into remotes_storage. <END_TASK> <USER_TASK:> Description: def sync_remotes(self, force=False): """ Pull down all non-local items and save them into remotes_storage. """
connectors = juicer.utils.get_login_info()[0] for repo, items in self.iterrepos(): repoid = "%s-%s" % (repo, self.current_env) for rpm in items: # don't bother syncing down if it's already in the pulp repo it needs to go to if not rpm.path.startswith(juicer.utils.pulp_repo_path(connectors[self.current_env], repoid)) or force: rpm.sync_to(self.remotes_storage) else: juicer.utils.Log.log_debug("Not syncing %s because it's already in pulp" % rpm.path)
<SYSTEM_TASK:> Build and return a list of all items in this cart <END_TASK> <USER_TASK:> Description: def items(self): """ Build and return a list of all items in this cart """
cart_items = [] for repo, items in self.iterrepos(): cart_items.extend(items) return cart_items
<SYSTEM_TASK:> Decorator to add args to subcommands. <END_TASK> <USER_TASK:> Description: def arg(*args, **kwargs): """ Decorator to add args to subcommands. """
def _arg(f): if not hasattr(f, '_subcommand_args'): f._subcommand_args = [] f._subcommand_args.append((args, kwargs)) return f return _arg
<SYSTEM_TASK:> Decorator to add sets of required mutually exclusive args to subcommands. <END_TASK> <USER_TASK:> Description: def argset(name, *args, **kwargs): """ Decorator to add sets of required mutually exclusive args to subcommands. """
def _arg(f): if not hasattr(f, '_subcommand_argsets'): f._subcommand_argsets = {} f._subcommand_argsets.setdefault(name, []).append((args, kwargs)) return f return _arg
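A hedged usage sketch of both decorators on a subcommand function; the subcommand and its options are hypothetical:

# Hypothetical subcommand showing how the collected args/argsets
# are attached to the function for later argparse registration.
@arg('-q', '--quiet', action='store_true', help='Suppress output')
@argset('source', '--url', help='Fetch from a URL')
@argset('source', '--pypi', help='Fetch from PyPI')
def mirror(opts):
    pass

print(mirror._subcommand_args)     # [(('-q', '--quiet'), {...})]
print(mirror._subcommand_argsets)  # {'source': [(('--url',), {...}), ...]}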
<SYSTEM_TASK:> Create a local mirror of one or more resources. <END_TASK> <USER_TASK:> Description: def fetch(opts): """ Create a local mirror of one or more resources. """
resources = _load(opts.resources, opts.output_dir) if opts.all: opts.resource_names = ALL reporthook = None if opts.quiet else lambda name: print('Fetching {}...'.format(name)) if opts.verbose: backend.VERBOSE = True _fetch(resources, opts.resource_names, opts.mirror_url, opts.force, reporthook) return verify(opts)
<SYSTEM_TASK:> Verify that one or more resources were downloaded successfully. <END_TASK> <USER_TASK:> Description: def verify(opts): """ Verify that one or more resources were downloaded successfully. """
resources = _load(opts.resources, opts.output_dir) if opts.all: opts.resource_names = ALL invalid = _invalid(resources, opts.resource_names) if not invalid: if not opts.quiet: print("All resources successfully downloaded") return 0 else: if not opts.quiet: print("Invalid or missing resources: {}".format(', '.join(invalid))) return 1
<SYSTEM_TASK:> Return the full path to a named resource. <END_TASK> <USER_TASK:> Description: def resource_path(opts): """ Return the full path to a named resource. """
resources = _load(opts.resources, opts.output_dir) if opts.resource_name not in resources: sys.stderr.write('Invalid resource name: {}\n'.format(opts.resource_name)) return 1 print(resources[opts.resource_name].destination)
<SYSTEM_TASK:> Run a light-weight HTTP server hosting previously mirrored resources <END_TASK> <USER_TASK:> Description: def serve(opts): """ Run a light-weight HTTP server hosting previously mirrored resources """
resources = _load(opts.resources, opts.output_dir) opts.output_dir = resources.output_dir # allow resources.yaml to set default output_dir if not os.path.exists(opts.output_dir): sys.stderr.write("Resources dir '{}' not found. Did you fetch?\n".format(opts.output_dir)) return 1 backend.PyPIResource.build_pypi_indexes(opts.output_dir) os.chdir(opts.output_dir) HTTPServer.allow_reuse_address = True httpd = HTTPServer((opts.host, opts.port), SimpleHTTPRequestHandler) if opts.ssl_cert: httpd.socket = ssl.wrap_socket(httpd.socket, certfile=opts.ssl_cert, server_side=True) print("Serving at: http{}://{}:{}/".format( 's' if opts.ssl_cert else '', socket.gethostname(), opts.port)) httpd.serve_forever()
<SYSTEM_TASK:> Step to a given temperature. <END_TASK> <USER_TASK:> Description: def _to_temperature(self, temperature): """ Step to a given temperature. :param temperature: Get to this temperature. """
self._to_value(self._temperature, temperature, self.command_set.temperature_steps, self._warmer, self._cooler)
<SYSTEM_TASK:> Group temperature as warm as possible. <END_TASK> <USER_TASK:> Description: def _warmest(self): """ Group temperature as warm as possible. """
for _ in range(steps(self.temperature, 0.0, self.command_set.temperature_steps)): self._warmer()
<SYSTEM_TASK:> Group temperature as cool as possible. <END_TASK> <USER_TASK:> Description: def _coolest(self): """ Group temperature as cool as possible. """
for _ in range(steps(self.temperature, 1.0, self.command_set.temperature_steps)): self._cooler()
<SYSTEM_TASK:> Convert a string representing an IPv4 address to 4 bytes. <END_TASK> <USER_TASK:> Description: def ip_to_array(ipaddress): """Convert a string representing an IPv4 address to 4 bytes."""
res = [] for i in ipaddress.split("."): res.append(int(i)) assert len(res) == 4 return res
<SYSTEM_TASK:> Convert an integer to an array of bytes of the given length. <END_TASK> <USER_TASK:> Description: def int_to_array(i, length=2): """Convert an integer to an array of bytes of the given length."""
res = []
for dummy in range(0, length):
    res.append(i & 0xff)
    i = i >> 8
# return a list, not a reverse iterator, to match the docstring
return list(reversed(res))
<SYSTEM_TASK:> Stop internal color pattern playing <END_TASK> <USER_TASK:> Description: def stop(self): """Stop internal color pattern playing """
if self.dev is None:
    return ''
buf = [REPORT_ID, ord('p'), 0, 0, 0, 0, 0, 0, 0]
return self.write(buf)
<SYSTEM_TASK:> `repo_name` - Name of repository to create <END_TASK> <USER_TASK:> Description: def create_repo(self, repo_name=None, feed=None, envs=[], checksum_type="sha256", query='/repositories/'): """ `repo_name` - Name of repository to create `feed` - Repo URL to feed from `checksum_type` - Used for generating meta-data Create repository in specified environments, associate the yum_distributor with it and publish the repo """
data = {'display_name': repo_name, 'notes': { '_repo-type': 'rpm-repo', } } juicer.utils.Log.log_debug("Create Repo: %s", repo_name) for env in envs: if juicer.utils.repo_exists_p(repo_name, self.connectors[env], env): juicer.utils.Log.log_info("repo `%s` already exists in %s... skipping!", (repo_name, env)) continue else: data['relative_path'] = '/%s/%s/' % (env, repo_name) data['id'] = '-'.join([repo_name, env]) _r = self.connectors[env].post(query, data) if _r.status_code == Constants.PULP_POST_CREATED: imp_query = '/repositories/%s/importers/' % data['id'] imp_data = { 'importer_id': 'yum_importer', 'importer_type_id': 'yum_importer', 'importer_config': {}, } if feed: imp_data['importer_config']['feed_url'] = feed _r = self.connectors[env].post(imp_query, imp_data) dist_query = '/repositories/%s/distributors/' % data['id'] dist_data = {'distributor_id': 'yum_distributor', 'distributor_type_id': 'yum_distributor', 'distributor_config': { 'relative_url': '/%s/%s/' % (env, repo_name), 'http': True, 'https': True, 'checksum_type': checksum_type }, 'auto_publish': True, 'relative_path': '/%s/%s/' % (env, repo_name) } _r = self.connectors[env].post(dist_query, dist_data) if _r.status_code == Constants.PULP_POST_CREATED: pub_query = '/repositories/%s/actions/publish/' % data['id'] pub_data = {'id': 'yum_distributor'} _r = self.connectors[env].post(pub_query, pub_data) if _r.status_code == Constants.PULP_POST_ACCEPTED: juicer.utils.Log.log_info("created repo `%s` in %s", repo_name, env) else: _r.raise_for_status() else: _r.raise_for_status() return True
<SYSTEM_TASK:> `login` - Login or username for user <END_TASK> <USER_TASK:> Description: def create_user(self, login=None, password=None, user_name=None, envs=[], query='/users/'): """ `login` - Login or username for user `password` - Plain text password for user `user_name` - Full name of user Create user in specified environments """
login = login.lower() data = {'login': login, 'password': password[0], 'name': user_name} juicer.utils.Log.log_debug("Create User: %s ('%s')", login, user_name) for env in envs: if envs.index(env) != 0 and juicer.utils.env_same_host(env, envs[envs.index(env) - 1]): juicer.utils.Log.log_info("environment `%s` shares a host with environment `%s`... skipping!", (env, envs[envs.index(env) - 1])) continue elif juicer.utils.user_exists_p(login, self.connectors[env]): juicer.utils.Log.log_info("user `%s` already exists in %s... skipping!", (login, env)) continue else: _r = self.connectors[env].post(query, data) if _r.status_code == Constants.PULP_POST_CREATED: juicer.utils.Log.log_info("created user `%s` with login `%s` in %s", (user_name, login, env)) else: _r.raise_for_status() return True
<SYSTEM_TASK:> `repo_name` - Name of repository to delete <END_TASK> <USER_TASK:> Description: def delete_repo(self, repo_name=None, envs=[], query='/repositories/'): """ `repo_name` - Name of repository to delete Delete repo in specified environments """
orphan_query = '/content/orphans/rpm/'
juicer.utils.Log.log_debug("Delete Repo: %s", repo_name)
for env in envs:
    if not juicer.utils.repo_exists_p(repo_name, self.connectors[env], env):
        juicer.utils.Log.log_info("repo `%s` doesn't exist in %s... skipping!",
                                  (repo_name, env))
        continue
    else:
        url = "%s%s-%s/" % (query, repo_name, env)
        _r = self.connectors[env].delete(url)
        if _r.status_code == Constants.PULP_DELETE_ACCEPTED:
            juicer.utils.Log.log_info("deleted repo `%s` in %s",
                                      (repo_name, env))
            # if delete was successful, delete orphaned rpms
            _r = self.connectors[env].get(orphan_query)
            if _r.status_code == Constants.PULP_GET_OK:
                if len(juicer.utils.load_json_str(_r.content)) > 0:
                    __r = self.connectors[env].delete(orphan_query)
                    if __r.status_code == Constants.PULP_DELETE_ACCEPTED:
                        juicer.utils.Log.log_debug(
                            "deleted orphaned rpms in %s." % env)
                    else:
                        juicer.utils.Log.log_error(
                            "unable to delete orphaned rpms in %s. "
                            "a %s error was returned",
                            (env, __r.status_code))
            else:
                juicer.utils.Log.log_error(
                    "unable to get a list of orphaned rpms. "
                    "encountered a %s error." % _r.status_code)
        else:
            _r.raise_for_status()
return True
<SYSTEM_TASK:> `login` - Login or username of user to delete <END_TASK> <USER_TASK:> Description: def delete_user(self, login=None, envs=[], query='/users/'): """ `login` - Login or username of user to delete Delete user in specified environments """
juicer.utils.Log.log_debug("Delete User: %s", login) for env in envs: if envs.index(env) != 0 and juicer.utils.env_same_host(env, envs[envs.index(env) - 1]): juicer.utils.Log.log_info("environment `%s` shares a host with environment `%s`... skipping!", (env, envs[envs.index(env) - 1])) continue elif not juicer.utils.user_exists_p(login, self.connectors[env]): juicer.utils.Log.log_info("user `%s` doesn't exist in %s... skipping!", (login, env)) continue else: url = "%s%s/" % (query, login) _r = self.connectors[env].delete(url) if _r.status_code == Constants.PULP_DELETE_OK: juicer.utils.Log.log_info("deleted user `%s` in %s", (login, env)) else: _r.raise_for_status() return True
<SYSTEM_TASK:> Sync repository in specified environments <END_TASK> <USER_TASK:> Description: def sync_repo(self, repo_name=None, envs=[], query='/repositories/'): """ Sync repository in specified environments """
juicer.utils.Log.log_debug( "Sync Repo %s In: %s" % (repo_name, ",".join(envs))) data = { 'override_config': { 'verify_checksum': 'true', 'verify_size': 'true' }, } for env in envs: url = "%s%s-%s/actions/sync/" % (query, repo_name, env) juicer.utils.Log.log_info("%s:", env) _r = self.connectors[env].post(url, data) if _r.status_code == Constants.PULP_POST_ACCEPTED: juicer.utils.Log.log_info("`%s` sync scheduled" % repo_name) else: _r.raise_for_status() return True
<SYSTEM_TASK:> List repositories in specified environments <END_TASK> <USER_TASK:> Description: def list_repos(self, envs=[], query='/repositories/'): """ List repositories in specified environments """
juicer.utils.Log.log_debug( "List Repos In: %s", ", ".join(envs)) repo_lists = {} for env in envs: repo_lists[env] = [] for env in envs: _r = self.connectors[env].get(query) if _r.status_code == Constants.PULP_GET_OK: for repo in juicer.utils.load_json_str(_r.content): if re.match(".*-{0}$".format(env), repo['id']): repo_lists[env].append(repo['display_name']) else: _r.raise_for_status() return repo_lists
<SYSTEM_TASK:> List users in specified environments <END_TASK> <USER_TASK:> Description: def list_users(self, envs=[], query="/users/"): """ List users in specified environments """
juicer.utils.Log.log_debug( "List Users In: %s", ", ".join(envs)) for env in envs: juicer.utils.Log.log_info("%s:" % (env)) _r = self.connectors[env].get(query) if _r.status_code == Constants.PULP_GET_OK: for user in juicer.utils.load_json_str(_r.content): roles = user['roles'] if roles: user_roles = ', '.join(roles) else: user_roles = "None" juicer.utils.Log.log_info("\t%s - %s" % (user['login'], user_roles)) else: _r.raise_for_status() return True
<SYSTEM_TASK:> `login` - Login or username of user to add to `role` <END_TASK> <USER_TASK:> Description: def role_add(self, role=None, login=None, envs=[], query='/roles/'): """ `login` - Login or username of user to add to `role` `role` - Role to add user to Add user to role """
data = {'login': login}
juicer.utils.Log.log_debug("Add Role '%s' to '%s'", role, login)
for env in envs:
    if not juicer.utils.role_exists_p(role, self.connectors[env]):
        juicer.utils.Log.log_info("role `%s` doesn't exist in %s... skipping!",
                                  (role, env))
        continue
    elif not juicer.utils.user_exists_p(login, self.connectors[env]):
        juicer.utils.Log.log_info("user `%s` doesn't exist in %s... skipping!",
                                  (login, env))
    else:
        url = "%s%s/users/" % (query, role)
        _r = self.connectors[env].post(url, data)
        if _r.status_code == Constants.PULP_POST_OK:
            juicer.utils.Log.log_info("added user `%s` to role `%s` in %s",
                                      (login, role, env))
        else:
            _r.raise_for_status()
return True
<SYSTEM_TASK:> `login` - Login or username of user <END_TASK> <USER_TASK:> Description: def show_user(self, login=None, envs=[], query='/users/'): """ `login` - Login or username of user Show user in specified environments """
juicer.utils.Log.log_debug("Show User: %s", login) # keep track of which iteration of environment we're in count = 0 for env in self.args.envs: count += 1 juicer.utils.Log.log_info("%s:", env) if not juicer.utils.user_exists_p(login, self.connectors[env]): juicer.utils.Log.log_info("user `%s` doesn't exist in %s... skipping!", (login, env)) continue else: url = "%s%s/" % (query, login) _r = self.connectors[env].get(url) if _r.status_code == Constants.PULP_GET_OK: user = juicer.utils.load_json_str(_r.content) juicer.utils.Log.log_info("Login: %s" % user['login']) juicer.utils.Log.log_info("Name: %s" % user['name']) juicer.utils.Log.log_info("Roles: %s" % ', '.join(user['roles'])) if count < len(envs): # just want a new line juicer.utils.Log.log_info("") else: _r.raise_for_status() return True
<SYSTEM_TASK:> List roles in specified environments <END_TASK> <USER_TASK:> Description: def list_roles(self, envs=[], query='/roles/'): """ List roles in specified environments """
juicer.utils.Log.log_debug("List Roles %s", ", ".join(envs)) count = 0 for env in envs: count += 1 rcount = 0 juicer.utils.Log.log_info("%s:", env) _r = self.connectors[env].get(query) if _r.status_code == Constants.PULP_GET_OK: roles = juicer.utils.load_json_str(_r.content) for role in roles: rcount += 1 juicer.utils.Log.log_info("Name: %s" % role['display_name']) juicer.utils.Log.log_info("Description: %s" % role['description']) juicer.utils.Log.log_info("ID: %s" % role['id']) juicer.utils.Log.log_info("Users: %s" % ', '.join(role['users'])) if rcount < len(roles): # just want a new line juicer.utils.Log.log_info("\n") if count < len(envs): # just want a new line juicer.utils.Log.log_info("\n") else: _r.raise_for_status() return True
<SYSTEM_TASK:> `login` - Login or username of user to update <END_TASK> <USER_TASK:> Description: def update_user(self, login=None, user_name=None, password=None, envs=[], query='/users/'): """ `login` - Login or username of user to update `user_name` - Updated full name of user `password` - Updated plain text password for user Update user information """
juicer.utils.Log.log_debug("Update user information %s" % login) login = login.lower() data = {'delta': {}} if not user_name and not password: raise JuicerError("Error: --name or --password must be present") if user_name: data['delta']['name'] = user_name if password: data['delta']['password'] = password[0] query = "%s%s/" % (query, login) for env in envs: juicer.utils.Log.log_info("%s:", env) if not juicer.utils.user_exists_p(login, self.connectors[env]): juicer.utils.Log.log_info("user `%s` does not exist in %s... skipping!", (login, env)) continue else: _r = self.connectors[env].put(query, data) if _r.status_code == Constants.PULP_PUT_OK: juicer.utils.Log.log_info("user %s updated" % juicer.utils.load_json_str(_r.content)['login']) else: _r.raise_for_status() return True
<SYSTEM_TASK:> Calculates what you need to do to make a pulp repo match a juicer repo def <END_TASK> <USER_TASK:> Description: def _diff(self): """Calculates what you need to do to make a pulp repo match a juicer repo def"""
j_cs = self.j['checksum_type'] j_feed = self.j['feed'] p_cs = self.p['checksum_type'] p_feed = self.p['feed'] # checksum is a distributor property # Is the pulp checksum wrong? if not p_cs == j_cs: juicer.utils.Log.log_debug("Pulp checksum_type does not match juicer") self.distributor_diff['distributor_config']['checksum_type'] = j_cs juicer.utils.Log.log_debug("distributor_config::checksum_type SHOULD BE %s" % j_cs) # feed is an importer property if not p_feed == j_feed: juicer.utils.Log.log_debug("Pulp feed does not match juicer") self.importer_diff['importer_config']['feed'] = j_feed juicer.utils.Log.log_debug("importer_config::feed SHOULD BE %s" % j_feed)
<SYSTEM_TASK:> Set the graphite key prefixes <END_TASK> <USER_TASK:> Description: def _set_prefixes(self, conf): """Set the graphite key prefixes :param dict conf: The configuration data """
if conf.get('legacy_namespace', 'y') in self.TRUE_VALUES: self.count_prefix = 'stats_counts' self.count_suffix = '' self.gauge_prefix = 'stats.gauges' self.timer_prefix = 'stats.timers' self.rate_prefix = 'stats' self.rate_suffix = '' else: global_prefix = conf.get('global_prefix', 'stats') self.count_prefix = '%s.%s' % (global_prefix, conf.get('prefix_counter', 'counters')) self.count_suffix = '.count' self.gauge_prefix = '%s.%s' % (global_prefix, conf.get('prefix_gauge', 'gauges')) self.timer_prefix = '%s.%s' % (global_prefix, conf.get('prefix_timer', 'timers')) self.rate_prefix = self.count_prefix self.rate_suffix = '.rate'
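The effect of the two modes on metric names, for a counter named `api.hits` (illustrative):

# Illustrative metric names for a counter 'api.hits' under each mode:
legacy = {  # legacy_namespace on
    'rate': 'stats.api.hits',
    'count': 'stats_counts.api.hits',
}
modern = {  # legacy_namespace off, default prefixes
    'rate': 'stats.counters.api.hits.rate',
    'count': 'stats.counters.api.hits.count',
}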
<SYSTEM_TASK:> Given a list, yield lists of at most self.max_batch_size items <END_TASK> <USER_TASK:> Description: def _get_batches(self, items): """Given a list, yield lists of at most self.max_batch_size items."""
for i in xrange(0, len(items), self.max_batch_size): yield items[i:i + self.max_batch_size]
<SYSTEM_TASK:> Periodically flush stats to graphite <END_TASK> <USER_TASK:> Description: def stats_flush(self): """ Periodically flush stats to graphite """
while True:
    try:
        eventlet.sleep(self.flush_interval)
        if self.debug:
            print "seen %d stats so far." % self.stats_seen
            print "current counters: %s" % self.counters
        if self.pickle_proto:
            payload = self.pickle_payload()
            if payload:
                for batch in payload:
                    self.report_stats(batch)
        else:
            payload = self.plain_payload()
            if payload:
                self.report_stats(payload)
    except Exception:  # safety net: keep the flush loop alive
        self.logger.critical('Encountered error in stats_flush loop',
                             exc_info=True)
<SYSTEM_TASK:> obtain stats payload in batches of pickle format <END_TASK> <USER_TASK:> Description: def pickle_payload(self): """obtain stats payload in batches of pickle format"""
tstamp = int(time.time()) payload = [] for item in self.counters: payload.append(("%s.%s%s" % (self.rate_prefix, item, self.rate_suffix), (tstamp, self.counters[item] / self.flush_interval))) payload.append(("%s.%s%s" % (self.count_prefix, item, self.count_suffix), (tstamp, self.counters[item]))) self.counters[item] = 0 for key in self.timers: if len(self.timers[key]) > 0: self.process_timer_key(key, tstamp, payload, pickled=True) self.timers[key] = [] for key in self.gauges: payload.append(("%s.%s" % (self.gauge_prefix, key), (tstamp, self.gauges[key]))) self.gauges[key] = 0 if payload: batched_payload = [] for batch in self._get_batches(payload): if self.debug: print "pickling batch: %r" % batch serialized_data = pickle.dumps(batch, protocol=-1) length_prefix = struct.pack("!L", len(serialized_data)) batched_payload.append(length_prefix + serialized_data) return batched_payload return None
<SYSTEM_TASK:> obtain stats payload in plaintext format <END_TASK> <USER_TASK:> Description: def plain_payload(self): """obtain stats payload in plaintext format"""
tstamp = int(time.time()) payload = [] for item in self.counters: payload.append('%s.%s%s %s %s\n' % (self.rate_prefix, item, self.rate_suffix, self.counters[item] / self.flush_interval, tstamp)) payload.append('%s.%s%s %s %s\n' % (self.count_prefix, item, self.count_suffix, self.counters[item], tstamp)) self.counters[item] = 0 for key in self.timers: if len(self.timers[key]) > 0: self.process_timer_key(key, tstamp, payload) self.timers[key] = [] for key in self.gauges: payload.append("%s.%s %d %d\n" % (self.gauge_prefix, key, self.gauges[key], tstamp)) self.gauges[key] = 0 if self.debug: print payload if payload: return "".join(payload) else: return None
<SYSTEM_TASK:> Process a received gauge event <END_TASK> <USER_TASK:> Description: def process_gauge(self, key, fields): """ Process a received gauge event :param key: Key of timer :param fields: Received fields """
try: self.gauges[key] = float(fields[0]) if self.stats_seen >= maxint: self.logger.info("hit maxint, reset seen counter") self.stats_seen = 0 self.stats_seen += 1 except Exception as err: self.logger.info("error decoding gauge event: %s" % err) if self.debug: print "error decoding gauge event: %s" % err
<SYSTEM_TASK:> Process a received timer event <END_TASK> <USER_TASK:> Description: def process_timer(self, key, fields): """ Process a received timer event :param key: Key of timer :param fields: Received fields """
try: if key not in self.timers: self.timers[key] = [] self.timers[key].append(float(fields[0])) if self.stats_seen >= maxint: self.logger.info("hit maxint, reset seen counter") self.stats_seen = 0 self.stats_seen += 1 except Exception as err: self.logger.info("error decoding timer event: %s" % err) if self.debug: print "error decoding timer event: %s" % err
<SYSTEM_TASK:> Process a received counter event <END_TASK> <USER_TASK:> Description: def process_counter(self, key, fields): """ Process a received counter event :param key: Key of counter :param fields: Received fields """
sample_rate = 1.0
try:
    if len(fields) == 3:
        if self.ratecheck.match(fields[2]):
            sample_rate = float(fields[2].lstrip("@"))
        else:
            raise Exception("bad sample rate.")
    counter_value = float(fields[0] or 1) * (1 / float(sample_rate))
    if key not in self.counters:
        self.counters[key] = 0
    self.counters[key] += counter_value
    if self.stats_seen >= maxint:
        self.logger.info("hit maxint, reset seen counter")
        self.stats_seen = 0
    self.stats_seen += 1
except Exception as err:
    self.logger.info("error decoding counter event: %s" % err)
    if self.debug:
        print "error decoding counter event: %s" % err
<SYSTEM_TASK:> Aggregate a timer key and append the results to the metrics stack. <END_TASK> <USER_TASK:> Description: def process_timer_key(self, key, tstamp, stack, pickled=False): """Aggregate a timer key and append the results to the metrics stack. :param str key: The timer key to process :param int tstamp: The timestamp for the data point :param list stack: The stack of metrics to append the output to :param bool pickled: Append pickle tuples instead of plaintext lines """
self.timers[key].sort()
values = {'count': len(self.timers[key]),
          'low': min(self.timers[key]),
          'high': max(self.timers[key]),
          'total': sum(self.timers[key])}
values['mean'] = values['low']
nth_percentile = 'upper_%i' % self.pct_threshold
values[nth_percentile] = values['high']

if values['count']:
    threshold_idx = int(self.threshold_numerator * values['count']) - 1
    values[nth_percentile] = self.timers[key][threshold_idx]
    values['mean'] = float(values['total']) / float(values['count'])

for metric in values:
    if pickled:
        stack.append(("%s.%s.%s" % (self.timer_prefix, key, metric),
                      (tstamp, values[metric])))
    else:
        stack.append("%s.%s.%s %s %s\n" % (self.timer_prefix,
                                           key,
                                           metric,
                                           values[metric],
                                           tstamp))
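Assuming threshold_numerator is pct_threshold / 100.0 (it is set elsewhere in the class, so this is an inference from how it is used), the upper-percentile lookup works out as follows for ten sorted samples and a 90th-percentile threshold:

timings = sorted([12, 7, 3, 9, 15, 4, 8, 11, 6, 10])
threshold_numerator = 90 / 100.0
threshold_idx = int(threshold_numerator * len(timings)) - 1  # int(9.0) - 1 == 8
upper_90 = timings[threshold_idx]                            # 12, the 9th of 10 sorted values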
<SYSTEM_TASK:>
Decode and process the data from a received event.
<END_TASK>
<USER_TASK:>
Description:
def decode_recvd(self, data):
    """
    Decode and process the data from a received event.

    :param data: Data to decode and process.
    """
bits = data.split(':')
if len(bits) == 2:
    key = self.keycheck.sub('_', bits[0])
    fields = bits[1].split("|")
    field_count = len(fields)
    if field_count >= 2:
        processor = self.processors.get(fields[1])
        if processor:
            if self.debug:
                print "got key: %s %r" % (key, fields)
            processor(key, fields)
        else:
            print "error: unsupported stats type"
            print "key -> %s\nfields -> %s" % (key, fields)
    else:
        print "error (%s): not enough fields received" % key
else:
    print "error: invalid request [%s]" % data[:40]
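The wire format handled here is statsd's key:value|type[|@rate]. Tracing one datagram through the same splits as above (the mapping of 'c' to the counter processor is an assumption about how self.processors is registered):

data = 'api.hits:1|c|@0.5'
bits = data.split(':')         # ['api.hits', '1|c|@0.5']
fields = bits[1].split('|')    # ['1', 'c', '@0.5']
# fields[1] == 'c' selects the processor, which then receives
# ('api.hits', ['1', 'c', '@0.5'])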
<SYSTEM_TASK:>
Parse KNX group addresses and return the address as an integer.
<END_TASK>
<USER_TASK:>
Description:
def parse_group_address(addr):
    """Parse KNX group addresses and return the address as an integer.

    This allows converting x/x/x and x/x address syntax to a numeric
    KNX group address.
    """
if addr is None:
    raise KNXException("No address given")

res = None

if re.match('[0-9]+$', addr):
    res = int(addr)

match = re.match("([0-9]+)/([0-9]+)$", addr)
if match:
    main = match.group(1)
    sub = match.group(2)
    res = int(main) * 2048 + int(sub)

match = re.match("([0-9]+)/([0-9]+)/([0-9]+)$", addr)
if match:
    main = match.group(1)
    middle = match.group(2)
    sub = match.group(3)
    res = int(main) * 256 * 8 + int(middle) * 256 + int(sub)

if res is None:
    raise KNXException("Address {} does not match any address scheme".
                       format(addr))

return res
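As a quick check of the arithmetic (note that 256 * 8 in the three-level branch is just 2048 written differently): the two-level address 1/7 maps to 1 * 2048 + 7, and the three-level 1/2/3 maps to 1 * 2048 + 2 * 256 + 3:

assert parse_group_address('2055') == 2055                     # plain numeric form
assert parse_group_address('1/7') == 1 * 2048 + 7              # 2055
assert parse_group_address('1/2/3') == 1 * 2048 + 2 * 256 + 3  # 2563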
<SYSTEM_TASK:>
Set the cached value for the given name
<END_TASK>
<USER_TASK:>
Description:
def set(self, name, value):
    """Set the cached value for the given name"""
old_val = self.values.get(name)
if old_val != value:
    self.values[name] = value
    return True
else:
    return False
<SYSTEM_TASK:>
Sanitize all fields of the KNX message.
<END_TASK>
<USER_TASK:>
Description:
def sanitize(self):
    """Sanitize all fields of the KNX message."""
self.repeat = self.repeat % 2
self.priority = self.priority % 4
self.src_addr = self.src_addr % 0x10000
self.dst_addr = self.dst_addr % 0x10000
self.multicast = self.multicast % 2
self.routing = self.routing % 8
self.length = self.length % 16
for i in range(0, self.length - 1):
    self.data[i] = self.data[i] % 0x100
<SYSTEM_TASK:>
Convert the object to its frame format.
<END_TASK>
<USER_TASK:>
Description:
def to_frame(self):
    """Convert the object to its frame format."""
self.sanitize()
res = []
res.append((1 << 7) + (1 << 4) + (self.repeat << 5) + (self.priority << 2))
res.append(self.src_addr >> 8)
res.append(self.src_addr % 0x100)
res.append(self.dst_addr >> 8)
res.append(self.dst_addr % 0x100)
res.append((self.multicast << 7) + (self.routing << 4) + self.length)
for i in range(0, self.length - 1):
    res.append(self.data[i])
checksum = 0
for i in range(0, 5 + self.length):
    checksum += res[i]
res.append(checksum % 0x100)
return bytearray(res)
<SYSTEM_TASK:>
Create a KNXMessage object from the frame format.
<END_TASK>
<USER_TASK:>
Description:
def from_frame(cls, frame):
    """Create a KNXMessage object from the frame format."""
message = cls()

# Check checksum first
checksum = 0
for i in range(0, len(frame) - 1):
    checksum += frame[i]
if (checksum % 0x100) != frame[len(frame) - 1]:
    raise KNXException('Checksum error in frame {}, '
                       'expected {} but got {}'
                       .format(tohex(frame),
                               frame[len(frame) - 1],
                               checksum % 0x100))

message.repeat = (frame[0] >> 5) & 0x01
message.priority = (frame[0] >> 2) & 0x03
message.src_addr = (frame[1] << 8) + frame[2]
message.dst_addr = (frame[3] << 8) + frame[4]
message.multicast = (frame[5] >> 7)
# the routing counter occupies bits 4-6 (3 bits), so mask with 0x07
# to mirror sanitize() and to_frame() above
message.routing = (frame[5] >> 4) & 0x07
message.length = frame[5] & 0x0f
message.data = frame[6:-1]

if len(message.data) + 1 != message.length:
    raise KNXException(
        'Frame {} has not the correct length'.format(tohex(frame)))

return message
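A round-trip sanity check of the two methods, assuming KNXMessage() constructs with sane defaults for the fields not set explicitly (the addresses and payload byte are illustrative):

msg = KNXMessage()
msg.src_addr = 0x1101                 # physical address 1.1.1
msg.dst_addr = 0x0903                 # group address 1/1/3 == 1*2048 + 1*256 + 3
msg.data = [0x81]                     # e.g. a GroupValueWrite 'on' byte
msg.length = len(msg.data) + 1
frame = msg.to_frame()                # sanitized, checksum appended
echo = KNXMessage.from_frame(frame)   # checksum verified on the way back in
assert echo.dst_addr == msg.dst_addr and list(echo.data) == msg.data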
<SYSTEM_TASK:>
Using the specified input resource, assemble a list of RPM URLs.
<END_TASK>
<USER_TASK:>
Description:
def assemble_remotes(resource):
    """
    Using the specified input resource, assemble a list of RPM URLs.

    This function will, when given a remote package url, directory
    index, or a combination of the two in a local input file, do all
    the work required to turn that input into a list of only remote
    package URLs.
    """
resource_type = classify_resource_type(resource)

if resource_type is None:
    juicer.utils.Log.log_debug("Could not classify or find the input resource.")
    return []
elif resource_type == REMOTE_PKG_TYPE:
    return [resource]
elif resource_type == REMOTE_INDEX_TYPE:
    return parse_directory_index(resource)
elif resource_type == REMOTE_INPUT_FILE_TYPE:
    # Later on this could examine the excluded data for directory
    # indexes and iterate over those too.
    remote_packages, excluded_data = parse_input_file(resource)
    return remote_packages
<SYSTEM_TASK:>
Determine if the specified resource is remote or local.
<END_TASK>
<USER_TASK:>
Description:
def classify_resource_type(resource):
    """
    Determine if the specified resource is remote or local.

    We can handle three remote resource types from the command line:
    remote RPMs, directory indexes, and input files. They're
    classified by matching the following patterns:

    - Remote RPMs start with http[s] and end with .rpm

    - Directory indexes start with http[s] and don't end in .rpm

    - Input files don't match the above and exist on the local
      filesystem
    """
if is_remote_package(resource):
    juicer.utils.Log.log_debug("Classified %s as a remote package" % resource)
    return REMOTE_PKG_TYPE
elif is_directory_index(resource):
    juicer.utils.Log.log_debug("Classified %s as a directory index" % resource)
    return REMOTE_INDEX_TYPE
elif exists(expanduser(resource)):
    juicer.utils.Log.log_debug("Classified %s as an input file" % resource)
    return REMOTE_INPUT_FILE_TYPE
else:
    juicer.utils.Log.log_debug("Classified %s as unclassifiable" % resource)
    return None
<SYSTEM_TASK:>
Classify the input resource as a remote RPM or not.
<END_TASK>
<USER_TASK:>
Description:
def is_remote_package(resource):
    """
    Classify the input resource as a remote RPM or not.
    """
remote_regexp = re.compile(r"^https?://(.+).rpm$", re.I) result = remote_regexp.match(resource) if result is not None: juicer.utils.Log.log_debug("%s matches remote package regexp" % resource) return True else: juicer.utils.Log.log_debug("%s doesn't match remote package regexp" % resource) return False
<SYSTEM_TASK:>
Classify the input resource as a directory index or not.
<END_TASK>
<USER_TASK:>
Description:
def is_directory_index(resource):
    """
    Classify the input resource as a directory index or not.
    """
remote_regexp = re.compile(r"^https?://(.+)/?$", re.I) result = remote_regexp.match(resource) if result is not None: juicer.utils.Log.log_debug("%s matches directory index regexp" % resource) return True else: juicer.utils.Log.log_debug("%s doesn't match directory index regexp" % resource) return False
<SYSTEM_TASK:>
Parse input file into remote packages and excluded data.
<END_TASK>
<USER_TASK:>
Description:
def parse_input_file(resource):
    """
    Parse input file into remote packages and excluded data.

    In addition to garbage, excluded data includes directory indexes
    for the time being. This will be revisited after basic
    functionality has been fully implemented.
    """
# use a context manager so the file handle is closed after reading
with open(resource, 'r') as input_file:
    input_resource = input_file.read()

remotes_list = [url for url in input_resource.split()]
juicer.utils.Log.log_debug("Input file parsed into: %s\n" % str(remotes_list))

remote_packages = [pkg for pkg in remotes_list if is_remote_package(pkg) is True]
juicer.utils.Log.log_debug("remote_packages filtered into %s\n" % str(remote_packages))

excluded_data = [datum for datum in remotes_list if datum not in remote_packages]
juicer.utils.Log.log_debug("excluded_data filtered into %s\n" % str(excluded_data))

http_indexes = [index for index in excluded_data if is_directory_index(index)]
remotes_from_indexes = reduce(lambda x, y: x + parse_directory_index(y), http_indexes, [])

return (remote_packages + remotes_from_indexes, excluded_data)
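For example, a file containing one direct RPM URL, one directory index, and a stray token splits as follows (the URLs are illustrative):

remotes_list = ['https://example.com/foo-1.0-1.noarch.rpm',
                'https://example.com/pkgs/',
                'not-a-url']
remote_packages = [p for p in remotes_list if is_remote_package(p)]
excluded_data = [d for d in remotes_list if d not in remote_packages]
# remote_packages == ['https://example.com/foo-1.0-1.noarch.rpm']
# excluded_data   == ['https://example.com/pkgs/', 'not-a-url']
# the directory index left in excluded_data is then expanded via
# parse_directory_index() and its packages appended to the result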
<SYSTEM_TASK:>
Retrieve a directory index and make a list of the RPMs listed.
<END_TASK>
<USER_TASK:>
Description:
def parse_directory_index(directory_index):
    """
    Retrieve a directory index and make a list of the RPMs listed.
    """
# Normalize our URL style
if not directory_index.endswith('/'):
    directory_index = directory_index + '/'

site_index = urllib2.urlopen(directory_index)
parsed_site_index = bs(site_index)
# escape the dot so only literal '.rpm' links are collected
rpm_link_tags = parsed_site_index.findAll('a', href=re.compile(r'.*\.rpm$'))

# Only save the HREF attribute values from the links found
rpm_names = [link['href'] for link in rpm_link_tags]

# Join the index path with the discovered names so we only return
# complete paths
remote_list = map(lambda end: "".join([directory_index, end]), rpm_names)

return remote_list
<SYSTEM_TASK:>
Convert a color temperature given in kelvin to an approximate RGB value.
<END_TASK>
<USER_TASK:>
Description:
def kelvin_to_rgb(kelvin):
    """
    Convert a color temperature given in kelvin to an approximate RGB value.

    :param kelvin: Color temp in K
    :return: Tuple of (r, g, b), equivalent color for the temperature
    """
temp = kelvin / 100.0

# Calculate Red:
if temp <= 66:
    red = 255
else:
    red = 329.698727446 * ((temp - 60) ** -0.1332047592)

# Calculate Green:
if temp <= 66:
    green = 99.4708025861 * math.log(temp) - 161.1195681661
else:
    green = 288.1221695283 * ((temp - 60) ** -0.0755148492)

# Calculate Blue:
if temp > 66:
    blue = 255
elif temp <= 19:
    blue = 0
else:
    blue = 138.5177312231 * math.log(temp - 10) - 305.0447927307

return tuple(correct_output(c) for c in (red, green, blue))
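The correct_output helper is not shown in this excerpt; given that RGB channels must land in the 0-255 byte range as integers, a plausible implementation would clamp and round (this is an assumed sketch, not the library's actual code):

def correct_output(channel):
    # assumed helper: clamp a computed channel into the 0-255 byte range
    return int(max(0, min(255, round(channel))))

# with that in place, kelvin_to_rgb(2700) yields a warm white of
# roughly (255, 167, 87)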
<SYSTEM_TASK:>
Set color to white.
<END_TASK>
<USER_TASK:>
Description:
def white(self):
    """ Set color to white. """
self._color = RGB_WHITE
cmd = self.command_set.white()
self.send(cmd)
<SYSTEM_TASK:>
Set the group brightness.
<END_TASK>
<USER_TASK:>
Description:
def brightness(self, brightness):
    """
    Set the group brightness.

    :param brightness: Brightness in decimal percent (0.0-1.0).
    """
if brightness < 0 or brightness > 1:
    raise ValueError("Brightness must be a percentage "
                     "represented as decimal 0-1.0")
self._brightness = brightness
cmd = self.command_set.brightness(brightness)
self.send(cmd)
<SYSTEM_TASK:>
Set the group hue.
<END_TASK>
<USER_TASK:>
Description:
def hue(self, hue):
    """
    Set the group hue.

    :param hue: Hue in decimal percent (0.0-1.0).
    """
if hue < 0 or hue > 1:
    raise ValueError("Hue must be a percentage "
                     "represented as decimal 0-1.0")
self._hue = hue
cmd = self.command_set.hue(hue)
self.send(cmd)
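Taken together with white() and brightness() above, driving a group end to end looks roughly like this (the degree-to-fraction conversion is just one convenient way to produce the required 0.0-1.0 value):

group.white()              # reset to plain white
group.brightness(0.75)     # 75% brightness
group.hue(240 / 360.0)     # blue, expressed as a 0.0-1.0 fraction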
<SYSTEM_TASK:>
Main loop that should run in the background.
<END_TASK>
<USER_TASK:>
Description:
def updater_loop(self):
    """ Main loop that should run in the background. """
self.updater_running = True
while self.updater_running:
    self.send_updates()
    sleep(self.updateinterval)
<SYSTEM_TASK:>
Starts a thread that runs the updater in the background.
<END_TASK>
<USER_TASK:>
Description:
def run_updater_in_background(self):
    """ Starts a thread that runs the updater in the background. """
# pass the bound method itself; calling it here (updater_loop()) would
# run the loop in the current thread instead of the new one
thread = threading.Thread(target=self.updater_loop)
thread.daemon = True
thread.start()
<SYSTEM_TASK:>
Update the attributes of this CartItem.
<END_TASK>
<USER_TASK:>
Description:
def update(self, path):
    """ Update the attributes of this CartItem. """
self._reset()
self.path = path
self._refresh_synced()
if self.is_synced:
    self._refresh_path()
    self._refresh_signed()
    self._refresh_nvr()
<SYSTEM_TASK:>
Sync an RPM from a REMOTE to a LOCAL path.
<END_TASK>
<USER_TASK:>
Description:
def sync_to(self, destination):
    """
    Sync an RPM from a REMOTE to a LOCAL path.

    Returns True if the item required a sync, False if it already
    existed locally.

    TODO: Remove dupe code in Cart.py:sync_remotes()
    """
rpm = RPM(self.path)
rpm.sync(destination)
if rpm.modified:
    juicer.utils.Log.log_debug("Source RPM modified. New 'path': %s" % rpm)
    self.update(rpm.path)
    return True
return False
<SYSTEM_TASK:>
Update our is_synced attribute accordingly.
<END_TASK>
<USER_TASK:>
Description:
def _refresh_synced(self):
    """ Update our is_synced attribute accordingly. """
if self.path.startswith('http'):
    juicer.utils.Log.log_debug("%s is not synced" % self.path)
    self.is_synced = False
else:
    juicer.utils.Log.log_debug("%s is synced" % self.path)
    self.is_synced = True
<SYSTEM_TASK:>
Does it exist? Can we read it? Is it an RPM?
<END_TASK>
<USER_TASK:>
Description:
def _refresh_path(self):
    """
    Does it exist? Can we read it? Is it an RPM?
    """
# Unsynced items are remote so we can't check some of their
# properties yet
if os.path.exists(self.path):
    try:
        i = open(self.path, 'r')
        i.close()
        juicer.utils.Log.log_debug("Successfully read item at: %s" % self.path)
    except IOError:
        raise IOError("Error while attempting to access item at path: %s" % self.path)
else:
    raise IOError("Could not locate item at path: %s" % self.path)
<SYSTEM_TASK:>
Refresh our name-version-release attributes.
<END_TASK>
<USER_TASK:>
Description:
def _refresh_nvr(self):
    """ Refresh our name-version-release attributes. """
rpm_info = juicer.utils.rpm_info(self.path)
self.name = rpm_info['name']
self.version = rpm_info['version']
self.release = rpm_info['release']
<SYSTEM_TASK:>
Used during update operations and when initialized.
<END_TASK>
<USER_TASK:>
Description:
def _reset(self):
    """ Used during update operations and when initialized. """
self.path = ''
# name is set alongside version/release in _refresh_nvr(), so reset
# it here too
self.name = ''
self.version = ''
self.release = ''
self.is_signed = False
self.is_synced = False
self.rpm = False
<SYSTEM_TASK:>
Add bridge groups.
<END_TASK>
<USER_TASK:>
Description:
def add_bridge(self, bridge):
    """
    Add bridge groups.

    :param bridge: Add groups from this bridge.
    """
for group in bridge.groups:
    self._groups[group.name] = group