code: string (lengths 59–3.37k)
docstring: string (lengths 8–15.5k)
def cli(env):
    settings = config.get_settings_from_client(env.client)
    env.fout(config.config_table(settings))
Show current configuration.
def __store_deactivated_components(self):
    deactivated_components = []
    for node in foundations.walkers.nodes_walker(self.__model.root_node):
        if node.family == "Component":
            node.component.interface.activated or deactivated_components.append(node.component.name)
    LOGGER.debug("> Storing '{0}' deactivated Components.".format(", ".join(deactivated_components)))
    self.__settings.set_key("Settings", "deactivated_components", ",".join(deactivated_components))
Stores deactivated Components in settings file.
def new_concept(self, tag, clemma="", tokens=None, cidx=None, **kwargs):
    if cidx is None:
        cidx = self.new_concept_id()
    if tokens:
        tokens = (t if isinstance(t, Token) else self[t] for t in tokens)
    c = Concept(cidx=cidx, tag=tag, clemma=clemma, sent=self, tokens=tokens, **kwargs)
    return self.add_concept(c)
Create a new concept object and add it to the concept list. ``tokens`` can be a list of Token objects or token indices.
def save_package_contents(self, root, team, owner, pkgname): assert isinstance(root, RootNode) instance_hash = hash_contents(root) pkg_path = self.package_path(team, owner, pkgname) if not os.path.isdir(pkg_path): os.makedirs(pkg_path) os.mkdir(os.path.join(pkg_path, self.CONTENTS_DIR)) os.mkdir(os.path.join(pkg_path, self.TAGS_DIR)) os.mkdir(os.path.join(pkg_path, self.VERSIONS_DIR)) dest = os.path.join(pkg_path, self.CONTENTS_DIR, instance_hash) with open(dest, 'w') as contents_file: json.dump(root, contents_file, default=encode_node, indent=2, sort_keys=True) tag_dir = os.path.join(pkg_path, self.TAGS_DIR) if not os.path.isdir(tag_dir): os.mkdir(tag_dir) latest_tag = os.path.join(pkg_path, self.TAGS_DIR, self.LATEST) with open (latest_tag, 'w') as tagfile: tagfile.write("{hsh}".format(hsh=instance_hash))
Saves the in-memory contents to a file in the local package repository.
def just_find_proxy(pacfile, url, host=None):
    if not os.path.isfile(pacfile):
        raise IOError('Pac file does not exist: {}'.format(pacfile))
    init()
    parse_pac(pacfile)
    proxy = find_proxy(url, host)
    cleanup()
    return proxy
This function is a wrapper around init, parse_pac, find_proxy and cleanup. This is the function to call if you want to find proxy just for one url.
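For illustration, a minimal call might look like this (the PAC file path, URL and result shown are hypothetical):

proxy = just_find_proxy('/etc/proxy.pac', 'http://example.com/', host='example.com')
print(proxy)  # e.g. 'PROXY proxy.example.com:8080' or 'DIRECT'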
def findRoleID(self, name):
    for r in self:
        if r['name'].lower() == name.lower():
            return r['id']
        del r
    return None
searches the roles by name and returns the role's ID
def execute(self, sql, param=(), times=1):
    self.log and self.log.debug('%s %s' % ('SQL:', sql))
    if param is not ():
        self.log and self.log.debug('%s %s' % ('PARAMs:', param))
    for i in xrange(times):
        try:
            ret, res = self._execute(sql, param)
            return ret, res
        except Exception, e:
            self.log and self.log.warn("The %s time execute, fail" % i)
            self.log and self.log.warn(e)
            if i:
                sleep(i**1.5)
    self.log and self.log.error(e)
    return None, e
This is the most commonly used function. With the parameter ``times`` it will try to execute the SQL up to that many times; the default is 1.
def groups_remove_owner(self, room_id, user_id, **kwargs):
    return self.__call_api_post('groups.removeOwner', roomId=room_id, userId=user_id, kwargs=kwargs)
Removes the role of owner from a user in the current Group.
def show(fig, width=600):
    img = StringIO()
    fig.savefig(img, format='svg')
    img.seek(0)
    print("%html <div style='width:{}px'>{}</div>".format(width, img.buf))
Renders a Matplotlib figure in Zeppelin. :param fig: a Matplotlib figure :param width: the width in pixel of the rendered figure, defaults to 600 Usage example:: import matplotlib.pyplot as plt from moztelemetry.zeppelin import show fig = plt.figure() plt.plot([1, 2, 3]) show(fig)
def from_dict(self, mapdict):
    self.name_format = mapdict["identifier"]
    try:
        self._fro = dict(
            [(k.lower(), v) for k, v in mapdict["fro"].items()])
    except KeyError:
        pass
    try:
        self._to = dict([(k.lower(), v) for k, v in mapdict["to"].items()])
    except KeyError:
        pass
    if self._fro is None and self._to is None:
        raise ConverterError("Missing specifications")
    if self._fro is None or self._to is None:
        self.adjust()
Import the attribute map from a dictionary :param mapdict: The dictionary
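A minimal sketch of the dictionary shape this expects (the URN values and attribute names below are purely illustrative):

mapdict = {
    "identifier": "urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
    "fro": {"urn:oid:2.5.4.4": "surname"},
    "to": {"surname": "urn:oid:2.5.4.4"},
}
converter.from_dict(mapdict)  # 'converter' is a hypothetical instance of the owning class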
def ast_from_module(self, module, modname=None):
    modname = modname or module.__name__
    if modname in self.astroid_cache:
        return self.astroid_cache[modname]
    try:
        filepath = module.__file__
        if modutils.is_python_source(filepath):
            return self.ast_from_file(filepath, modname)
    except AttributeError:
        pass
    from astroid.builder import AstroidBuilder
    return AstroidBuilder(self).module_build(module, modname)
given an imported module, return the astroid object
def bucket(cls, bucket_name, connection=None):
    connection = cls.connection if connection == None else connection
    if bucket_name not in cls._buckets:
        connection = "{connection}/{bucket_name}".format(connection=connection, bucket_name=bucket_name)
        if cls.password:
            cls._buckets[connection] = Bucket(connection, password=cls.password)
        else:
            cls._buckets[connection] = Bucket(connection)
    return cls._buckets[connection]
Gives the bucket from the couchbase server. :param bucket_name: Bucket name to fetch. :type bucket_name: str :returns: couchbase driver's Bucket object. :rtype: :class:`couchbase.client.Bucket` :raises: :exc:`RuntimeError` If the credentials weren't set.
def prettyval(self, val):
    if len(val) == self.wordsize and val[-1:] in (b'\x00', b'\xff'):
        return "%x" % struct.unpack("<" + self.fmt, val)
    if len(val) == self.wordsize and re.search(b'[\x00-\x08\x0b\x0c\x0e-\x1f]', val, re.DOTALL):
        return "%x" % struct.unpack("<" + self.fmt, val)
    if len(val) < 2 or not re.match(b'^[\x09\x0a\x0d\x20-\xff]+.$', val, re.DOTALL):
        return hexdump(val)
    val = val.replace(b"\n", b"\\n")
    return "'%s'" % val.decode('utf-8', 'ignore')
returns the value in a readable format.
async def disable_digital_reporting(self, command):
    pin = int(command[0])
    await self.core.disable_digital_reporting(pin)
Disable Firmata reporting for a digital pin. :param command: {"method": "disable_digital_reporting", "params": [PIN]} :returns: No return message.
def plot(self, **kwargs):
    try:
        import matplotlib.pyplot as plt
    except Exception as e:
        raise Exception("error importing matplotlib: {0}".format(str(e)))
    ax = kwargs.pop("ax", plt.subplot(111))
    x = np.linspace(0, self.a*3, 100)
    y = self.inv_h(x)
    ax.set_xlabel("distance")
    ax.set_ylabel("$\gamma$")
    ax.plot(x, y, **kwargs)
    return ax
get a cheap plot of the Vario2d Parameters ---------- **kwargs : (dict) keyword arguments to use for plotting Returns ------- ax : matplotlib.pyplot.axis Note ---- optional arguments in kwargs include "ax" (existing matplotlib.pyplot.axis). Other kwargs are passed to matplotlib.pyplot.plot()
def support(self, version): if not self._known_version(version): warn("unknown feature: %s"%version) return True else: if not self._get_featureset_support(version): warn("You are not supporting %s anymore "%str(version), UserWarning, self.level) if self._alone_version(version): warn("%s is the last supported feature of this group, you can simplifiy this logic. "%str(version), UserWarning,self.level) return self.predicates.get(version, True) if (not self.PY3_supported) or (not self.PY2_supported): warn("You are only supporting 1 version of Python", UserWarning, self.level) if version == PY3: return sys.version_info.major == 3 elif version == PY2: return sys.version_info.major == 2
Return `True` if the current Python version matches the version passed. Raise a deprecation warning if only PY2 or PY3 is supported, as you probably have a conditional that should be removed.
def systemInformationType3():
    a = L2PseudoLength(l2pLength=0x12)
    b = TpPd(pd=0x6)
    c = MessageType(mesType=0x1b)
    d = CellIdentity()
    e = LocalAreaId()
    f = ControlChannelDescription()
    g = CellOptionsBCCH()
    h = CellSelectionParameters()
    i = RachControlParameters()
    j = Si3RestOctets()
    packet = a / b / c / d / e / f / g / h / i / j
    return packet
SYSTEM INFORMATION TYPE 3 Section 9.1.35
def minify_printer(
        obfuscate=False,
        obfuscate_globals=False,
        shadow_funcname=False,
        drop_semi=False):
    active_rules = [rules.minify(drop_semi=drop_semi)]
    if obfuscate:
        active_rules.append(rules.obfuscate(
            obfuscate_globals=obfuscate_globals,
            shadow_funcname=shadow_funcname,
            reserved_keywords=(Lexer.keywords_dict.keys())
        ))
    return Unparser(rules=active_rules)
Construct a minimum printer. Arguments obfuscate If True, obfuscate identifiers nested in each scope with a shortened identifier name to further reduce output size. Defaults to False. obfuscate_globals Also do the same to identifiers nested on the global scope; do not enable unless renaming global variables, in a not fully deterministic manner, into something else is guaranteed not to cause problems with the generated code and other code in the same environment it will be executed in. Defaults to False for the reason above. drop_semi Drop semicolons whenever possible (e.g. the final semicolons of a given block).
def print_torrent(self):
    print('Title: %s' % self.title)
    print('URL: %s' % self.url)
    print('Category: %s' % self.category)
    print('Sub-Category: %s' % self.sub_category)
    print('Magnet Link: %s' % self.magnet_link)
    print('Torrent Link: %s' % self.torrent_link)
    print('Uploaded: %s' % self.created)
    print('Comments: %d' % self.comments)
    print('Has Cover Image: %s' % self.has_cover)
    print('User Status: %s' % self.user_status)
    print('Size: %s' % self.size)
    print('User: %s' % self.user)
    print('Seeders: %d' % self.seeders)
    print('Leechers: %d' % self.leechers)
Print the details of a torrent
def prepare(self): global print_prepare T = self.tm.trace if T: T.write(self.trace_message(u'Task.prepare()', self.node)) self.exception_raise() if self.tm.message: self.display(self.tm.message) self.tm.message = None executor = self.targets[0].get_executor() if executor is None: return executor.prepare() for t in executor.get_action_targets(): if print_prepare: print("Preparing target %s..."%t) for s in t.side_effects: print("...with side-effect %s..."%s) t.prepare() for s in t.side_effects: if print_prepare: print("...Preparing side-effect %s..."%s) s.prepare()
Called just before the task is executed. This is mainly intended to give the target Nodes a chance to unlink underlying files and make all necessary directories before the Action is actually called to build the targets.
def initial_state(self) -> StateTensor:
    s0 = []
    for fluent in self._compiler.compile_initial_state(self._batch_size):
        s0.append(self._output_size(fluent))
    s0 = tuple(s0)
    return s0
Returns the initial state tensor.
def _validate_section(self, subject, coll, parts): cvgroup = ConstraintViolationGroup() cvgroup.subject = subject query = parts.cond.to_mongo(disjunction=False) query.update(parts.body.to_mongo()) cvgroup.condition = parts.cond.to_mongo(disjunction=False) self._log.debug('Query spec: {}'.format(query)) self._log.debug('Query fields: {}'.format(parts.report_fields)) cursor = coll.find(query, parts.report_fields, **self._find_kw) if parts.sampler is not None: cursor = parts.sampler.sample(cursor) nbytes, num_dberr, num_rec = 0, 0, 0 while 1: try: record = next(cursor) nbytes += total_size(record) num_rec += 1 except StopIteration: self._log.info("collection {}: {:d} records, {:d} bytes, {:d} db-errors" .format(subject, num_rec, nbytes, num_dberr)) break except pymongo.errors.PyMongoError as err: num_dberr += 1 if num_dberr > self._max_dberr > 0: raise DBError("Too many errors") self._log.warn("DB.{:d}: {}".format(num_dberr, err)) continue if self._progress: self._progress.update(num_dberr, nbytes) violations = self._get_violations(parts.body, record) cvgroup.add_violations(violations, record) return None if nbytes == 0 else cvgroup
Validate one section of a spec. :param subject: Name of subject :type subject: str :param coll: The collection to validate :type coll: pymongo.Collection :param parts: Section parts :type parts: Validator.SectionParts :return: Group of constraint violations, if any, otherwise None :rtype: ConstraintViolationGroup or None
def doc_uri(self, args, range=None):
    self.log.debug('doc_uri: in')
    self.send_at_position("DocUri", False, "point")
Request doc of whatever at cursor.
def fasta(self):
    fasta_str = ''
    max_line_length = 79
    for p in self._molecules:
        if hasattr(p, 'sequence'):
            fasta_str += '>{0}:{1}|PDBID|CHAIN|SEQUENCE\n'.format(
                self.id.upper(), p.id)
            seq = p.sequence
            split_seq = [seq[i: i + max_line_length]
                         for i in range(0, len(seq), max_line_length)]
            for seq_part in split_seq:
                fasta_str += '{0}\n'.format(seq_part)
    return fasta_str
Generates a FASTA string for the `Assembly`. Notes ----- Explanation of FASTA format: https://en.wikipedia.org/wiki/FASTA_format Recommendation that all lines of text be shorter than 80 characters is adhered to. Format of PDBID|CHAIN|SEQUENCE is consistent with files downloaded from the PDB. Uppercase PDBID used for consistency with files downloaded from the PDB. Useful for feeding into cdhit and then running sequence clustering. Returns ------- fasta_str : str String of the fasta file for the `Assembly`.
def read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True):
    fname = os.path.join(
        data_dir, GOLD_STANDARD_BLOCKS_DIRNAME, fileroot + GOLD_STANDARD_BLOCKS_EXT)
    with io.open(fname, mode='r') as f:
        data = f.read()
    if split_blocks:
        return filter(None, data[:-1].split('\n'))
    return filter(None, data)
Read the gold standard blocks file corresponding to identifier ``fileroot`` in the gold standard blocks directory below the root ``data_dir``. Args: data_dir (str) fileroot (str) split_blocks (bool): If True, split the file's content into blocks. Returns: str or List[str]
def _grouped_backends(cls, options, backend):
    "Group options by backend and filter out output group appropriately"
    if options is None:
        return [(backend or Store.current_backend, options)]
    dfltdict = defaultdict(dict)
    for spec, groups in options.items():
        if 'output' not in groups.keys() or len(groups['output']) == 0:
            dfltdict[backend or Store.current_backend][spec.strip()] = groups
        elif set(groups['output'].keys()) - set(['backend']):
            dfltdict[groups['output']['backend']][spec.strip()] = groups
        elif ['backend'] == list(groups['output'].keys()):
            filtered = {k: v for k, v in groups.items() if k != 'output'}
            dfltdict[groups['output']['backend']][spec.strip()] = filtered
        else:
            raise Exception('The output options group must have the backend keyword')
    return [(bk, bk_opts) for (bk, bk_opts) in dfltdict.items()]
Group options by backend and filter out output group appropriately
def get_pmg_structure(phonopy_structure):
    lattice = phonopy_structure.get_cell()
    frac_coords = phonopy_structure.get_scaled_positions()
    symbols = phonopy_structure.get_chemical_symbols()
    masses = phonopy_structure.get_masses()
    mms = phonopy_structure.get_magnetic_moments()
    mms = mms or [0] * len(symbols)
    return Structure(lattice, symbols, frac_coords,
                     site_properties={"phonopy_masses": masses,
                                      "magnetic_moments": mms})
Convert a PhonopyAtoms object to pymatgen Structure object. Args: phonopy_structure (PhonopyAtoms): A phonopy structure object.
def add_paths(paths, base_path, operations):
    for operation, ns, rule, func in operations:
        path = build_path(operation, ns)
        if not path.startswith(base_path):
            continue
        method = operation.value.method.lower()
        suffix_start = 0 if len(base_path) == 1 else len(base_path)
        paths.setdefault(
            path[suffix_start:],
            swagger.PathItem(),
        )[method] = build_operation(operation, ns, rule, func)
Add paths to swagger.
async def set_config(cls, name: str, value):
    return await cls._handler.set_config(name=[name], value=[value])
Set a configuration value in MAAS. Consult your MAAS server for recognised settings. Alternatively, use the pre-canned functions also defined on this object.
def get_assessment_offered_form(self, *args, **kwargs):
    if isinstance(args[-1], list) or 'assessment_offered_record_types' in kwargs:
        return self.get_assessment_offered_form_for_create(*args, **kwargs)
    else:
        return self.get_assessment_offered_form_for_update(*args, **kwargs)
Pass through to provider AssessmentOfferedAdminSession.get_assessment_offered_form_for_update
def serialize_raw_master_key_prefix(raw_master_key):
    if raw_master_key.config.wrapping_key.wrapping_algorithm.encryption_type is EncryptionType.ASYMMETRIC:
        return to_bytes(raw_master_key.key_id)
    return struct.pack(
        ">{}sII".format(len(raw_master_key.key_id)),
        to_bytes(raw_master_key.key_id),
        raw_master_key.config.wrapping_key.wrapping_algorithm.algorithm.tag_len * 8,
        raw_master_key.config.wrapping_key.wrapping_algorithm.algorithm.iv_len,
    )
Produces the prefix that a RawMasterKey will always use for the key_info value of keys which require additional information. :param raw_master_key: RawMasterKey for which to produce a prefix :type raw_master_key: aws_encryption_sdk.key_providers.raw.RawMasterKey :returns: Serialized key_info prefix :rtype: bytes
def _convert_value(value):
    try:
        return restore(ast.literal_eval(value))
    except (ValueError, SyntaxError):
        if SETTINGS.COMMAND_LINE.STRICT_PARSING:
            raise
        return value
Parse string as python literal if possible and fallback to string.
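For illustration only (the ``restore`` helper and ``SETTINGS.COMMAND_LINE.STRICT_PARSING`` flag come from the surrounding module, so exact behaviour depends on them), the intent is roughly:

_convert_value("3.14")       # -> 3.14, parsed as a Python literal
_convert_value("[1, 2, 3]")  # -> [1, 2, 3]
_convert_value("hello")      # -> 'hello', falls back to the raw string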
def from_sysdisk(cls, label):
    disks = cls.safe_call('hosting.disk.list', {'name': label})
    if len(disks):
        return disks[0]['id']
Retrieve disk id from available system disks
def restore_byte_a0(byts):
    def replacement(match):
        "The function to apply when this regex matches."
        return match.group(0).replace(b'\x20', b'\xa0')
    return ALTERED_UTF8_RE.sub(replacement, byts)
Some mojibake has been additionally altered by a process that said "hmm, byte A0, that's basically a space!" and replaced it with an ASCII space. When the A0 is part of a sequence that we intend to decode as UTF-8, changing byte A0 to 20 would make it fail to decode. This process finds sequences that would convincingly decode as UTF-8 if byte 20 were changed to A0, and puts back the A0. For the purpose of deciding whether this is a good idea, this step gets a cost of twice the number of bytes that are changed. This is used as a step within `fix_encoding`.
def send_stop_signal(self, silent=False):
    if self.is_running():
        self._stop_signal = True
    elif not silent:
        raise RuntimeError('Loop is currently not running')
Sends a stop signal to the loop thread :param silent: True/False value that specifies whether or not to raise RuntimeError if the loop is currently not running :return:
def field2default(self, field):
    ret = {}
    if "doc_default" in field.metadata:
        ret["default"] = field.metadata["doc_default"]
    else:
        default = field.missing
        if default is not marshmallow.missing and not callable(default):
            ret["default"] = default
    return ret
Return the dictionary containing the field's default value Will first look for a `doc_default` key in the field's metadata and then fall back on the field's `missing` parameter. A callable passed to the field's missing parameter will be ignored. :param Field field: A marshmallow field. :rtype: dict
def get_info(self, symbol):
    sym = self._get_symbol_info(symbol)
    if not sym:
        raise NoDataFoundException("Symbol does not exist.")
    ret = {}
    ret['chunk_count'] = sym[CHUNK_COUNT]
    ret['len'] = sym[LEN]
    ret['appended_rows'] = sym[APPEND_COUNT]
    ret['metadata'] = sym[METADATA] if METADATA in sym else None
    ret['chunker'] = sym[CHUNKER]
    ret['chunk_size'] = sym[CHUNK_SIZE] if CHUNK_SIZE in sym else 0
    ret['serializer'] = sym[SERIALIZER]
    return ret
Returns information about the symbol, in a dictionary Parameters ---------- symbol: str the symbol for the given item in the DB Returns ------- dictionary
def get_shreds(self, feature_extractors, sheet_name):
    if self._shreds is None:
        shreds = []
        _, contours, _ = cv2.findContours(self._foreground_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for i, contour in enumerate(contours):
            shred = self._make_shred(contour, i, feature_extractors, sheet_name)
            if shred is not None:
                shreds.append(shred)
        self._shreds = shreds
    return self._shreds
Detects shreds in the current sheet and constructs Shred instances. Caches the results for further invocations. Args: feature_extractors: iterable of AbstractShredFeature instances to use for shreds feature assignment. sheet_name: string, included in shred attributes. Returns: list of Shred instances.
def profile_dir(name):
    if name:
        possible_path = Path(name)
        if possible_path.exists():
            return possible_path
    profiles = list(read_profiles())
    try:
        if name:
            profile = next(p for p in profiles if p.name == name)
        else:
            profile = next(p for p in profiles if p.default)
    except StopIteration:
        raise ProfileNotFoundError(name)
    return profile.path
Return path to FF profile for a given profile name or path.
def get_all_tags_names_as_list(self, admin=False, language="en"): if admin: if self._all_tags_cache_list_admin != {} and language in self._all_tags_cache_list_admin: return self._all_tags_cache_list_admin[language] else: if self._all_tags_cache_list != {} and language in self._all_tags_cache_list: return self._all_tags_cache_list[language] s_stud = set() s_admin = set() (common, _, org) = self.get_all_tags() for tag in common + org: tag_name_with_translation = self.gettext(language, tag.get_name()) if tag.get_name() else "" s_admin.add(tag_name_with_translation) if tag.is_visible_for_student(): s_stud.add(tag_name_with_translation) self._all_tags_cache_list_admin[language] = natsorted(s_admin, key=lambda y: y.lower()) self._all_tags_cache_list[language] = natsorted(s_stud, key=lambda y: y.lower()) if admin: return self._all_tags_cache_list_admin[language] return self._all_tags_cache_list[language]
Computes and caches two lists containing all tag names, sorted in natural order by name.
def change_email(self, email):
    self.email_unconfirmed = email
    salt, hash = generate_sha1(self.username)
    self.email_confirmation_key = hash
    self.email_confirmation_key_created = get_datetime_now()
    self.save()
    self.send_confirmation_email()
    return self
Changes the email address for a user. A user needs to verify this new email address before it becomes active. By storing the new email address in a temporary field -- ``temporary_email`` -- we are able to set this email address after the user has verified it by clicking on the verification URI in the email. This email gets send out by ``send_verification_email``. :param email: The new email address that the user wants to use.
def count_empty(self, field):
    try:
        df2 = self.df[[field]]
        vals = where(df2.applymap(lambda x: x == ''))
        num = len(vals[0])
    except Exception as e:
        self.err(e, "Can not count empty values")
        return
    self.ok("Found", num, "empty rows in column " + field)
Count the number of empty values in a column.
def _skip_trampoline(handler): data_event, self = (yield None) delegate = handler event = None depth = 0 while True: def pass_through(): _trans = delegate.send(Transition(data_event, delegate)) return _trans, _trans.delegate, _trans.event if data_event is not None and data_event.type is ReadEventType.SKIP: while True: trans, delegate, event = pass_through() if event is not None: if event.event_type is IonEventType.CONTAINER_END and event.depth <= depth: break if event is None or event.event_type is IonEventType.INCOMPLETE: data_event, _ = yield Transition(event, self) else: trans, delegate, event = pass_through() if event is not None and (event.event_type is IonEventType.CONTAINER_START or event.event_type is IonEventType.CONTAINER_END): depth = event.depth data_event, _ = yield Transition(event, self)
Intercepts events from container handlers, emitting them only if they should not be skipped.
def _set_mtu_to_nics(self, conf):
    for dom_name, dom_spec in conf.get('domains', {}).items():
        for idx, nic in enumerate(dom_spec.get('nics', [])):
            net = self._get_net(conf, dom_name, nic)
            mtu = net.get('mtu', 1500)
            if mtu != 1500:
                nic['mtu'] = mtu
For all the nics of all the domains in the conf that have MTU set, save the MTU on the NIC definition. Args: conf (dict): Configuration spec to extract the domains from Returns: None
def kvp_dict(d):
    return ', '.join(
        ["{}={}".format(k, quotable(v)) for k, v in d.items()])
Format dict to key=value pairs.
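A rough usage sketch (assuming the ``quotable`` helper simply quotes values that contain spaces):

kvp_dict({'user': 'alice', 'msg': 'hello world'})
# -> "user=alice, msg='hello world'"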
def from_graph(cls, graph, linear_energy_ranges, quadratic_energy_ranges): get_env().enable_infix_notation = True theta = cls.empty(dimod.SPIN) theta.add_offset(Symbol('offset', REAL)) def Linear(v): bias = Symbol('h_{}'.format(v), REAL) min_, max_ = linear_energy_ranges[v] theta.assertions.add(LE(bias, limitReal(max_))) theta.assertions.add(GE(bias, limitReal(min_))) return bias def Quadratic(u, v): bias = Symbol('J_{},{}'.format(u, v), REAL) if (v, u) in quadratic_energy_ranges: min_, max_ = quadratic_energy_ranges[(v, u)] else: min_, max_ = quadratic_energy_ranges[(u, v)] theta.assertions.add(LE(bias, limitReal(max_))) theta.assertions.add(GE(bias, limitReal(min_))) return bias for v in graph.nodes: theta.add_variable(v, Linear(v)) for u, v in graph.edges: theta.add_interaction(u, v, Quadratic(u, v)) return theta
Create Theta from a graph and energy ranges. Args: graph (:obj:`networkx.Graph`): Provides the structure for Theta. linear_energy_ranges (dict): A dict of the form {v: (min, max), ...} where min and max are the range of values allowed to v. quadratic_energy_ranges (dict): A dict of the form {(u, v): (min, max), ...} where min and max are the range of values allowed to (u, v). Returns: :obj:`.Theta`
def _walk_polyline(tid, intersect, T, mesh, plane, dist_tol): T = set(T) p = [] while True: p.append(intersect[1]) tid, intersections, T = get_next_triangle(mesh, T, plane, intersect, dist_tol) if tid is None: break assert len(intersections) == 2 if la.norm(intersections[0][1] - p[-1]) < dist_tol: intersect = intersections[1] else: assert la.norm(intersections[1][1] - p[-1]) < dist_tol, \ '%s not close to %s' % (str(p[-1]), str(intersections)) intersect = intersections[0] return p, T
Given an intersection, walk through the mesh triangles, computing intersection with the cut plane for each visited triangle and adding those intersection to a polyline.
def ext_pillar(
        mid,
        pillar,
        path,
        idkey='id',
        namespace=None,
        fieldnames=None,
        restkey=None,
        restval=None,
        dialect='excel'):
    with salt.utils.files.fopen(path, 'rb') as f:
        sheet = csv.DictReader(f, fieldnames,
                               restkey=restkey, restval=restval, dialect=dialect)
        for row in sheet:
            if row[idkey] == mid:
                if namespace:
                    return {namespace: row}
                else:
                    return row
    return {}
Read a CSV into Pillar :param str path: Absolute path to a CSV file. :param str idkey: (Optional) The column name of minion IDs. :param str namespace: (Optional) A pillar key to namespace the values under. :param list fieldnames: (Optional) if the first row of the CSV is not column names they may be specified here instead.
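A small illustration of the intended behaviour (the file contents and minion ID below are made up):

# /srv/pillar/minions.csv contains:
#   id,role,datacenter
#   web01,webserver,eu-west
# For minion 'web01' with namespace='csv', the returned pillar would be:
#   {'csv': {'id': 'web01', 'role': 'webserver', 'datacenter': 'eu-west'}}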
def _register_hook(hook_name, target, func, *args, **kwargs):
    call = (func, args, kwargs)
    try:
        getattr(target, hook_name).append(call)
    except AttributeError:
        setattr(target, hook_name, [call])
Generic hook registration.
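A minimal usage sketch (the hook name and target object here are hypothetical):

class Request:
    pass

req = Request()
_register_hook('_post_hooks', req, print, 'request finished')
# Whoever owns the hook list can later invoke the stored calls:
for func, args, kwargs in getattr(req, '_post_hooks', []):
    func(*args, **kwargs)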
def flush(self):
    n = self.accumulator
    self.accumulator = 0
    stream = self.stream
    stream.append(n)
    self.sum += n
    streamlen = len(stream)
    if streamlen > self.period:
        self.sum -= stream.popleft()
        streamlen -= 1
    if streamlen == 0:
        self.last_average = 0
    else:
        self.last_average = self.sum / streamlen
Add accumulator to the moving average queue and reset it. For example, called by the StatsCollector once per second to calculate per-second average.
def add_favicon_path(self, path: str) -> None:
    spec = web.URLSpec(
        '/(favicon.ico)',
        StaticFileHandler,
        dict(path=path)
    )
    handlers = self.handlers[0][1]
    handlers.append(spec)
Add path to serve favicon file. ``path`` should be a directory, which contains favicon file (``favicon.ico``) for your app.
def collapse_witnesses(self): if self._matches.empty: self._matches.rename(columns={constants.SIGLUM_FIELDNAME: constants.SIGLA_FIELDNAME}, inplace=True) return self._matches.loc[:, constants.SIGLA_FIELDNAME] = \ self._matches[constants.SIGLUM_FIELDNAME] grouped = self._matches.groupby( [constants.WORK_FIELDNAME, constants.NGRAM_FIELDNAME, constants.COUNT_FIELDNAME], sort=False) def merge_sigla(df): merged = df[0:1] sigla = list(df[constants.SIGLA_FIELDNAME]) sigla.sort() merged[constants.SIGLUM_FIELDNAME] = ', '.join(sigla) return merged self._matches = grouped.apply(merge_sigla) del self._matches[constants.SIGLA_FIELDNAME] self._matches.rename(columns={constants.SIGLUM_FIELDNAME: constants.SIGLA_FIELDNAME}, inplace=True)
Groups together witnesses for the same n-gram and work that has the same count, and outputs a single row for each group. This output replaces the siglum field with a sigla field that provides a comma separated list of the witness sigla. Due to this, it is not necessarily possible to run other Results methods on results that have had their witnesses collapsed.
def scanl(f, n, ns):
    yield n
    for m in ns:
        n = f(n, m)
        yield n
Reduce ns by f starting with n yielding each intermediate value. tuple(scanl(f, n, ns))[-1] == reduce(f, ns, n) Parameters ---------- f : callable A binary function. n : any The starting value. ns : iterable of any The iterable to scan over. Yields ------ p : any The value of reduce(f, ns[:idx]) where idx is the current index. Examples -------- >>> import operator as op >>> tuple(scanl(op.add, 0, (1, 2, 3, 4))) (0, 1, 3, 6, 10)
def _set_visible(self, visibility, grid_index=None):
    if grid_index is None:
        for ax in self.flat_grid:
            ax.set_visible(visibility)
    else:
        if grid_index < 0 or grid_index >= len(self.grids):
            raise IndexError('Valid indices : 0 to {}'.format(len(self.grids) - 1))
        for ax in self.grids[grid_index]:
            ax.set_visible(visibility)
Sets the visibility property of all axes.
def add_optionals(self, optionals_in, optionals_out): spec = self.spec if (not optionals_in) and (not optionals_out): return input_types = [datatypes.Array(dim) for (name, dim) in optionals_in] output_types = [datatypes.Array(dim) for (name, dim) in optionals_out] input_names = [str(name) for (name, dim) in optionals_in] output_names = [str(name) for (name, dim) in optionals_out] input_features = list(zip(input_names, input_types)) output_features = list(zip(output_names, output_types)) len_before_in = len(spec.description.input) len_before_out = len(spec.description.output) set_transform_interface_params(spec, input_features, output_features, True) for idx in range(len_before_in, len(spec.description.input)): spec.description.input[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE for idx in range(len_before_out, len(spec.description.output)): spec.description.output[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
Add optional inputs and outputs to the model spec. Parameters ---------- optionals_in: [str] List of inputs that are optionals. optionals_out: [str] List of outputs that are optionals. See Also -------- set_input, set_output
def incr(name, value=1, rate=1, tags=None):
    client().incr(name, value, rate, tags)
Increment a metric by value. >>> import statsdecor >>> statsdecor.incr('my.metric')
def _parse_tag(self):
    reset = self._head
    self._head += 1
    try:
        tag = self._really_parse_tag()
    except BadRoute:
        self._head = reset
        self._emit_text("<")
    else:
        self._emit_all(tag)
Parse an HTML tag at the head of the wikicode string.
def process_event(self, event):
    for effect in reversed(self._effects):
        event = effect.process_event(event)
        if event is None:
            break
    return event
Process a new input event. This method will pass the event on to any Effects in reverse Z order so that the top-most Effect has priority. :param event: The Event that has been triggered. :returns: None if the Scene processed the event, else the original event.
def _dict_raise_on_duplicates(ordered_pairs):
    d = {}
    for k, v in ordered_pairs:
        if k in d:
            raise ValueError("duplicate key: %r" % (k,))
        else:
            d[k] = v
    return d
Reject duplicate keys.
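Helpers like this are typically passed to the JSON parser via ``object_pairs_hook``; a small sketch of that use:

import json

json.loads('{"a": 1, "b": 2}', object_pairs_hook=_dict_raise_on_duplicates)  # {'a': 1, 'b': 2}
json.loads('{"a": 1, "a": 2}', object_pairs_hook=_dict_raise_on_duplicates)  # raises ValueError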
def call_api(self, table, column, value, **kwargs):
    try:
        output_format = kwargs.pop('output_format')
    except KeyError:
        output_format = self.output_format
    url_list = [self.base_url, table, column, quote(value), 'rows']
    rows_count = self._number_of_rows(**kwargs)
    url_list.append(rows_count)
    url_string = '/'.join(url_list)
    xml_data = urlopen(url_string).read()
    data = self._format_data(output_format, xml_data)
    return data
Exposed method to connect and query the EPA's API.
def write_byte_data(self, i2c_addr, register, value, force=None):
    self._set_address(i2c_addr, force=force)
    msg = i2c_smbus_ioctl_data.create(
        read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BYTE_DATA
    )
    msg.data.contents.byte = value
    ioctl(self.fd, I2C_SMBUS, msg)
Write a byte to a given register. :param i2c_addr: i2c address :type i2c_addr: int :param register: Register to write to :type register: int :param value: Byte value to transmit :type value: int :param force: :type force: Boolean :rtype: None
def document_path(cls, project, knowledge_base, document):
    return google.api_core.path_template.expand(
        'projects/{project}/knowledgeBases/{knowledge_base}/documents/{document}',
        project=project,
        knowledge_base=knowledge_base,
        document=document,
    )
Return a fully-qualified document string.
def _check_prompts(pre_prompt, post_prompt):
    if not isinstance(pre_prompt, str):
        raise TypeError("The pre_prompt was not a string!")
    if post_prompt is not _NO_ARG and not isinstance(post_prompt, str):
        raise TypeError("The post_prompt was given and was not a string!")
Check that the prompts are strings
def _parse_meta(self, meta):
    for hostname, hostvars in meta.get('hostvars', {}).items():
        for var_key, var_val in hostvars.items():
            self._get_host(hostname)['hostvars'][var_key] = var_val
Parse the _meta element from a dynamic host inventory output.
def spectrogram_to_mel_matrix(num_mel_bins=20,
                              num_spectrogram_bins=129,
                              audio_sample_rate=8000,
                              lower_edge_hertz=125.0,
                              upper_edge_hertz=3800.0):
    nyquist_hertz = audio_sample_rate / 2.
    if lower_edge_hertz < 0.0:
        raise ValueError("lower_edge_hertz %.1f must be >= 0" % lower_edge_hertz)
    if lower_edge_hertz >= upper_edge_hertz:
        raise ValueError("lower_edge_hertz %.1f >= upper_edge_hertz %.1f" %
                         (lower_edge_hertz, upper_edge_hertz))
    if upper_edge_hertz > nyquist_hertz:
        raise ValueError("upper_edge_hertz %.1f is greater than Nyquist %.1f" %
                         (upper_edge_hertz, nyquist_hertz))
    spectrogram_bins_hertz = np.linspace(0.0, nyquist_hertz, num_spectrogram_bins)
    spectrogram_bins_mel = hertz_to_mel(spectrogram_bins_hertz)
    band_edges_mel = np.linspace(hertz_to_mel(lower_edge_hertz),
                                 hertz_to_mel(upper_edge_hertz), num_mel_bins + 2)
    mel_weights_matrix = np.empty((num_spectrogram_bins, num_mel_bins))
    for i in range(num_mel_bins):
        lower_edge_mel, center_mel, upper_edge_mel = band_edges_mel[i:i + 3]
        lower_slope = ((spectrogram_bins_mel - lower_edge_mel) /
                       (center_mel - lower_edge_mel))
        upper_slope = ((upper_edge_mel - spectrogram_bins_mel) /
                       (upper_edge_mel - center_mel))
        mel_weights_matrix[:, i] = np.maximum(0.0, np.minimum(lower_slope, upper_slope))
    mel_weights_matrix[0, :] = 0.0
    return mel_weights_matrix
Return a matrix that can post-multiply spectrogram rows to make mel. Returns a np.array matrix A that can be used to post-multiply a matrix S of spectrogram values (STFT magnitudes) arranged as frames x bins to generate a "mel spectrogram" M of frames x num_mel_bins. M = S A. The classic HTK algorithm exploits the complementarity of adjacent mel bands to multiply each FFT bin by only one mel weight, then add it, with positive and negative signs, to the two adjacent mel bands to which that bin contributes. Here, by expressing this operation as a matrix multiply, we go from num_fft multiplies per frame (plus around 2*num_fft adds) to around num_fft^2 multiplies and adds. However, because these are all presumably accomplished in a single call to np.dot(), it's not clear which approach is faster in Python. The matrix multiplication has the attraction of being more general and flexible, and much easier to read. Args: num_mel_bins: How many bands in the resulting mel spectrum. This is the number of columns in the output matrix. num_spectrogram_bins: How many bins there are in the source spectrogram data, which is understood to be fft_size/2 + 1, i.e. the spectrogram only contains the nonredundant FFT bins. audio_sample_rate: Samples per second of the audio at the input to the spectrogram. We need this to figure out the actual frequencies for each spectrogram bin, which dictates how they are mapped into mel. lower_edge_hertz: Lower bound on the frequencies to be included in the mel spectrum. This corresponds to the lower edge of the lowest triangular band. upper_edge_hertz: The desired top edge of the highest frequency band. Returns: An np.array with shape (num_spectrogram_bins, num_mel_bins). Raises: ValueError: if frequency edges are incorrectly ordered or out of range.
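Since the docstring defines M = S A, a minimal usage sketch (the STFT magnitudes here are random placeholder data):

import numpy as np

frames, fft_size = 100, 256
spectrogram = np.abs(np.random.randn(frames, fft_size // 2 + 1))  # placeholder STFT magnitudes
mel_matrix = spectrogram_to_mel_matrix(num_mel_bins=20,
                                       num_spectrogram_bins=fft_size // 2 + 1,
                                       audio_sample_rate=8000)
mel_spectrogram = np.dot(spectrogram, mel_matrix)  # shape (frames, num_mel_bins)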
def write(self, output):
    w = c_int32()
    self.WriteAnalogF64(self.bufsize, 0, 10.0, DAQmx_Val_GroupByChannel,
                        output, w, None)
Writes the data to be output to the device buffer, output will be looped when the data runs out :param output: data to output :type output: numpy.ndarray
def add_features(self, kept_indices, new_indices, new_state_vec, new_state_cov, new_noise_var): assert len(kept_indices) == len(self.state_vec) assert len(new_indices) == len(new_state_vec) assert len(new_indices) == len(new_state_cov) assert len(new_indices) == len(new_noise_var) if self.has_cached_obs_vec: del self.obs_vec if self.has_cached_predicted_state_vec: del self.predicted_obs_vec nfeatures = len(kept_indices) + len(new_indices) next_state_vec = np.zeros((nfeatures, self.state_len)) next_state_cov = np.zeros((nfeatures, self.state_len, self.state_len)) next_noise_var = np.zeros((nfeatures, self.state_len)) if len(kept_indices) > 0: next_state_vec[kept_indices] = self.state_vec next_state_cov[kept_indices] = self.state_cov next_noise_var[kept_indices] = self.noise_var if len(self.state_noise_idx) > 0: self.state_noise_idx = kept_indices[self.state_noise_idx] if len(new_indices) > 0: next_state_vec[new_indices] = new_state_vec next_state_cov[new_indices] = new_state_cov next_noise_var[new_indices] = new_noise_var self.state_vec = next_state_vec self.state_cov = next_state_cov self.noise_var = next_noise_var
Add new features to the state kept_indices - the mapping from all indices in the state to new indices in the new version new_indices - the indices of the new features in the new version new_state_vec - the state vectors for the new indices new_state_cov - the covariance matrices for the new indices new_noise_var - the noise variances for the new indices
def set_thresh(thresh, p=False, hostname=None):
    driver_send("SET_THRESHNEW %s *%s" % (str(thresh), "p" if p else ""), hostname=hostname)
Sets the level of the threshold slider. If ``p==True`` will be interpreted as a _p_-value
def transform(function):
    def transform_fn(_, result):
        if isinstance(result, Nothing):
            return result
        lgr.debug("Transforming %r with %r", result, function)
        try:
            return function(result)
        except:
            exctype, value, tb = sys.exc_info()
            try:
                new_exc = StyleFunctionError(function, exctype, value)
                new_exc.__cause__ = None
                six.reraise(StyleFunctionError, new_exc, tb)
            finally:
                del tb
    return transform_fn
Return a processor for a style's "transform" function.
def created(self): timestamp, current = self._created if timestamp.endswith('ago'): quantity, kind, ago = timestamp.split() quantity = int(quantity) if 'sec' in kind: current -= quantity elif 'min' in kind: current -= quantity * 60 elif 'hour' in kind: current -= quantity * 60 * 60 return datetime.datetime.fromtimestamp(current) current = datetime.datetime.fromtimestamp(current) timestamp = timestamp.replace( 'Y-day', str(current.date() - datetime.timedelta(days=1))) timestamp = timestamp.replace('Today', current.date().isoformat()) try: return dateutil.parser.parse(timestamp) except: return current
Attempt to parse the human readable torrent creation datetime.
def has_parent_bins(self, bin_id):
    if self._catalog_session is not None:
        return self._catalog_session.has_parent_catalogs(catalog_id=bin_id)
    return self._hierarchy_session.has_parents(id_=bin_id)
Tests if the ``Bin`` has any parents. arg: bin_id (osid.id.Id): the ``Id`` of a bin return: (boolean) - ``true`` if the bin has parents, ``false`` otherwise raise: NotFound - ``bin_id`` is not found raise: NullArgument - ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def handle_bodhi(msg):
    if 'bodhi.update.comment' in msg.topic:
        username = msg.msg['comment']['author']
    elif 'bodhi.buildroot_override' in msg.topic:
        username = msg.msg['override']['submitter']
    else:
        username = msg.msg.get('update', {}).get('submitter')
    return username
Given a bodhi message, return the FAS username.
def _on_hid_pnp(self, w_param, l_param): "Process WM_DEVICECHANGE system messages" new_status = "unknown" if w_param == DBT_DEVICEARRIVAL: notify_obj = None if int(l_param): notify_obj = DevBroadcastDevInterface.from_address(l_param) if notify_obj and \ notify_obj.dbcc_devicetype == DBT_DEVTYP_DEVICEINTERFACE: new_status = "connected" elif w_param == DBT_DEVICEREMOVECOMPLETE: notify_obj = None if int(l_param): notify_obj = DevBroadcastDevInterface.from_address(l_param) if notify_obj and \ notify_obj.dbcc_devicetype == DBT_DEVTYP_DEVICEINTERFACE: new_status = "disconnected" if new_status != "unknown" and new_status != self.current_status: self.current_status = new_status self.on_hid_pnp(self.current_status) return True
Process WM_DEVICECHANGE system messages
def updated(self, user):
    for who, what, old, new in self.history(user):
        if (what == "comment" or what == "description") and new != "":
            return True
    return False
True if the user commented on the ticket in the given time frame
def count(self, with_limit_and_skip=False):
    validate_boolean("with_limit_and_skip", with_limit_and_skip)
    cmd = SON([("count", self.__collection.name), ("query", self.__spec)])
    if self.__max_time_ms is not None:
        cmd["maxTimeMS"] = self.__max_time_ms
    if self.__comment:
        cmd["$comment"] = self.__comment
    if self.__hint is not None:
        cmd["hint"] = self.__hint
    if with_limit_and_skip:
        if self.__limit:
            cmd["limit"] = self.__limit
        if self.__skip:
            cmd["skip"] = self.__skip
    return self.__collection._count(cmd, self.__collation)
Get the size of the results set for this query. Returns the number of documents in the results set for this query. Does not take :meth:`limit` and :meth:`skip` into account by default - set `with_limit_and_skip` to ``True`` if that is the desired behavior. Raises :class:`~pymongo.errors.OperationFailure` on a database error. When used with MongoDB >= 2.6, :meth:`~count` uses any :meth:`~hint` applied to the query. In the following example the hint is passed to the count command: collection.find({'field': 'value'}).hint('field_1').count() The :meth:`count` method obeys the :attr:`~pymongo.collection.Collection.read_preference` of the :class:`~pymongo.collection.Collection` instance on which :meth:`~pymongo.collection.Collection.find` was called. :Parameters: - `with_limit_and_skip` (optional): take any :meth:`limit` or :meth:`skip` that has been applied to this cursor into account when getting the count .. note:: The `with_limit_and_skip` parameter requires server version **>= 1.1.4-** .. versionchanged:: 2.8 The :meth:`~count` method now supports :meth:`~hint`.
def transport_service(self, **kwargs):
    vlan = kwargs.pop('vlan')
    service_id = kwargs.pop('service_id')
    callback = kwargs.pop('callback', self._callback)
    if not pynos.utilities.valid_vlan_id(vlan, extended=True):
        raise InvalidVlanId("vlan must be between `1` and `8191`")
    service_args = dict(name=vlan, transport_service=service_id)
    transport_service = getattr(self._interface,
                                'interface_vlan_interface_vlan_'
                                'transport_service')
    config = transport_service(**service_args)
    return callback(config)
Configure VLAN Transport Service. Args: vlan (str): The VLAN ID. service_id (str): The transport-service ID. callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `vlan` or `service_id` is not specified. ValueError: if `vlan` is invalid. Examples: >>> # Skip due to current work in devel >>> # TODO: Reenable >>> def test_transport_service(): ... import pynos.device ... switches = ['10.24.39.212', '10.24.39.202'] ... auth = ('admin', 'password') ... vlan = '6666' ... service_id = '1' ... for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.add_vlan_int(vlan) ... output = dev.interface.spanning_tree_state( ... int_type='vlan', name=vlan, enabled=False) ... output = dev.interface.transport_service(vlan=vlan, ... service_id=service_id) ... dev.interface.transport_service() ... # doctest: +IGNORE_EXCEPTION_DETAIL >>> test_transport_service() # doctest: +SKIP
def remove_sonos_playlist(self, sonos_playlist):
    object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)
    return self.contentDirectory.DestroyObject([('ObjectID', object_id)])
Remove a Sonos playlist. Args: sonos_playlist (DidlPlaylistContainer): Sonos playlist to remove or the item_id (str). Returns: bool: True if successful, False otherwise Raises: SoCoUPnPException: If sonos_playlist does not point to a valid object.
def safe_join(directory, *pathnames):
    parts = [directory]
    for filename in pathnames:
        if filename != "":
            filename = posixpath.normpath(filename)
        for sep in _os_alt_seps:
            if sep in filename:
                return None
        if os.path.isabs(filename) or filename == ".." or filename.startswith("../"):
            return None
        parts.append(filename)
    return posixpath.join(*parts)
Safely join `directory` and one or more untrusted `pathnames`. If this cannot be done, this function returns ``None``. :param directory: the base directory. :param pathnames: the untrusted pathnames relative to that directory.
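A quick illustration of the intended behaviour (the paths below are made up):

safe_join('/var/www/static', 'css/site.css')   # -> '/var/www/static/css/site.css'
safe_join('/var/www/static', '../etc/passwd')  # -> None, path traversal rejected
safe_join('/var/www/static', '/etc/passwd')    # -> None, absolute paths rejected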
def from_model(cls, document):
    return cls(meta={'id': document.id}, **cls.serialize(document))
By default use the ``to_dict`` method and exclude ``_id``, ``_cls`` and ``owner`` fields
def get_learning_curves(self, lc_extractor=extract_HBS_learning_curves, config_ids=None):
    config_ids = self.data.keys() if config_ids is None else config_ids
    lc_dict = {}
    for id in config_ids:
        runs = self.get_runs_by_id(id)
        lc_dict[id] = lc_extractor(runs)
    return lc_dict
extracts all learning curves from all run configurations Parameters ---------- lc_extractor: callable a function to return a list of learning_curves. defaults to hpbanster.HB_result.extract_HP_learning_curves config_ids: list of valid config ids if only a subset of the config ids is wanted Returns ------- dict a dictionary with the config_ids as keys and the learning curves as values
def write(self, message):
    message.id = message.id or self.writer.next_message_id()
    if message.message_type in self.CALL_REQ_TYPES:
        message_factory = self.request_message_factory
    else:
        message_factory = self.response_message_factory
    fragments = message_factory.fragment(message)
    return self._write_fragments(fragments)
Writes the given message up the wire. Does not expect a response back for the message. :param message: Message to write.
def focus_prev(self):
    mid = self.get_selected_mid()
    localroot = self._sanitize_position((mid,))
    if localroot == self.get_focus()[1]:
        newpos = self._tree.prev_position(mid)
        if newpos is not None:
            newpos = self._sanitize_position((newpos,))
    else:
        newpos = localroot
    if newpos is not None:
        self.body.set_focus(newpos)
focus previous message in depth first order
def wait_for_service_tasks_all_unchanged(
        service_name,
        old_task_ids,
        task_predicate=None,
        timeout_sec=30
):
    try:
        time_wait(
            lambda: tasks_missing_predicate(service_name, old_task_ids, task_predicate),
            timeout_seconds=timeout_sec)
    except TimeoutExpired:
        return timeout_sec
    raise DCOSException("One or more of the following tasks were no longer found: {}".format(old_task_ids))
Returns after verifying that NONE of old_task_ids have been removed or replaced from the service :param service_name: the service name :type service_name: str :param old_task_ids: list of original task ids as returned by get_service_task_ids :type old_task_ids: [str] :param task_predicate: filter to use when searching for tasks :type task_predicate: func :param timeout_sec: duration to wait until assuming tasks are unchanged :type timeout_sec: int :return: the duration waited in seconds (the timeout value) :rtype: int
def makeAsn(segID,N, CA, C, O, geo): CA_CB_length=geo.CA_CB_length C_CA_CB_angle=geo.C_CA_CB_angle N_C_CA_CB_diangle=geo.N_C_CA_CB_diangle CB_CG_length=geo.CB_CG_length CA_CB_CG_angle=geo.CA_CB_CG_angle N_CA_CB_CG_diangle=geo.N_CA_CB_CG_diangle CG_OD1_length=geo.CG_OD1_length CB_CG_OD1_angle=geo.CB_CG_OD1_angle CA_CB_CG_OD1_diangle=geo.CA_CB_CG_OD1_diangle CG_ND2_length=geo.CG_ND2_length CB_CG_ND2_angle=geo.CB_CG_ND2_angle CA_CB_CG_ND2_diangle=geo.CA_CB_CG_ND2_diangle carbon_b= calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle) CB= Atom("CB", carbon_b, 0.0 , 1.0, " "," CB", 0,"C") carbon_g= calculateCoordinates(N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle) CG= Atom("CG", carbon_g, 0.0, 1.0, " ", " CG", 0, "C") oxygen_d1= calculateCoordinates(CA, CB, CG, CG_OD1_length, CB_CG_OD1_angle, CA_CB_CG_OD1_diangle) OD1= Atom("OD1", oxygen_d1, 0.0, 1.0, " ", " OD1", 0, "O") nitrogen_d2= calculateCoordinates(CA, CB, CG, CG_ND2_length, CB_CG_ND2_angle, CA_CB_CG_ND2_diangle) ND2= Atom("ND2", nitrogen_d2, 0.0, 1.0, " ", " ND2", 0, "N") res= Residue((' ', segID, ' '), "ASN", ' ') res.add(N) res.add(CA) res.add(C) res.add(O) res.add(CB) res.add(CG) res.add(OD1) res.add(ND2) return res
Creates an Asparagine residue
def set_zerg_server_params(self, socket, clients_socket_pool=None):
    if clients_socket_pool:
        self._set('zergpool', '%s:%s' % (socket, ','.join(listify(clients_socket_pool))), multi=True)
    else:
        self._set('zerg-server', socket)
    return self._section
Zerg mode. Zerg server params. When your site load is variable, it would be nice to be able to add workers dynamically. Enabling Zerg mode you can allow zerg clients to attach to your already running server and help it in the work. * http://uwsgi-docs.readthedocs.io/en/latest/Zerg.html :param str|unicode socket: Unix socket to bind server to. Examples: * unix socket - ``/var/run/mutalisk`` * Linux abstract namespace - ``@nydus`` :param str|unicode|list[str|unicode] clients_socket_pool: This enables Zerg Pools. .. note:: Expects master process. Accepts sockets that will be mapped to Zerg socket. * http://uwsgi-docs.readthedocs.io/en/latest/Zerg.html#zerg-pools
def deep_encode(s, encoding='utf-8', errors='strict'):
    # Note: the original flattened source began with an unconditional
    # `s = deep_encode(s)`, which would recurse forever; it is dropped here.
    if sys.version_info.major < 3 and isinstance(s, unicode):
        return s.encode(encoding, errors)
    if isinstance(s, (list, tuple)):
        return [deep_encode(i, encoding=encoding, errors=errors) for i in s]
    if isinstance(s, dict):
        return dict([
            (
                deep_encode(key, encoding=encoding, errors=errors),
                deep_encode(s[key], encoding=encoding, errors=errors)
            )
            for key in s
        ])
    return s
Encode "DEEP" S using the codec registered for encoding.
def full_path(self):
    return self.normalize_path(self.directory_sep().join((self.start_path(), self.session_path())))
Return the full path to the current session directory. The result is made by joining the start path with the current session directory :return: str
def filename(self):
    fname = self.raw_filename
    if not isinstance(fname, unicode):
        fname = fname.decode('utf8', 'ignore')
    fname = normalize('NFKD', fname)
    fname = fname.encode('ASCII', 'ignore').decode('ASCII')
    fname = os.path.basename(fname.replace('\\', os.path.sep))
    fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
    fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
    return fname[:255] or 'empty'
Name of the file on the client file system, but normalized to ensure file system compatibility. An empty filename is returned as 'empty'. Only ASCII letters, digits, dashes, underscores and dots are allowed in the final filename. Accents are removed, if possible. Whitespace is replaced by a single dash. Leading or tailing dots or dashes are removed. The filename is limited to 255 characters.
def update(self, reseed): if self._clear: for i in range(0, 3): self._screen.print_at(" ", self._x, self._screen.start_line + self._y + i) self._maybe_reseed(reseed) else: for i in range(0, 3): self._screen.print_at(chr(randint(32, 126)), self._x, self._screen.start_line + self._y + i, Screen.COLOUR_GREEN) for i in range(4, 6): self._screen.print_at(chr(randint(32, 126)), self._x, self._screen.start_line + self._y + i, Screen.COLOUR_GREEN, Screen.A_BOLD) self._maybe_reseed(reseed)
Update that trail! :param reseed: Whether we are in the normal reseed cycle or not.
def exchange_reference(root_url, service, version):
    root_url = root_url.rstrip('/')
    if root_url == OLD_ROOT_URL:
        return 'https://references.taskcluster.net/{}/{}/exchanges.json'.format(service, version)
    else:
        return '{}/references/{}/{}/exchanges.json'.format(root_url, service, version)
Generate URL for a Taskcluster exchange reference.
def faucet(self):
    if hasattr(self, 'faucets'):
        if len(self.faucets) > 1:
            raise TypeError("Only one faucet per account.")
        return self.faucets[0]
    raise AttributeError("There is no faucet assigned.")
Show current linked faucet.
def __get_default_currency(self):
    if sys.platform == "win32":
        def_curr = self.book["default-currency"] = self.__get_default_currency_windows()
    else:
        def_curr = self.book["default-currency"] = self.__get_locale_currency()
    return def_curr
Read the default currency from GnuCash preferences
def clear(self):
    if self.default_value is None:
        self.current_value = bytearray()
    else:
        self.current_value = bytearray(self.default_value)
Clear this config variable to its reset value.
def list_nodes(full=False, call=None): if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) ret = {} if POLL_ALL_LOCATIONS: for location in JOYENT_LOCATIONS: result = query(command='my/machines', location=location, method='GET') if result[0] in VALID_RESPONSE_CODES: nodes = result[1] for node in nodes: if 'name' in node: node['location'] = location ret[node['name']] = reformat_node(item=node, full=full) else: log.error('Invalid response when listing Joyent nodes: %s', result[1]) else: location = get_location() result = query(command='my/machines', location=location, method='GET') nodes = result[1] for node in nodes: if 'name' in node: node['location'] = location ret[node['name']] = reformat_node(item=node, full=full) return ret
list of nodes, keeping only a brief listing CLI Example: .. code-block:: bash salt-cloud -Q
def metrics(self, *metrics):
    for m in metrics:
        self._cauldron.use(self._shelf.find(m, Metric))
    self.dirty = True
    return self
Add a list of Metric ingredients to the query. These can either be Metric objects or strings representing metrics on the shelf. The Metric expression will be added to the query's select statement. The metric value is a property of each row of the result. :param metrics: Metrics to add to the recipe. Metrics can either be keys on the ``shelf`` or Metric objects :type metrics: list
def close(self, save_to=None): if save_to is None: if not self.closed: _logger.debug(u"[provenance] Deleting temporary %s", self.folder) shutil.rmtree(self.folder, ignore_errors=True) else: save_to = os.path.abspath(save_to) _logger.info(u"[provenance] Finalizing Research Object") self._finalize() if os.path.isdir(save_to): _logger.info(u"[provenance] Deleting existing %s", save_to) shutil.rmtree(save_to) shutil.move(self.folder, save_to) _logger.info(u"[provenance] Research Object saved to %s", save_to) self.folder = save_to self.closed = True
Close the Research Object, optionally saving to specified folder. Closing will remove any temporary files used by this research object. After calling this method, this ResearchObject instance can no longer be used, except for no-op calls to .close(). The 'saveTo' folder should not exist - if it does, it will be deleted. It is safe to call this function multiple times without the 'saveTo' argument, e.g. within a try..finally block to ensure the temporary files of this Research Object are removed.
def memory_read64(self, addr, num_long_words):
    buf_size = num_long_words
    buf = (ctypes.c_ulonglong * buf_size)()
    units_read = self._dll.JLINKARM_ReadMemU64(addr, buf_size, buf, 0)
    if units_read < 0:
        raise errors.JLinkException(units_read)
    return buf[:units_read]
Reads memory from the target system in units of 64-bits. Args: self (JLink): the ``JLink`` instance addr (int): start address to read from num_long_words (int): number of long words to read Returns: List of long words read from the target system. Raises: JLinkException: if memory could not be read
def shutdown(self):
    try:
        self._ucan.shutdown()
    except Exception as ex:
        log.error(ex)
Shuts down all CAN interfaces and hardware interface.
def dump(self, path):
    try:
        with open(path, "wb") as f:
            f.write(self.__str__().encode("utf-8"))
    except:
        pass
        with open(path, "wb") as f:
            pickle.dump(self.__data__, f)
dump DictTree data to json files.
def cli(env, identifier, name, note, tag):
    image_mgr = SoftLayer.ImageManager(env.client)
    data = {}
    if name:
        data['name'] = name
    if note:
        data['note'] = note
    if tag:
        data['tag'] = tag
    image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image')
    if not image_mgr.edit(image_id, **data):
        raise exceptions.CLIAbort("Failed to Edit Image")
Edit details of an image.