code (string, lengths 51-2.38k) | docstring (string, lengths 4-15.2k)
def data_complete(datadir, sitedir, get_container_name):
    if any(not path.isdir(sitedir + x) for x in ('/files', '/run', '/solr')):
        return False
    if docker.is_boot2docker():
        return all(docker.inspect_container(get_container_name(x))
                   for x in ('pgdata', 'venv'))
    return path.isdir(datadir + '/venv') and path.isdir(sitedir + '/postgres')
Return True if the directories and containers we expect are present in datadir, sitedir, and (for boot2docker) the expected containers
def get_extents(self):
    extents = ffi.new('cairo_rectangle_t *')
    if cairo.cairo_recording_surface_get_extents(self._pointer, extents):
        return (extents.x, extents.y, extents.width, extents.height)
Return the extents of the recording-surface. :returns: A ``(x, y, width, height)`` tuple of floats, or :obj:`None` if the surface is unbounded. *New in cairo 1.12*
def certify_tuple(value, certifier=None, min_len=None, max_len=None,
                  required=True, schema=None):
    certify_iterable(
        value=value,
        types=tuple([tuple]),
        certifier=certifier,
        min_len=min_len,
        max_len=max_len,
        schema=schema,
        required=required,
    )
Validates a tuple, checking it against an optional schema. The schema should be a list of expected values replaced by functions which will be called with the corresponding value in the input. A simple example: >>> certifier = certify_tuple(schema=( ... certify_key(kind='Model'), ... certify_int(min=0), ... )) >>> certifier((self.key, self.count)) :param tuple value: The value to be certified. :param func certifier: A function to be called on each value in the iterable to check that it is valid. :param int min_len: The minimum acceptable length for the iterable. If None, the minimum length is not checked. :param int max_len: The maximum acceptable length for the iterable. If None, the maximum length is not checked. :param bool required: Whether the value can't be `None`. Defaults to True. :param tuple schema: The schema against which the value should be checked. For a single-item tuple make sure to add a trailing comma to the schema tuple, for example: schema=(certify_int(),) :return: The certified tuple. :rtype: tuple :raises CertifierTypeError: The type is invalid :raises CertifierValueError: The value is invalid
def get_boundaries(self, filter_type, value):
    assert filter_type in self.handled_suffixes
    start = '-'
    end = '+'
    exclude = None
    if filter_type in (None, 'eq'):
        start = u'[%s%s' % (value, self.separator)
        end = start.encode('utf-8') + b'\xff'
    elif filter_type == 'gt':
        start = u'(%s' % value
        exclude = value
    elif filter_type == 'gte':
        start = u'[%s' % value
    elif filter_type == 'lt':
        end = u'(%s' % value
        exclude = value
    elif filter_type == 'lte':
        end = u'[%s%s' % (value, self.separator)
        end = end.encode('utf-8') + b'\xff'
    elif filter_type == 'startswith':
        start = u'[%s' % value
        end = start.encode('utf-8') + b'\xff'
    return start, end, exclude
Compute the boundaries to pass to zrangebylex depending on the filter type The third return value, ``exclude``, is ``None`` except for the filters `lt` and `gt`, because we cannot explicitly exclude it when querying the sorted-set For the parameters, see BaseRangeIndex.store Notes ----- For zrangebylex: - `(` means "not included" - `[` means "included" - `\xff` is the last char, it lets us express "starting with" - `-` alone means "from the very beginning" - `+` alone means "to the very end"
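A sketch of the boundaries produced for a value of 'foo', assuming an index instance whose separator is ':' (both the value and the separator are illustrative):

# filter_type None or 'eq'  -> start = u'[foo:'  end = b'[foo:\xff'  exclude = None
# filter_type 'gt'          -> start = u'(foo'   end = '+'           exclude = 'foo'
# filter_type 'gte'         -> start = u'[foo'   end = '+'           exclude = None
# filter_type 'lt'          -> start = '-'       end = u'(foo'       exclude = 'foo'
# filter_type 'lte'         -> start = '-'       end = b'[foo:\xff'  exclude = None
# filter_type 'startswith'  -> start = u'[foo'   end = b'[foo\xff'   exclude = None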
def get_impala_queries(self, start_time, end_time, filter_str="", limit=100, offset=0): params = { 'from': start_time.isoformat(), 'to': end_time.isoformat(), 'filter': filter_str, 'limit': limit, 'offset': offset, } return self._get("impalaQueries", ApiImpalaQueryResponse, params=params, api_version=4)
Returns a list of queries that satisfy the filter @type start_time: datetime.datetime. Note that the datetime must either be time zone aware or specified in the server time zone. See the python datetime documentation for more details about python's time zone handling. @param start_time: Queries must have ended after this time @type end_time: datetime.datetime. Note that the datetime must either be time zone aware or specified in the server time zone. See the python datetime documentation for more details about python's time zone handling. @param end_time: Queries must have started before this time @param filter_str: A filter to apply to the queries. For example: 'user = root and queryDuration > 5s' @param limit: The maximum number of results to return @param offset: The offset into the return list @since: API v4
def unweave( target, advices=None, pointcut=None, ctx=None, depth=1, public=False, ): if advices is not None: if isroutine(advices): advices = [advices] if pointcut is None or callable(pointcut): pass elif isinstance(pointcut, string_types): pointcut = _namematcher(pointcut) else: error_msg = "Wrong pointcut to check weaving on {0}.".format(target) advice_msg = "Must be None, or be a str or a function/method." right_msg = "Not {0}".format(type(pointcut)) raise AdviceError( "{0} {1} {2}".format(error_msg, advice_msg, right_msg) ) if ctx is None: ctx = find_ctx(target) _unweave( target=target, advices=advices, pointcut=pointcut, ctx=ctx, depth=depth, depth_predicate=_publiccallable if public else callable )
Unweave advices on target with input pointcut. :param callable target: target from which to check the pointcut and unweave advices. :param pointcut: condition for unweaving advices from the joinpoint. The condition depends on its type. :type pointcut: - NoneType: advices are unweaved from target. - str: target name is compared to the pointcut regex. - function: called with target as parameter; if it returns True, advices will be unweaved from target. :param ctx: target ctx (class or instance). :param int depth: class weaving depth. :param bool public: (default False) unweave only public members :return: the intercepted functions created from input target.
def decorate(*reversed_views):
    fns = reversed_views[::-1]
    view = fns[0]
    for wrapper in fns[1:]:
        view = wrapper(view)
    return view
Provide a syntax for decorating views without nested calls. Instead of: json_api_call(etag(<hash_fn>)(<view_fn>)) you can write: decorate(json_api_call, etag(<hash_fn>), <view_fn>)
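For instance, with the wrappers named in the docstring (hash_fn and view_fn are illustrative placeholders):

>>> view = decorate(json_api_call, etag(hash_fn), view_fn)
>>> # equivalent to json_api_call(etag(hash_fn)(view_fn))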
def _open(self, skip=0): usb_device = self._get_usb_device(skip) if usb_device: usb_conf = usb_device.configurations[0] self._usb_int = usb_conf.interfaces[0][0] else: raise YubiKeyUSBHIDError('No USB YubiKey found') try: self._usb_handle = usb_device.open() self._usb_handle.detachKernelDriver(0) except Exception as error: if 'could not detach kernel driver from interface' in str(error): self._debug('The in-kernel-HID driver has already been detached\n') else: self._debug("detachKernelDriver not supported!\n") try: self._usb_handle.setConfiguration(1) except usb.USBError: self._debug("Unable to set configuration, ignoring...\n") self._usb_handle.claimInterface(self._usb_int) return True
Perform HID initialization
def update_readme_for_modules(modules): readme = parse_readme() module_docstrings = core_module_docstrings() if modules == ["__all__"]: modules = core_module_docstrings().keys() for module in modules: if module in module_docstrings: print_stderr("Updating README.md for module {}".format(module)) readme[module] = module_docstrings[module] else: print_stderr("Module {} not in core modules".format(module)) readme_file = os.path.join(modules_directory(), "README.md") with open(readme_file, "w") as f: f.write(create_readme(readme))
Update README.md, refreshing the sections for the listed module names.
async def get_bluetooth_settings(self) -> List[Setting]: bt = await self.services["avContent"]["getBluetoothSettings"]({}) return [Setting.make(**x) for x in bt]
Get bluetooth settings.
def run_model(t_output_every, output_dir=None, m=None, force_resume=True, **iterate_args): r = runner.Runner(output_dir, m, force_resume) print(r) r.iterate(t_output_every=t_output_every, **iterate_args) return r
Convenience function to combine making a Runner object, and running it for some time. Parameters ---------- m: Model Model to run. iterate_args: Arguments to pass to :meth:`Runner.iterate`. Others: see :class:`Runner`. Returns ------- r: Runner runner object after it has finished running for the required time.
def paste(location): copyData = settings.getDataFile() if not location: location = "." try: data = pickle.load(open(copyData, "rb")) speech.speak("Pasting " + data["copyLocation"] + " to current directory.") except: speech.fail("It doesn't look like you've copied anything yet.") speech.fail("Type 'hallie copy <file>' to copy a file or folder.") return process, error = subprocess.Popen(["cp", "-r", data["copyLocation"], location], stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate() if "denied" in process: speech.fail("Unable to paste your file successfully. This is most likely due to a permission issue. You can try to run me as sudo!")
paste a file or directory that has been previously copied
def redis_from_url(url): import redis url = url or "" parsed_url = urlparse(url) if parsed_url.scheme != "redis": return None kwargs = {} match = PASS_HOST_PORT.match(parsed_url.netloc) if match.group('password') is not None: kwargs['password'] = match.group('password') if match.group('host') is not None: kwargs['host'] = match.group('host') if match.group('port') is not None: kwargs['port'] = int(match.group('port')) if len(parsed_url.path) > 1: kwargs['db'] = int(parsed_url.path[1:]) return redis.StrictRedis(**kwargs)
Converts a redis URL used by celery into a `redis.Redis` object.
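Illustrative behaviour, assuming the module-level PASS_HOST_PORT regex captures optional password, host, and port groups:

>>> r = redis_from_url('redis://:s3cret@localhost:6380/2')
>>> # -> StrictRedis(password='s3cret', host='localhost', port=6380, db=2)
>>> redis_from_url('amqp://guest@localhost//') is None   # non-redis scheme
True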
def entries(self): return ContentTypeEntriesProxy(self._client, self.space.id, self._environment_id, self.id)
Provides access to entry management methods for the given content type. API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/entries :return: :class:`ContentTypeEntriesProxy <contentful_management.content_type_entries_proxy.ContentTypeEntriesProxy>` object. :rtype: contentful_management.content_type_entries_proxy.ContentTypeEntriesProxy Usage: >>> content_type_entries_proxy = content_type.entries() <ContentTypeEntriesProxy space_id="cfexampleapi" environment_id="master" content_type_id="cat">
def forward(self, inputs, lengths): x = self.embedder(inputs) x = self.dropout(x) x = pack_padded_sequence(x, lengths.cpu().numpy(), batch_first=self.batch_first) x, _ = self.rnn_layers[0](x) x, _ = pad_packed_sequence(x, batch_first=self.batch_first) x = self.dropout(x) x, _ = self.rnn_layers[1](x) for i in range(2, len(self.rnn_layers)): residual = x x = self.dropout(x) x, _ = self.rnn_layers[i](x) x = x + residual return x
Execute the encoder. :param inputs: tensor with indices from the vocabulary :param lengths: vector with sequence lengths (excluding padding) returns: tensor with encoded sequences
def part(self, *args, **kwargs): _parts = self.parts(*args, **kwargs) if len(_parts) == 0: raise NotFoundError("No part fits criteria") if len(_parts) != 1: raise MultipleFoundError("Multiple parts fit criteria") return _parts[0]
Retrieve single KE-chain part. Uses the same interface as the :func:`parts` method but returns only a single pykechain :class:`models.Part` instance. If additional `keyword=value` arguments are provided, these are added to the request parameters. Please refer to the documentation of the KE-chain API for additional query parameters. :return: a single :class:`models.Part` :raises NotFoundError: When no `Part` is found :raises MultipleFoundError: When more than a single `Part` is found
def addPort(n: LNode, intf: Interface): d = PortTypeFromDir(intf._direction) ext_p = LayoutExternalPort( n, name=intf._name, direction=d, node2lnode=n._node2lnode) ext_p.originObj = originObjOfPort(intf) n.children.append(ext_p) addPortToLNode(ext_p, intf, reverseDirection=True) return ext_p
Add LayoutExternalPort for interface
def main(): from spyder.utils.qthelpers import qapplication app = qapplication() if os.name == 'nt': dialog = WinUserEnvDialog() else: dialog = EnvDialog() dialog.show() app.exec_()
Run the environment variables editor (the Windows-specific dialog when running on Windows, the generic one otherwise).
def footprints_from_address(address, distance, footprint_type='building', retain_invalid=False): point = geocode(query=address) return footprints_from_point(point, distance, footprint_type=footprint_type, retain_invalid=retain_invalid)
Get footprints within some distance north, south, east, and west of an address. Parameters ---------- address : string the address to geocode to a lat-long point distance : numeric distance in meters footprint_type : string type of footprint to be downloaded. OSM tag key e.g. 'building', 'landuse', 'place', etc. retain_invalid : bool if False discard any footprints with an invalid geometry Returns ------- GeoDataFrame
def list_repos(self, envs=[], query='/repositories/'): juicer.utils.Log.log_debug( "List Repos In: %s", ", ".join(envs)) repo_lists = {} for env in envs: repo_lists[env] = [] for env in envs: _r = self.connectors[env].get(query) if _r.status_code == Constants.PULP_GET_OK: for repo in juicer.utils.load_json_str(_r.content): if re.match(".*-{0}$".format(env), repo['id']): repo_lists[env].append(repo['display_name']) else: _r.raise_for_status() return repo_lists
List repositories in specified environments
def get_dweets_for(thing_name, key=None, session=None):
    if key is not None:
        params = {'key': key}
    else:
        params = None
    # forward the caller's session rather than discarding it
    return _request('get', '/get/dweets/for/{0}'.format(thing_name),
                    params=params, session=session)
Read all the dweets for a dweeter
def map(self, func, *columns): if not columns: return map(func, self.rows) else: values = (self.values(column) for column in columns) result = [map(func, v) for v in values] if len(columns) == 1: return result[0] else: return result
Map a function to rows, or to given columns
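Usage sketch against a hypothetical table t with columns 'a' and 'b':

>>> t.map(str.upper, 'a')    # one column  -> a single mapped sequence
>>> t.map(float, 'a', 'b')   # two columns -> one mapped sequence per column
>>> t.map(tuple)             # no columns  -> map over whole rows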
def set_elapsed_time(self, client): related_clients = self.get_related_clients(client) for cl in related_clients: if cl.timer is not None: client.create_time_label() client.t0 = cl.t0 client.timer.timeout.connect(client.show_time) client.timer.start(1000) break
Set elapsed time for slave clients.
def get_welcome_response(): session_attributes = {} card_title = "Welcome" speech_output = "Welcome to the Alexa Skills Kit sample. " \ "Please tell me your favorite color by saying, " \ "my favorite color is red" reprompt_text = "Please tell me your favorite color by saying, " \ "my favorite color is red." should_end_session = False return build_response(session_attributes, build_speechlet_response( card_title, speech_output, reprompt_text, should_end_session))
If we wanted to initialize the session to have some attributes we could add those here
def extract_ids(text, extractors): for extractor in extractors: for id in extractor.extract(text): yield id
Uses `extractors` to extract citation identifiers from a text. :Parameters: text : str The text to process extractors : `list`(`extractor`) A list of extractors to apply to the text :Returns: `iterable` -- a generator of extracted identifiers
def render_template(template_name, template_getter=get_app_template): def wrapper(func): template = template_getter(template_name) def _wraped(self, request, context, *args, **kwargs): res = func(self, request, context, *args, **kwargs) if isinstance(res, dict): return template.render(**res) else: return res return _wraped return wrapper
Decorator to specify which template to use for wrapped views. It renders the specified template with the dictionary returned by the wrapped view as the context, and returns the resulting string. If the returned value is not a dictionary, it does nothing and just returns the result.
def load(self, rule_type, quiet = False): if self.filename and os.path.exists(self.filename): try: with open(self.filename, 'rt') as f: ruleset = json.load(f) self.about = ruleset['about'] if 'about' in ruleset else '' self.rules = {} for filename in ruleset['rules']: self.rules[filename] = [] for rule in ruleset['rules'][filename]: self.handle_rule_versions(filename, rule_type, rule) except Exception as e: printException(e) printError('Error: ruleset file %s contains malformed JSON.' % self.filename) self.rules = [] self.about = '' else: self.rules = [] if not quiet: printError('Error: the file %s does not exist.' % self.filename)
Open a JSON file defining a ruleset and load it into a Ruleset object :param quiet: if True, suppress the error message when the ruleset file does not exist :return:
def set_widgets(self): if self.parent.aggregation_layer: aggr = self.parent.aggregation_layer.name() else: aggr = self.tr('no aggregation') html = self.tr('Please ensure the following information ' 'is correct and press Run.') html += '<br/><table cellspacing="4">' html += ('<tr>' ' <td><b>%s</b></td><td></td><td>%s</td>' '</tr><tr>' ' <td><b>%s</b></td><td></td><td>%s</td>' '</tr><tr>' ' <td><b>%s</b></td><td></td><td>%s</td>' '</tr><tr>' ' <td colspan="3"></td>' '</tr>' % ( self.tr('hazard layer').capitalize().replace( ' ', '&nbsp;'), self.parent.hazard_layer.name(), self.tr('exposure layer').capitalize().replace( ' ', '&nbsp;'), self.parent.exposure_layer.name(), self.tr('aggregation layer').capitalize().replace( ' ', '&nbsp;'), aggr)) self.lblSummary.setText(html)
Set widgets on the Summary tab.
def save(self): "Saves the state to the state file" with open(self.state_file, "w") as fh: json.dump({ "hosts": self.hosts, "stats": self.stats, }, fh)
Saves the state to the state file
def add(self, email): if email not in self._collaborators: self._collaborators[email] = ShareRequestValue.Add self._dirty = True
Add a collaborator. Args: email (str): Collaborator email address.
def _message(self, request_cls, destination=None, message_id=0, consent=None, extensions=None, sign=False, sign_prepare=False, nsprefix=None, sign_alg=None, digest_alg=None, **kwargs): if not message_id: message_id = sid() for key, val in self.message_args(message_id).items(): if key not in kwargs: kwargs[key] = val req = request_cls(**kwargs) if destination: req.destination = destination if consent: req.consent = "true" if extensions: req.extensions = extensions if nsprefix: req.register_prefix(nsprefix) if self.msg_cb: req = self.msg_cb(req) reqid = req.id if sign: return reqid, self.sign(req, sign_prepare=sign_prepare, sign_alg=sign_alg, digest_alg=digest_alg) else: logger.info("REQUEST: %s", req) return reqid, req
Some parameters appear in all requests so simplify by doing it in one place :param request_cls: The specific request type :param destination: The recipient :param message_id: A message identifier :param consent: Whether the principal have given her consent :param extensions: Possible extensions :param sign: Whether the request should be signed or not. :param sign_prepare: Whether the signature should be prepared or not. :param kwargs: Key word arguments specific to one request type :return: A tuple containing the request ID and an instance of the request_cls
def create_page(slug, post_data): logger.info('Call create Page') if MWiki.get_by_uid(slug): return False title = post_data['title'].strip() if len(title) < 2: return False return MWiki.__create_rec(slug, '2', post_data=post_data)
Create the page with the given slug, unless a page with that slug already exists.
def acquire(self):
    start_time = time.time()
    while True:
        try:
            self.fd = os.open(self.lockfile,
                              os.O_CREAT | os.O_EXCL | os.O_RDWR)
            break
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
            if (time.time() - start_time) >= self.timeout:
                raise FileLockException("%s: Timeout occurred." % self.lockfile)
            time.sleep(self.delay)
    self.is_locked = True
Acquire the lock, if possible. If the lock is in use, it checks again every `delay` seconds. It does this until it either gets the lock or exceeds `timeout` seconds, in which case it throws an exception.
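A minimal usage sketch, assuming the enclosing class is named FileLock, takes the lockfile path plus timeout and delay at construction, and exposes a matching release():

lock = FileLock('/tmp/myapp.lock')   # hypothetical constructor
lock.acquire()                       # polls every `delay` seconds, up to `timeout`
try:
    pass                             # ... critical section ...
finally:
    lock.release()                   # assumed counterpart that unlinks the lockfile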
def _terminate(self): def generate_body(): d = defer.succeed(None) d.addBoth(defer.drop_param, self.agent.shutdown_agent) d.addBoth(lambda _: self.delete_document(self._descriptor)) return d return self._terminate_procedure(generate_body)
Shutdown agent gently removing the descriptor and notifying partners.
def replace_in_files(search, replace, depth=0, paths=None, confirm=True):
    if paths is None:
        paths = _s.dialogs.MultipleFiles('DIS AND DAT|*.*')
    if paths == []:
        return

    for path in paths:
        lines = read_lines(path)
        if depth:
            N = min(len(lines), depth)
        else:
            N = len(lines)

        for n in range(0, N):
            if lines[n].find(search) >= 0:
                lines[n] = lines[n].replace(search, replace)
                print(path.split(_os.path.pathsep)[-1] + ': "' + lines[n] + '"')

        if not confirm:
            _os.rename(path, path + ".backup")
            write_to_file(path, join(lines, ''))

    if confirm:
        if input("yes? ") == "yes":
            replace_in_files(search, replace, depth, paths, False)

    return
Does a line-by-line search and replace, but only up to the "depth" line.
def cache_property(key, empty, type): return property(lambda x: x._get_cache_value(key, empty, type), lambda x, v: x._set_cache_value(key, v, type), lambda x: x._del_cache_value(key), 'accessor for %r' % key)
Return a new property object for a cache header. Useful if you want to add support for a cache extension in a subclass.
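A sketch of how a subclass might register a cache extension, assuming a base class that provides the _get/_set/_del_cache_value accessors this property relies on:

class ExtendedCacheControl(CacheControlBase):   # hypothetical base class
    # exposes the 'stale-while-revalidate' extension as a typed property
    stale_while_revalidate = cache_property('stale-while-revalidate', None, int)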
def send_email(self, msg, tag=None): try: return self._send_email(msg, tag) except: self.exceptions.append(straceback()) return -2
Send an e-mail before completing the shutdown. Returns 0 on success, or -2 if sending raised an exception.
def clean_readme(fname): with codecs.open(fname, 'r', 'utf-8') as f: return ''.join( re.sub(r':\w+:`([^`]+?)( <[^<>]+>)?`', r'``\1``', line) for line in f if not (line.startswith('.. currentmodule') or line.startswith('.. toctree')) )
Cleanup README.rst for proper PyPI formatting.
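The substitution rewrites Sphinx cross-reference roles into plain literals, for example:

>>> import re
>>> re.sub(r':\w+:`([^`]+?)( <[^<>]+>)?`', r'``\1``',
...        ':class:`Widget <pkg.Widget>` and :func:`run`')
'``Widget`` and ``run``'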
def appendAnchor(self, name=None, position=None, color=None, anchor=None): identifier = None if anchor is not None: anchor = normalizers.normalizeAnchor(anchor) if name is None: name = anchor.name if position is None: position = anchor.position if color is None: color = anchor.color if anchor.identifier is not None: existing = set([a.identifier for a in self.anchors if a.identifier is not None]) if anchor.identifier not in existing: identifier = anchor.identifier name = normalizers.normalizeAnchorName(name) position = normalizers.normalizeCoordinateTuple(position) if color is not None: color = normalizers.normalizeColor(color) identifier = normalizers.normalizeIdentifier(identifier) return self._appendAnchor(name, position=position, color=color, identifier=identifier)
Append an anchor to this glyph. >>> anchor = glyph.appendAnchor("top", (10, 20)) This will return a :class:`BaseAnchor` object representing the new anchor in the glyph. ``name`` indicated the name to be assigned to the anchor. It must be a :ref:`type-string` or ``None``. ``position`` indicates the x and y location to be applied to the anchor. It must be a :ref:`type-coordinate` value. ``color`` indicates the color to be applied to the anchor. It must be a :ref:`type-color` or ``None``. >>> anchor = glyph.appendAnchor("top", (10, 20), color=(1, 0, 0, 1)) ``anchor`` may be a :class:`BaseAnchor` object from which attribute values will be copied. If ``name``, ``position`` or ``color`` are specified as arguments, those values will be used instead of the values in the given anchor object.
def _removepkg(self, package): try: subprocess.call("removepkg {0} {1}".format(self.flag, package), shell=True) if os.path.isfile(self.dep_path + package): os.remove(self.dep_path + package) except subprocess.CalledProcessError as er: print(er) raise SystemExit()
removepkg Slackware command
def kill(self, id, signal=signal.SIGTERM): args = { 'id': id, 'signal': int(signal), } self._kill_chk.check(args) return self._client.json('job.kill', args)
Kill a job with given id :WARNING: beware of what you kill; if you kill redis, for example, core0 or coreX won't be reachable :param id: job id to kill :param signal: signal to send (defaults to SIGTERM)
def get_all_nodes(self, addr, is_syscall=None, anyaddr=False): results = [ ] for cfg_node in self.graph.nodes(): if cfg_node.addr == addr or (anyaddr and cfg_node.size is not None and cfg_node.addr <= addr < (cfg_node.addr + cfg_node.size) ): if is_syscall and cfg_node.is_syscall: results.append(cfg_node) elif is_syscall is False and not cfg_node.is_syscall: results.append(cfg_node) else: results.append(cfg_node) return results
Get all CFGNodes whose address is the specified one. :param addr: Address of the node :param is_syscall: True returns the syscall node, False returns the normal CFGNode, None returns both :return: all CFGNodes
def get_remote_chassis_id_mac(self, tlv_data): ret, parsed_val = self._check_common_tlv_format( tlv_data, "MAC:", "Chassis ID TLV") if not ret: return None mac = parsed_val[1].split('\n') return mac[0].strip()
Returns Remote Chassis ID MAC from the TLV.
def to_dict(self): ret = merge_dicts(self.__attributes__, self.__relations__, self.__fields__) ret = {k : v.value for k,v in ret.items()} ret['maps'] = {k : v.value for k,v in self.maps.items()} return ret
Return a dict representing the ChemicalEntity that can be read back using from_dict.
def all_props(self): d = self.arg_props d.update(self.props) return d
Return a dictionary with the values of all children, and placeholders for all of the section arguments. It combines props and arg_props
def is_transition_matrix(T, tol=1e-12):
    T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
    if _issparse(T):
        return sparse.assessment.is_transition_matrix(T, tol)
    else:
        return dense.assessment.is_transition_matrix(T, tol)
Check if the given matrix is a transition matrix. Parameters ---------- T : (M, M) ndarray or scipy.sparse matrix Matrix to check tol : float (optional) Floating point tolerance to check with Returns ------- is_transition_matrix : bool True, if T is a valid transition matrix, False otherwise Notes ----- A valid transition matrix :math:`P=(p_{ij})` has non-negative elements, :math:`p_{ij} \geq 0`, and elements of each row sum up to one, :math:`\sum_j p_{ij} = 1`. Matrices with this property are also called stochastic matrices. Examples -------- >>> import numpy as np >>> from msmtools.analysis import is_transition_matrix >>> A = np.array([[0.4, 0.5, 0.3], [0.2, 0.4, 0.4], [-1, 1, 1]]) >>> is_transition_matrix(A) False >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> is_transition_matrix(T) True
def _error_is_decreasing(self, last_error): current_error = self._compute_error() is_decreasing = current_error < last_error return is_decreasing, current_error
True if current error is less than last_error.
def transform(self, trajs_tuple, y=None): return [self.partial_transform(traj_zip) for traj_zip in zip(*trajs_tuple)]
Featurize several trajectories. Parameters ---------- traj_list : list(mdtraj.Trajectory) Trajectories to be featurized. Returns ------- features : list(np.ndarray), length = len(traj_list) The featurized trajectories. features[i] is the featurized version of traj_list[i] and has shape (n_samples_i, n_features)
def is_merge_origin(self): if self.gridSpan > 1 and not self.vMerge: return True if self.rowSpan > 1 and not self.hMerge: return True return False
True if cell is top-left in merged cell range.
def __wrap_with_tuple(self) -> tuple: l = list() length = len(self.data) while self.idx < length: l.append(self.__parse()) return tuple(l)
Returns a tuple of all nested bencode elements.
def get_feature_subset(self, subset_idx):
    subset_idx = np.asarray(subset_idx)
    # raise if any index is out of bounds, checking both the upper and lower limits
    if not (max(subset_idx) < self.__num_features and min(subset_idx) >= 0):
        raise UnboundLocalError('indices out of range for the dataset. '
                                'Max index: {} Min index : 0'.format(
                                    self.__num_features))
    sub_data = {sample: features[subset_idx]
                for sample, features in self.__data.items()}
    new_descr = 'Subset features derived from: \n ' + self.__description
    subdataset = MLDataset(data=sub_data,
                           labels=self.__labels,
                           classes=self.__classes,
                           description=new_descr,
                           feature_names=self.__feature_names[subset_idx])
    return subdataset
Returns the subset of features indexed numerically. Parameters ---------- subset_idx : list, ndarray List of indices to features to be returned Returns ------- MLDataset : MLDataset with subset of features requested. Raises ------ UnboundLocalError If input indices are out of bounds for the dataset.
def LengthMeters(self): assert(len(self._points) > 0) length = 0 for i in range(0, len(self._points) - 1): length += self._points[i].GetDistanceMeters(self._points[i+1]) return length
Return length of this polyline in meters.
def dedent_block_string_value(raw_string: str) -> str:
    lines = raw_string.splitlines()
    common_indent = None
    for line in lines[1:]:
        indent = leading_whitespace(line)
        if indent < len(line) and (common_indent is None or indent < common_indent):
            common_indent = indent
            if common_indent == 0:
                break
    if common_indent:
        lines[1:] = [line[common_indent:] for line in lines[1:]]
    while lines and not lines[0].strip():
        lines = lines[1:]
    while lines and not lines[-1].strip():
        lines = lines[:-1]
    return "\n".join(lines)
Produce the value of a block string from its parsed raw value. Similar to CoffeeScript's block string, Python's docstring trim or Ruby's strip_heredoc. This implements the GraphQL spec's BlockStringValue() static algorithm.
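A worked example: the common indent of the lines after the first is removed, and blank leading and trailing lines are dropped:

>>> dedent_block_string_value('\n    Hello,\n      World!\n\n    Yours,\n      GraphQL.\n')
'Hello,\n  World!\n\nYours,\n  GraphQL.'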
def _gate_name(self, gate): try: name = gate.tex_str() except AttributeError: name = str(gate) return name
Return the string representation of the gate. Tries to use gate.tex_str and, if that is not available, uses str(gate) instead. :param string gate: Gate object of which to get the name / LaTeX representation. :return: LaTeX gate name. :rtype: string
def _add_to_dict(t, container, name, value): if name in container: raise Exception("%s '%s' already exists" % (t, name)) else: container[name] = value
Adds an item to a dictionary, or raises an exception if an item with the specified key already exists in the dictionary.
def run(self, executable: Executable, memory_map: Dict[str, List[Union[int, float]]] = None) -> np.ndarray: self.qam.load(executable) if memory_map: for region_name, values_list in memory_map.items(): for offset, value in enumerate(values_list): self.qam.write_memory(region_name=region_name, offset=offset, value=value) return self.qam.run() \ .wait() \ .read_memory(region_name='ro')
Run a quil executable. If the executable contains declared parameters, then a memory map must be provided, which defines the runtime values of these parameters. :param executable: The program to run. You are responsible for compiling this first. :param memory_map: The mapping of declared parameters to their values. The values are a list of floats or integers. :return: A numpy array of shape (trials, len(ro-register)) that contains 0s and 1s.
def repeat(self, repeats, *args, **kwargs): nv.validate_repeat(args, kwargs) values = self._data.repeat(repeats) return type(self)(values.view('i8'), dtype=self.dtype)
Repeat elements of an array. See Also -------- numpy.ndarray.repeat
def identifier_md5(self): as_int = (self.identifier * 1e4).astype(np.int64) hashed = util.md5_object(as_int.tostring(order='C')) return hashed
Return an MD5 of the identifier
def parse_host(entity, default_port=DEFAULT_PORT):
    host = entity
    port = default_port
    if entity[0] == '[':
        host, port = parse_ipv6_literal_host(entity, default_port)
    elif entity.endswith(".sock"):
        return entity, default_port
    elif entity.find(':') != -1:
        if entity.count(':') > 1:
            raise ValueError("Reserved characters such as ':' must be "
                             "escaped according to RFC 2396. An IPv6 "
                             "address literal must be enclosed in '[' "
                             "and ']' according to RFC 2732.")
        host, port = host.split(':', 1)
    if isinstance(port, string_type):
        if not port.isdigit() or int(port) > 65535 or int(port) <= 0:
            raise ValueError("Port must be an integer between 0 and 65535: %s"
                             % (port,))
        port = int(port)
    return host.lower(), port
Validates a host string Returns a 2-tuple of host followed by port where port is default_port if it wasn't specified in the string. :Parameters: - `entity`: A host or host:port string where host could be a hostname or IP address. - `default_port`: The port number to use when one wasn't specified in entity.
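Illustrative results, assuming DEFAULT_PORT is 27017 (this appears to be the MongoDB driver's helper):

>>> parse_host('Example.com')
('example.com', 27017)
>>> parse_host('example.com:27018')
('example.com', 27018)
>>> parse_host('/tmp/mongodb-27017.sock')
('/tmp/mongodb-27017.sock', 27017)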
def execute_request(conn, classname, max_open, max_pull): start = ElapsedTimer() result = conn.OpenEnumerateInstances(classname, MaxObjectCount=max_open) print('open rtn eos=%s context=%s, count=%s time=%s ms' % (result.eos, result.context, len(result.instances), start.elapsed_ms())) insts = result.instances pull_count = 0 while not result.eos: pull_count += 1 op_start = ElapsedTimer() result = conn.PullInstancesWithPath(result.context, MaxObjectCount=max_pull) insts.extend(result.instances) print('pull rtn eos=%s context=%s, insts=%s time=%s ms' % (result.eos, result.context, len(result.instances), op_start.elapsed_ms())) print('Result instance count=%s pull count=%s time=%.2f sec' % \ (len(insts), pull_count, start.elapsed_sec())) return insts
Enumerate instances defined by the function's classname argument using OpenEnumerateInstances and PullInstancesWithPath. * classname - Classname for the enumeration. * max_open - defines the maximum number of instances for the server to return for the open operation. * max_pull - defines the maximum number of instances for the WBEM server to return for each pull operation. Displays results of each open or pull operation including size, return parameters, and time to execute. Any exception exits the function.
def use(wcspkg, raise_err=True): global coord_types, wcs_configured, WCS if wcspkg not in common.custom_wcs: modname = 'wcs_%s' % (wcspkg) path = os.path.join(wcs_home, '%s.py' % (modname)) try: my_import(modname, path) except ImportError: return False if wcspkg in common.custom_wcs: bnch = common.custom_wcs[wcspkg] WCS = bnch.wrapper_class coord_types = bnch.coord_types wcs_configured = True return True return False
Choose WCS package.
def normalize(self) -> 'State': tensor = self.tensor / bk.ccast(bk.sqrt(self.norm())) return State(tensor, self.qubits, self._memory)
Normalize the state
def base64url_decode(input): rem = len(input) % 4 if rem > 0: input += b'=' * (4 - rem) return base64.urlsafe_b64decode(input)
Helper method to base64url_decode a string. Args: input (str): A base64url_encoded string to decode.
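For example, the helper restores the stripped '=' padding before decoding (note it operates on bytes):

>>> base64url_decode(b'aGVsbG8')
b'hello'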
def get_coord_line_number(self,coord): if coord[0] in self._coords: if coord[1] in self._coords[coord[0]]: return self._coords[coord[0]][coord[1]] return None
Return the one-indexed line number given the coordinates
def serialize(self, value, **kwargs): if types.Type.is_type(self.attr_type): try: value = self.accessor.get(value, **kwargs) except (AttributeError, KeyError): if not hasattr(self, "default") and self.required: raise value = self.default() if callable(self.default) else self.default return self.attr_type.serialize(value, **_get_context(self._attr_type_serialize_argspec, kwargs)) return self.attr_type
Serialize the attribute of the input data. Gets the attribute value with accessor and converts it using the type serialization. Schema will place this serialized value into corresponding compartment of the HAL structure with the name of the attribute as a key. :param value: Value to get the attribute value from. :return: Serialized attribute value.
def log_transform(rates): transformed = [] for key in ['missense', 'nonsense', 'splice_lof', 'splice_region', 'synonymous']: try: value = math.log10(rates[key]) except ValueError: value = "NA" except KeyError: continue transformed.append(value) return '\t'.join(map(str, transformed))
Log10-transform each mutation rate, reporting 'NA' for zero or negative values and skipping rate categories that are absent from the dict.
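For example, a zero rate yields 'NA' (math.log10 raises ValueError) and absent keys are skipped:

>>> log_transform({'missense': 1e-5, 'nonsense': 0.0, 'synonymous': 1e-6})
>>> # -> '-5.0\tNA\t-6.0' (zero gave 'NA'; the splice_* keys were skipped)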
def stayOpen(self): if not self._wantToClose: self.show() self.setGeometry(self._geometry)
optional dialog restore
def get_val_by_text(root, search):
    found_flag = False
    for el in root.iter():
        if found_flag:
            return el
        if el.text == search:
            found_flag = True
From MeasYaps XML root find next sibling of node matching 'search'. MeasYaps looks like: <value>Key</value> <value>Value</value> Thus 'search' is the Key and we want to find the node that has the Value. We return the node containing the desired Value. Arguments: root (Element) root XML node (xml.etree.ElementTree Element) search (String) String to match Element.text
def group_citation_edges(edges: Iterable[EdgeTuple]) -> Iterable[Tuple[str, Iterable[EdgeTuple]]]: return itt.groupby(edges, key=_citation_sort_key)
Return an iterator over pairs of citation values and their corresponding edge iterators.
def weld_filter(array, weld_type, bool_array):
    obj_id, weld_obj = create_weld_object(array)
    bool_obj_id = get_weld_obj_id(weld_obj, bool_array)
    # The Weld template literal was lost in extraction; only its .format()
    # placeholders (array, bool_array, type) survive.
    weld_template = ...
    weld_obj.weld_code = weld_template.format(array=obj_id,
                                              bool_array=bool_obj_id,
                                              type=weld_type)
    return weld_obj
Returns a new array only with the elements with a corresponding True in bool_array. Parameters ---------- array : numpy.ndarray or WeldObject Input data. weld_type : WeldType Type of the elements in the input array. bool_array : numpy.ndarray or WeldObject Array of bool with True for elements in array desired in the result array. Returns ------- WeldObject Representation of this computation.
def mark_running(self): with self._lock: self._set_state(self._RUNNING, self._PAUSED)
Moves the service to the Running state. Raises if the service is not currently in the Paused state.
def readline(self, size=-1):
    if self.closed:
        raise ValueError("I/O operation on closed file")
    pos = self.buffer.find(b"\n") + 1
    if pos == 0:
        while True:
            buf = self.fileobj.read(self.blocksize)
            self.buffer += buf
            if not buf or b"\n" in buf:
                pos = self.buffer.find(b"\n") + 1
                if pos == 0:
                    pos = len(self.buffer)
                break
    if size != -1:
        pos = min(size, pos)
    buf = self.buffer[:pos]
    self.buffer = self.buffer[pos:]
    self.position += len(buf)
    return buf
Read one entire line from the file. If size is present and non-negative, return a string with at most that size, which may be an incomplete line.
def runContainer(image, **kwargs): container = None try: container = client.containers.run(image, **kwargs) if "name" in kwargs.keys(): print("Container", kwargs["name"], "is now running.") except ContainerError as exc: eprint("Failed to run container") raise exc except ImageNotFound as exc: eprint("Failed to find image to run as a docker container") raise exc except APIError as exc: eprint("Unhandled error") raise exc return container
Run a docker container using a given image; passing keyword arguments documented to be accepted by docker's client.containers.run function No extra side effects. Handles and reraises ContainerError, ImageNotFound, and APIError exceptions.
def get_instance(self, payload): return WorkerChannelInstance( self._version, payload, workspace_sid=self._solution['workspace_sid'], worker_sid=self._solution['worker_sid'], )
Build an instance of WorkerChannelInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.worker.worker_channel.WorkerChannelInstance :rtype: twilio.rest.taskrouter.v1.workspace.worker.worker_channel.WorkerChannelInstance
def load(cls, path, name): filepath = aux.joinpath(path, name + '.proteindb') with zipfile.ZipFile(filepath, 'r', allowZip64=True) as containerZip: proteinsString = io.TextIOWrapper(containerZip.open('proteins'), encoding='utf-8' ).read() peptidesString = io.TextIOWrapper(containerZip.open('peptides'), encoding='utf-8' ).read() infoString = io.TextIOWrapper(containerZip.open('info'), encoding='utf-8' ).read() newInstance = cls() newInstance.proteins = json.loads(proteinsString, object_hook=ProteinSequence.jsonHook) newInstance.peptides = json.loads(peptidesString, object_hook=PeptideSequence.jsonHook) newInstance.info.update(json.loads(infoString)) return newInstance
Imports the specified ``proteindb`` file from the hard disk. :param path: file directory of the ``proteindb`` file :param name: filename without the file extension ".proteindb" .. note:: this generates rather large files, which actually take longer to import than to newly generate. Maybe saving / loading should be limited to the protein database without in silico digestion information.
def tangent_lineation_plot(ax, strikes, dips, rakes): rake_x, rake_y = mplstereonet.rake(strikes, dips, rakes) mag = np.hypot(rake_x, rake_y) u, v = -rake_x / mag, -rake_y / mag pole_x, pole_y = mplstereonet.pole(strikes, dips) arrows = ax.quiver(pole_x, pole_y, u, v, width=1, headwidth=4, units='dots', pivot='middle') return arrows
Makes a tangent lineation plot for normal faults with the given strikes, dips, and rakes.
def directed_tripartition(seq): for a, b, c in directed_tripartition_indices(len(seq)): yield (tuple(seq[i] for i in a), tuple(seq[j] for j in b), tuple(seq[k] for k in c))
Generator over all directed tripartitions of a sequence. Args: seq (Iterable): a sequence. Yields: tuple[tuple]: A tripartition of ``seq``. Example: >>> seq = (2, 5) >>> list(directed_tripartition(seq)) # doctest: +NORMALIZE_WHITESPACE [((2, 5), (), ()), ((2,), (5,), ()), ((2,), (), (5,)), ((5,), (2,), ()), ((), (2, 5), ()), ((), (2,), (5,)), ((5,), (), (2,)), ((), (5,), (2,)), ((), (), (2, 5))]
def create_domain(provider, context, **kwargs): session = get_session(provider.region) client = session.client("route53") domain = kwargs.get("domain") if not domain: logger.error("domain argument or BaseDomain variable not provided.") return False zone_id = create_route53_zone(client, domain) return {"domain": domain, "zone_id": zone_id}
Create a domain within route53. Args: provider (:class:`stacker.providers.base.BaseProvider`): provider instance context (:class:`stacker.context.Context`): context instance Returns: a dict with the ``domain`` and ``zone_id`` on success, or False if no domain was provided.
def decode_cert(cert): ret_dict = {} subject_xname = X509_get_subject_name(cert.value) ret_dict["subject"] = _create_tuple_for_X509_NAME(subject_xname) notAfter = X509_get_notAfter(cert.value) ret_dict["notAfter"] = ASN1_TIME_print(notAfter) peer_alt_names = _get_peer_alt_names(cert) if peer_alt_names is not None: ret_dict["subjectAltName"] = peer_alt_names return ret_dict
Convert an X509 certificate into a Python dictionary This function converts the given X509 certificate into a Python dictionary in the manner established by the Python standard library's ssl module.
def run_suite(case, config, summary): m = _load_case_module(case, config) result = m.run(case, config) summary[case] = _summarize_result(m, result) _print_summary(m, case, summary) if result['Type'] == 'Book': for name, page in six.iteritems(result['Data']): functions.create_page_from_template("validation.html", os.path.join(livvkit.index_dir, "validation", name + ".html")) functions.write_json(page, os.path.join(livvkit.output_dir, "validation"), name + ".json") else: functions.create_page_from_template("validation.html", os.path.join(livvkit.index_dir, "validation", case + ".html")) functions.write_json(result, os.path.join(livvkit.output_dir, "validation"), case + ".json")
Run the full suite of validation tests
def _dscl(cmd, ctype='create'): if __grains__['osrelease_info'] < (10, 8): source, noderoot = '.', '' else: source, noderoot = 'localhost', '/Local/Default' if noderoot: cmd[0] = noderoot + cmd[0] return __salt__['cmd.run_all']( ['dscl', source, '-' + ctype] + cmd, output_loglevel='quiet' if ctype == 'passwd' else 'debug', python_shell=False )
Run a dscl -create command
def parse_netloc(scheme, netloc): auth, _netloc = netloc.split('@') sender, token = auth.split(':') if ':' in _netloc: domain, port = _netloc.split(':') port = int(port) else: domain = _netloc if scheme == 'https': port = 443 else: port = 80 return dict(sender=sender, token=token, domain=domain, port=port)
Parse netloc string.
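Illustrative example of the parsed fields:

>>> parse_netloc('https', 'alice:s3cr3t@api.example.com')
{'sender': 'alice', 'token': 's3cr3t', 'domain': 'api.example.com', 'port': 443}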
def inflate(deflated_vector): dv = json.loads(deflated_vector) result = np.zeros(5555) for n in dv['indices']: result[int(n)] = dv['indices'][n] return result
Given a deflated vector, inflate it into a np array and return it
def check_git(): try: with open(os.devnull, "wb") as devnull: subprocess.check_call(["git", "--version"], stdout=devnull, stderr=devnull) except: raise RuntimeError("Please make sure git is installed and on your path.")
Check if git command is available.
def _factorize_from_iterables(iterables): if len(iterables) == 0: return [[], []] return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
A higher-level wrapper over `_factorize_from_iterable`. *This is an internal function* Parameters ---------- iterables : list-like of list-likes Returns ------- codes_list : list of ndarrays categories_list : list of Indexes Notes ----- See `_factorize_from_iterable` for more info.
def sleep(self, unique_id, delay, configs=None): self.pause(unique_id, configs) time.sleep(delay) self.resume(unique_id, configs)
Pauses the process for the specified delay and then resumes it :Parameter unique_id: the name of the process :Parameter delay: delay time in seconds
def ext_publish(self, instance, loop, *args, **kwargs): if self.external_signaller is not None: return self.external_signaller.publish_signal(self, instance, loop, args, kwargs)
If 'external_signaller' is defined, calls its publish method to notify external event systems. This is for internal usage only, but it's documented because it's part of the interface with external notification systems.
def load_suite_from_stdin(self): suite = unittest.TestSuite() rules = Rules("stream", suite) line_generator = self._parser.parse_stdin() return self._load_lines("stream", line_generator, suite, rules)
Load a test suite with test lines from the TAP stream on STDIN. :returns: A ``unittest.TestSuite`` instance
def GetEntries(self, parser_mediator, match=None, **unused_kwargs): if 'RememberedNetworks' not in match: return for wifi in match['RememberedNetworks']: ssid = wifi.get('SSIDString', 'UNKNOWN_SSID') security_type = wifi.get('SecurityType', 'UNKNOWN_SECURITY_TYPE') event_data = plist_event.PlistTimeEventData() event_data.desc = ( '[WiFi] Connected to network: <{0:s}> using security {1:s}').format( ssid, security_type) event_data.key = 'item' event_data.root = '/RememberedNetworks' datetime_value = wifi.get('LastConnected', None) if datetime_value: event = time_events.PythonDatetimeEvent( datetime_value, definitions.TIME_DESCRIPTION_WRITTEN) else: date_time = dfdatetime_semantic_time.SemanticTime('Not set') event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME) parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts relevant Airport entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
def main(): try: command = sys.argv[1] except IndexError: return error_message() try: module = importlib.import_module('i18n.%s' % command) module.main.args = sys.argv[2:] except (ImportError, AttributeError): return error_message() return module.main()
Executes the given command. Returns error_message if command is not valid. Returns: Output of the given command or error message if command is not valid.
def abort (aggregate): while True: try: aggregate.abort() aggregate.finish() aggregate.end_log_output(interrupt=True) break except KeyboardInterrupt: log.warn(LOG_CHECK, _("user abort; force shutdown")) aggregate.end_log_output(interrupt=True) abort_now()
Helper function to ensure a clean shutdown.
def ned2geodetic(n: float, e: float, d: float, lat0: float, lon0: float, h0: float, ell: Ellipsoid = None, deg: bool = True) -> Tuple[float, float, float]: x, y, z = enu2ecef(e, n, -d, lat0, lon0, h0, ell, deg=deg) return ecef2geodetic(x, y, z, ell, deg=deg)
Converts North, East, Down to target latitude, longitude, altitude Parameters ---------- n : float or numpy.ndarray of float North NED coordinate (meters) e : float or numpy.ndarray of float East NED coordinate (meters) d : float or numpy.ndarray of float Down NED coordinate (meters) lat0 : float Observer geodetic latitude lon0 : float Observer geodetic longitude h0 : float observer altitude above geodetic ellipsoid (meters) ell : Ellipsoid, optional reference ellipsoid deg : bool, optional degrees input/output (False: radians in/out) Results ------- lat : float target geodetic latitude lon : float target geodetic longitude h : float target altitude above geodetic ellipsoid (meters)
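A hedged round-trip sketch, assuming this is pymap3d (which exposes the matching geodetic2ned):

import pymap3d as pm
lat0, lon0, h0 = 42.0, -82.0, 200.0                      # observer position
n, e, d = pm.geodetic2ned(42.1, -81.9, 300.0, lat0, lon0, h0)
lat, lon, h = pm.ned2geodetic(n, e, d, lat0, lon0, h0)   # recovers ~(42.1, -81.9, 300.0)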
def configure(self, sbi_config: str): config_dict = json.loads(sbi_config) self.debug_stream('SBI configuration:\n%s', json.dumps(config_dict, indent=2)) try: sbi = Subarray(self.get_name()).configure_sbi(config_dict) except jsonschema.exceptions.ValidationError as error: return json.dumps(dict(path=error.absolute_path.__str__(), schema_path=error.schema_path.__str__(), message=error.message), indent=2) except RuntimeError as error: return json.dumps(dict(error=str(error)), indent=2) return 'Accepted SBI: {}'.format(sbi.id)
Configure an SBI for this subarray. Args: sbi_config (str): SBI configuration JSON Returns: str, an acceptance message ('Accepted SBI: <id>') or a JSON-encoded error description
def get_marshmallow_schema_name(self, plugin, schema): try: return plugin.openapi.refs[schema] except KeyError: plugin.spec.definition(schema.__name__, schema=schema) return schema.__name__
Get the schema name. If the schema doesn't exist, create it.
def listen(self): import select while self.connected: r, w, e = select.select((self.ws.sock, ), (), ()) if r: self.on_message() elif e: self.subscriber.on_sock_error(e) self.disconnect()
Set up a quick connection. Returns on disconnect. After calling `connect()`, this waits for messages from the server using `select`, and notifies the subscriber of any events.
def update_index(self, name, value): kwargs = {} kwargs['index.' + name] = value if isinstance(value, basestring) else json.dumps(value) return self.post(**kwargs)
Changes the definition of a KV Store index. :param name: name of index to change :type name: ``string`` :param value: new index definition :type value: ``dict`` or ``string`` :return: Result of POST request
def Sample(self, tasks_status): sample_time = time.time() sample = '{0:f}\t{1:d}\t{2:d}\t{3:d}\t{4:d}\t{5:d}\n'.format( sample_time, tasks_status.number_of_queued_tasks, tasks_status.number_of_tasks_processing, tasks_status.number_of_tasks_pending_merge, tasks_status.number_of_abandoned_tasks, tasks_status.total_number_of_tasks) self._WritesString(sample)
Takes a sample of the status of queued tasks for profiling. Args: tasks_status (TasksStatus): status information about tasks.
def load(config_path: str): if os.path.splitext(config_path)[1] in ('.yaml', '.yml'): _ = load_yaml_configuration(config_path, translator=PipelineTranslator()) elif os.path.splitext(config_path)[1] == '.py': _ = load_python_configuration(config_path) else: raise ValueError('Unknown configuration extension: %r' % os.path.splitext(config_path)[1]) yield
Load a configuration and keep it alive for the given context :param config_path: path to a configuration file
def _deserialize(x, elementType, compress, relicReadBinFunc): b = (c_ubyte*len(x))(*bytearray(x)) flag = c_int(compress) result = elementType() relicReadBinFunc(byref(result), byref(b), len(x), flag) return result
Deserializes a bytearray @x into an @element of the correct type, using a relic read_bin function and the specified @compress flag. This is the underlying implementation for deserializing G1, G2, and Gt.
def _reset(self): with self._lock: self.stop() self.start() for svc_ref in self.get_bindings(): if not self.requirement.filter.matches( svc_ref.get_properties() ): self.on_service_departure(svc_ref)
Called when the filter has been changed