code: string, lengths 59 to 3.37k
docstring: string, lengths 8 to 15.5k
def get_item_sh_fields(self, identity=None, item_date=None, sh_id=None, rol='author'): eitem_sh = self.__get_item_sh_fields_empty(rol) if identity: sh_ids = self.get_sh_ids(identity, self.get_connector_name()) eitem_sh[rol + "_id"] = sh_ids.get('id', '') eitem_sh[rol + "_uuid"] = sh_ids.get('uuid', '') eitem_sh[rol + "_name"] = identity.get('name', '') eitem_sh[rol + "_user_name"] = identity.get('username', '') eitem_sh[rol + "_domain"] = self.get_identity_domain(identity) elif sh_id: eitem_sh[rol + "_id"] = sh_id eitem_sh[rol + "_uuid"] = self.get_uuid_from_id(sh_id) else: return eitem_sh if rol + "_uuid" not in eitem_sh or not eitem_sh[rol + "_uuid"]: return self.__get_item_sh_fields_empty(rol, undefined=True) profile = self.get_profile_sh(eitem_sh[rol + "_uuid"]) if profile: eitem_sh[rol + "_name"] = profile.get('name', eitem_sh[rol + "_name"]) email = profile.get('email', None) if email: eitem_sh[rol + "_domain"] = self.get_email_domain(email) eitem_sh[rol + "_gender"] = profile.get('gender', self.unknown_gender) eitem_sh[rol + "_gender_acc"] = profile.get('gender_acc', 0) elif not profile and sh_id: logger.warning("Can't find SH identity profile: %s", sh_id) if not eitem_sh.get(rol + "_gender"): eitem_sh[rol + "_gender"] = self.unknown_gender eitem_sh[rol + "_gender_acc"] = 0 eitem_sh[rol + "_org_name"] = self.get_enrollment(eitem_sh[rol + "_uuid"], item_date) eitem_sh[rol + "_bot"] = self.is_bot(eitem_sh[rol + '_uuid']) return eitem_sh
Get standard SH fields from a SH identity
def contains(self, seqid, start, end, overlap=True):
    d = self.positions.get(seqid, [])
    if overlap:
        return [gff_object for gff_start, gff_end in d
                for gff_object in d[(gff_start, gff_end)]
                if not (end <= gff_start or start >= gff_end)]
    else:
        return [gff_object for gff_start, gff_end in d
                for gff_object in d[(gff_start, gff_end)]
                if (gff_start <= start and gff_end >= end)]
This returns a list of GFF objects which cover a specified location. :param seqid: The landmark identifier (usually a chromosome) :param start: The 1-based position of the start of the range we are querying :param end: The 1-based position of the end of the range we are querying :param overlap: A boolean value, if true we allow features to overlap the query range. For instance, overlap=True with the range (5,10), will return a GFF object spanning from (8,15). overlap=False will only return objects fully containing the range. :return: A list of GFF objects
def memoize(fun):
    argspec = inspect2.getfullargspec(fun)
    arg_names = argspec.args + argspec.kwonlyargs
    kwargs_defaults = get_kwargs_defaults(argspec)

    def cache_key(args, kwargs):
        return get_args_tuple(args, kwargs, arg_names, kwargs_defaults)

    @functools.wraps(fun)
    def new_fun(*args, **kwargs):
        k = cache_key(args, kwargs)
        if k not in new_fun.__cache:
            new_fun.__cache[k] = fun(*args, **kwargs)
        return new_fun.__cache[k]

    def clear_cache():
        new_fun.__cache.clear()

    new_fun.__cache = {}
    new_fun.clear_cache = clear_cache
    return new_fun
Memoizes return values of the decorated function. Similar to l0cache, but the cache persists for the duration of the process, unless clear_cache() is called on the function.
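A minimal usage sketch for the decorator above. The import path is a placeholder (not taken from the source); the behaviour shown follows directly from the cache dictionary and clear_cache() in the code.

# Hypothetical usage sketch; the import path is an assumption, not from the source.
from caching import memoize  # assumption: wherever memoize() is defined

calls = []

@memoize
def slow_square(x):
    calls.append(x)          # record real invocations to demonstrate caching
    return x * x

print(slow_square(3))        # 9, computed
print(slow_square(3))        # 9, served from the per-process cache
print(len(calls))            # 1 -> the body ran only once
slow_square.clear_cache()    # cache can be dropped explicitly
print(slow_square(3))        # recomputed after clear_cache()
print(len(calls))            # 2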
def get_method_serializers(self, http_method):
    if http_method == 'HEAD' and 'HEAD' not in self.method_serializers:
        http_method = 'GET'

    return (
        self.method_serializers.get(http_method, self.serializers),
        self.default_method_media_type.get(
            http_method, self.default_media_type)
    )
Get request method serializers + default media type. Grab serializers from ``method_serializers`` if defined, otherwise returns the default serializers. Uses GET serializers for HEAD requests if no HEAD serializers were specified. The method also determines the default media type. :param http_method: HTTP method as a string. :returns: Tuple of serializers and default media type.
def _msge_with_gradient_underdetermined(data, delta, xvschema, skipstep, p): t, m, l = data.shape d = None j, k = 0, 0 nt = np.ceil(t / skipstep) for trainset, testset in xvschema(t, skipstep): a, b = _construct_var_eqns(atleast_3d(data[trainset, :, :]), p) c, d = _construct_var_eqns(atleast_3d(data[testset, :, :]), p) e = sp.linalg.inv(np.eye(a.shape[0]) * delta ** 2 + a.dot(a.T)) cc = c.transpose().dot(c) be = b.transpose().dot(e) bee = be.dot(e) bea = be.dot(a) beea = bee.dot(a) beacc = bea.dot(cc) dc = d.transpose().dot(c) j += np.sum(beacc * bea - 2 * bea * dc) + np.sum(d ** 2) k += np.sum(beea * dc - beacc * beea) * 4 * delta return j / (nt * d.size), k / (nt * d.size)
Calculate mean squared generalization error and its gradient for underdetermined equation system.
def find(format):
    try:
        serializer = SERIALIZERS[format]
    except KeyError:
        raise UnknownSerializer('No serializer found for %s' % format.acronym)
    return serializer
Find and return a serializer for the given format. Arguments: format -- A Format instance.
def save(self, mode=0o600): if self._parent is not None: self._parent.save(mode=mode) else: config_dir = os.path.dirname(os.path.abspath(self.config_files[-1])) try: os.makedirs(config_dir) except OSError as e: if not (e.errno == errno.EEXIST and os.path.isdir(config_dir)): raise with open(self.config_files[-1], "wb" if sys.version_info < (3, 0) else "w") as fh: self._dump(fh) os.chmod(self.config_files[-1], mode) self._logger.debug("Saved config to %s", self.config_files[-1])
Serialize the config data to the user home directory. :param mode: The octal Unix mode (permissions) for the config file.
def fields(self):
    return (
        self.locus,
        self.offset_start,
        self.offset_end,
        self.alignment_key)
Fields that should be considered for our notion of object equality.
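A self-contained sketch of the usual pattern such a fields() tuple supports: equality and hashing are delegated to the tuple. The class and attribute names here are illustrative only, not taken from the source.

# Illustrative only: a made-up class showing how a fields() tuple
# typically backs __eq__ and __hash__.
class Interval:
    def __init__(self, locus, start, end):
        self.locus = locus
        self.start = start
        self.end = end

    def fields(self):
        # the tuple that defines object identity for comparisons
        return (self.locus, self.start, self.end)

    def __eq__(self, other):
        return isinstance(other, Interval) and self.fields() == other.fields()

    def __hash__(self):
        return hash(self.fields())

assert Interval("chr1", 5, 10) == Interval("chr1", 5, 10)
assert len({Interval("chr1", 5, 10), Interval("chr1", 5, 10)}) == 1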
def _translate_bisz(self, oprnd1, oprnd2, oprnd3): assert oprnd1.size and oprnd3.size op1_var = self._translate_src_oprnd(oprnd1) op3_var, op3_var_constrs = self._translate_dst_oprnd(oprnd3) result = smtfunction.ite(oprnd3.size, op1_var == 0x0, smtsymbol.Constant(oprnd3.size, 0x1), smtsymbol.Constant(oprnd3.size, 0x0)) return [op3_var == result] + op3_var_constrs
Return a formula representation of a BISZ instruction.
def validate(self, uri):
    requirement = self.requirement()
    uri_component = uri.component(self.component())
    if uri_component is None:
        return requirement != WURIComponentVerifier.Requirement.required
    if requirement == WURIComponentVerifier.Requirement.unsupported:
        return False

    re_obj = self.re_obj()
    if re_obj is not None:
        return re_obj.match(uri_component) is not None
    return True
Check a URI for compatibility with this specification. Return True if the URI is compatible. :param uri: a URI to check :return: bool
def _save_work_results(self, run_stats, scores, num_processed_images, filename): with open(filename, 'w') as f: writer = csv.writer(f) writer.writerow( ['SubmissionID', 'ExternalSubmissionId', 'Score', 'CompletedBatches', 'BatchesWithError', 'ProcessedImages', 'MinEvalTime', 'MaxEvalTime', 'MedianEvalTime', 'MeanEvalTime', 'ErrorMsg']) for submission_id in sorted(iterkeys(run_stats)): stat = run_stats.get( submission_id, collections.defaultdict(lambda: float('NaN'))) external_id = self.submissions.get_external_id(submission_id) error_msg = '' while not error_msg and stat['error_messages']: error_msg = stat['error_messages'].pop() if error_msg.startswith('Cant copy adversarial batch locally'): error_msg = '' writer.writerow([ submission_id, external_id, scores.get(submission_id, None), stat['completed'], stat['num_errors'], num_processed_images.get(submission_id, None), stat['min_eval_time'], stat['max_eval_time'], stat['median_eval_time'], stat['mean_eval_time'], error_msg ])
Saves statistics about each submission. Saved statistics include score; number of completed and failed batches; min, max, average and median time needed to run one batch. Args: run_stats: dictionary with runtime statistics for submissions, can be generated by WorkPiecesBase.compute_work_statistics scores: dictionary mapping submission ids to scores num_processed_images: dictionary with number of successfully processed images by each submission, one of the outputs of ClassificationBatches.compute_classification_results filename: output filename
def append(self, node):
    "Append a new subnode"
    if not isinstance(node, self.__class__):
        raise TypeError('Expected Node instance, got %r' % node)
    self.nodes.append(node)
Append a new subnode
def reset(self):
    self.context = pyblish.api.Context()
    self.plugins = pyblish.api.discover()

    self.was_discovered.emit()

    self.pair_generator = None
    self.current_pair = (None, None)
    self.current_error = None

    self.processing = {
        "nextOrder": None,
        "ordersWithError": set()
    }

    self._load()
    self._run(until=pyblish.api.CollectorOrder,
              on_finished=self.was_reset.emit)
Discover plug-ins and run collection
def get_rsa_props( object_class, exported_cfgs, remote_intents=None, ep_svc_id=None, fw_id=None, pkg_vers=None, service_intents=None, ): results = {} if not object_class: raise ArgumentError( "object_class", "object_class must be an [] of Strings" ) results["objectClass"] = object_class if not exported_cfgs: raise ArgumentError( "exported_cfgs", "exported_cfgs must be an array of Strings" ) results[REMOTE_CONFIGS_SUPPORTED] = exported_cfgs results[SERVICE_IMPORTED_CONFIGS] = exported_cfgs if remote_intents: results[REMOTE_INTENTS_SUPPORTED] = remote_intents if service_intents: results[SERVICE_INTENTS] = service_intents if not ep_svc_id: ep_svc_id = get_next_rsid() results[ENDPOINT_SERVICE_ID] = ep_svc_id results[SERVICE_ID] = ep_svc_id if not fw_id: fw_id = "endpoint-in-error" results[ENDPOINT_FRAMEWORK_UUID] = fw_id if pkg_vers: if isinstance(pkg_vers, type(tuple())): pkg_vers = [pkg_vers] for pkg_ver in pkg_vers: results[pkg_ver[0]] = pkg_ver[1] results[ENDPOINT_ID] = create_uuid() results[SERVICE_IMPORTED] = "true" return results
Constructs a dictionary of RSA properties from the given arguments :param object_class: Service specifications :param exported_cfgs: Export configurations :param remote_intents: Supported remote intents :param ep_svc_id: Endpoint service ID :param fw_id: Remote Framework ID :param pkg_vers: Version number of the specification package :param service_intents: Service intents :return: A dictionary of properties
def wait_for_bump(self, buttons, timeout_ms=None):
    start_time = time.time()
    if self.wait_for_pressed(buttons, timeout_ms):
        if timeout_ms is not None:
            timeout_ms -= int((time.time() - start_time) * 1000)
        return self.wait_for_released(buttons, timeout_ms)
    return False
Wait for the button to be pressed down and then released. Both actions must happen within timeout_ms.
def from_gromacs(cls, path, positions=None, forcefield=None, strict=True, **kwargs):
    if strict and positions is None:
        raise ValueError('Gromacs TOP files require initial positions.')
    box = kwargs.pop('box', None)
    top = GromacsTopFile(path, includeDir=forcefield, periodicBoxVectors=box)
    return cls(master=top, topology=top.topology, positions=positions,
               box=box, path=path, **kwargs)
Loads a topology from a Gromacs TOP file located at `path`. Additional root directory for parameters can be specified with `forcefield`. Arguments --------- path : str Path to a Gromacs TOP file positions : simtk.unit.Quantity Atomic positions forcefield : str, optional Root directory for parameter files
def print_logins(logins):
    table = formatting.Table(['Date', 'IP Address', 'Successful Login?'])

    for login in logins:
        table.add_row([login.get('createDate'),
                       login.get('ipAddress'),
                       login.get('successFlag')])

    return table
Prints out the login history for a user
def _translate_key(key):
    d = _get_deprecated_option(key)
    if d:
        return d.rkey or key
    else:
        return key
If `key` is deprecated and a replacement key is defined, return the replacement key; otherwise return `key` as-is.
def getNextTimeout(self):
    timeval = libusb1.timeval()
    result = libusb1.libusb_get_next_timeout(
        self.__context_p, byref(timeval))
    if result == 0:
        return None
    elif result == 1:
        return timeval.tv_sec + (timeval.tv_usec * 0.000001)
    raiseUSBError(result)
Returns the next internal timeout that libusb needs to handle, in seconds, or None if no timeout is needed. You should not have to call this method, unless you are integrating this class with a polling mechanism.
def get_list_display(self, request): list_display = list( super(VersionedAdmin, self).get_list_display(request)) if self.list_display_show_identity: list_display = ['identity_shortener', ] + list_display if self.list_display_show_start_date: list_display += ['version_start_date', ] if self.list_display_show_end_date: list_display += ['version_end_date', ] return list_display + ['is_current', ]
This method determines which fields go in the changelist
def population_variant_regions(items, merged=False): def _get_variant_regions(data): out = dd.get_variant_regions(data) or dd.get_sample_callable(data) if merged and dd.get_variant_regions(data): merged_out = dd.get_variant_regions_merged(data) if merged_out: out = merged_out else: out = merge_overlaps(out, data) return out import pybedtools if len(items) == 1: return _get_variant_regions(items[0]) else: paired = vcfutils.get_paired(items) if paired: return _get_variant_regions(paired.tumor_data) else: vrs = [] for data in items: vr_bed = _get_variant_regions(data) if vr_bed: vrs.append((pybedtools.BedTool(vr_bed).total_coverage(), vr_bed)) vrs.sort(reverse=True) if vrs: return vrs[0][1]
Retrieve the variant region BED file from a population of items. If tumor/normal, return the tumor BED file. If a population, return the BED file covering the most bases.
def raw_encode(self, value):
    if type(value) in self.encoders:
        encoder = self.encoders[type(value)]
        return encoder(self, value)
    raise ValueError("No encoder for value '%s' of type '%s'" %
                     (value, type(value)))
Run the encoder on a value
def register_on_additions_state_changed(self, callback):
    event_type = library.VBoxEventType.on_additions_state_change
    return self.event_source.register_callback(callback, event_type)
Set the callback function to consume on additions state changed events. Callback receives a IAdditionsStateChangedEvent object. Note: Interested callees should query IGuest attributes to find out what has changed. Returns the callback_id
def skewBy(self, value, origin=None):
    value = normalizers.normalizeTransformationSkewAngle(value)
    if origin is None:
        origin = (0, 0)
    origin = normalizers.normalizeCoordinateTuple(origin)
    self._skewBy(value, origin=origin)
Skew the object. >>> obj.skewBy(11) >>> obj.skewBy((25, 10), origin=(500, 500)) **value** must be one of the following: * a single :ref:`type-int-float` indicating the value to skew the x direction by. * an iterable containing :ref:`type-int-float` values defining the amounts to skew the x and y directions by. **origin** defines the point at which the skew should originate. It must be a :ref:`type-coordinate` or ``None``. The default is ``(0, 0)``.
def get_pmid(doc_id):
    url_pmid = 'https://www.ebi.ac.uk/chembl/api/data/document.json'
    params = {'document_chembl_id': doc_id}
    res = requests.get(url_pmid, params=params)
    js = res.json()
    pmid = str(js['documents'][0]['pubmed_id'])
    return pmid
Get PMID from document_chembl_id Parameters ---------- doc_id : str Returns ------- pmid : str
def min_chans(self, min_chans): declustered = Party() for family in self.families: fam = Family(family.template) for d in family.detections: if d.no_chans > min_chans: fam.detections.append(d) declustered.families.append(fam) self.families = declustered.families return self
Remove detections with fewer channels used than min_chans :type min_chans: int :param min_chans: Minimum number of channels to allow a detection. :return: Party .. Note:: Works in place on Party. .. rubric:: Example >>> party = Party().read() >>> print(len(party)) 4 >>> party = party.min_chans(5) >>> print(len(party)) 1
def _check_link_completion(self, link, fail_pending=False, fail_running=False): status_vect = JobStatusVector() for job_key, job_details in link.jobs.items(): if job_key.find(JobDetails.topkey) >= 0: continue job_details.status = self._interface.check_job(job_details) if job_details.status == JobStatus.pending: if fail_pending: job_details.status = JobStatus.failed elif job_details.status == JobStatus.running: if fail_running: job_details.status = JobStatus.failed status_vect[job_details.status] += 1 link.jobs[job_key] = job_details link._set_status_self(job_details.jobkey, job_details.status) return status_vect
Internal function to check the completion of all the dispatched jobs Returns ------- status_vect : `JobStatusVector` Vector that summarize the number of jobs in various states.
def pack_value(self, value):
    if type(value) is tuple:
        return self.to_binary(*value)
    elif isinstance(value, dict):
        return self.to_binary(**value)
    elif isinstance(value, DictWrapper):
        return self.to_binary(**value._data)
    else:
        raise BadDataError('%s is not a tuple or a list' % (value))
This function allows Struct objects to be used in List and Object fields. Each item represents the arguments to pass to to_binary, either a tuple, a dictionary or a DictWrapper.
def browse(self): check = self._check marker_text = Random.string() self.vim.buffer.set_content([marker_text]) self._save() self._write_file(1) self._save() self._write_file2(1) self._save() self._write_file(2) self._write_file2(1) self._save() self.vim.cmd('ProHistoryFileBrowse {}'.format('test_file_1')) check(0, '*') self.vim.vim.feedkeys('j') self.vim.vim.feedkeys('j') later(lambda: self.vim.buffer.content.length.should.equal(3)) self.vim.vim.feedkeys('s') self._await_commit(0) self.vim.buffer.content.should.equal(List(marker_text))
Browse the history of a single file. Adds one commit that doesn't contain changes in test_file_1. There are four commits in total, so the check for the buffer line count compares with 3. At the end, a fifth commit must be present due to resetting the file contents.
def add_module(self, module, cython=False): name_module = module.__name__.split('.')[-1] short = ('|%s|' % name_module) long = (':mod:`~%s`' % module.__name__) self._short2long[short] = long for (name_member, member) in vars(module).items(): if self.consider_member( name_member, member, module): role = self.get_role(member, cython) short = ('|%s|' % name_member) medium = ('|%s.%s|' % (name_module, name_member)) long = (':%s:`~%s.%s`' % (role, module.__name__, name_member)) self.add_substitution(short, medium, long, module) if inspect.isclass(member): for name_submember, submember in vars(member).items(): if self.consider_member( name_submember, submember, module, member): role = self.get_role(submember, cython) short = ('|%s.%s|' % (name_member, name_submember)) medium = ('|%s.%s.%s|' % (name_module, name_member, name_submember)) long = (':%s:`~%s.%s.%s`' % (role, module.__name__, name_member, name_submember)) self.add_substitution(short, medium, long, module)
Add the given module, its members, and their submembers. The first examples are based on the site-package |numpy|: which is passed to method |Substituter.add_module|: >>> from hydpy.core.autodoctools import Substituter >>> substituter = Substituter() >>> import numpy >>> substituter.add_module(numpy) Firstly, the module itself is added: >>> substituter.find('|numpy|') |numpy| :mod:`~numpy` Secondly, constants like |numpy.nan| are added: >>> substituter.find('|numpy.nan|') |numpy.nan| :const:`~numpy.nan` Thirdly, functions like |numpy.clip| are added: >>> substituter.find('|numpy.clip|') |numpy.clip| :func:`~numpy.clip` Fourthly, clases line |numpy.ndarray| are added: >>> substituter.find('|numpy.ndarray|') |numpy.ndarray| :class:`~numpy.ndarray` When adding Cython modules, the `cython` flag should be set |True|: >>> from hydpy.cythons import pointerutils >>> substituter.add_module(pointerutils, cython=True) >>> substituter.find('set_pointer') |PPDouble.set_pointer| \ :func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer` |pointerutils.PPDouble.set_pointer| \ :func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
def flatten(d, *keys):
    flat = {}
    for k in keys:
        flat = merge(flat, d.pop(k, {}))
    return flat
Flattens the dictionary d by merging keys in order such that later keys take precedence over earlier keys.
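A small usage sketch for flatten(); the import path is a placeholder, and the merge semantics assumed here (later keys win) are taken from the docstring above.

# Usage sketch; the import path is a placeholder, not from the source.
from dicthelpers import flatten  # assumption: module defining flatten()/merge()

d = {
    "defaults": {"host": "localhost", "port": 8080},
    "overrides": {"port": 9090},
    "other": "kept",
}

# Later keys take precedence: "overrides" is merged after "defaults".
flat = flatten(d, "defaults", "overrides")
print(flat)   # expected: {'host': 'localhost', 'port': 9090}
print(d)      # note: flatten() pops the merged keys, so only 'other' remains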
def unbuffered_write(self, buf): if self.closed: raise ConnectionClosed() result = 0 try: result = self.sock.send(buf) except EnvironmentError as e: if e.errno != errno.EAGAIN: self._close(e) return if result != len(buf): self.write = self.buffered_write self.write_watcher.start() self.write(buf[result:])
Performs an unbuffered write; this is the default write method unless socket.send does not send everything, in which case the remainder is handed to a buffered write and the write method is set to the buffered version until the buffer is empty once again. buf -- bytes to send
def _get_proposed_values(self):
    momentum_bar = self.momentum + 0.5 * self.stepsize * self.grad_log_position
    position_bar = self.position + self.stepsize * momentum_bar
    grad_log, _ = self.grad_log_pdf(position_bar,
                                    self.model).get_gradient_log_pdf()
    momentum_bar = momentum_bar + 0.5 * self.stepsize * grad_log
    return position_bar, momentum_bar, grad_log
Method to perform time splitting using leapfrog
def download_urls(self): urls = { tfds.Split.TRAIN: ["train_clean100"], tfds.Split.VALIDATION: ["dev_clean"], tfds.Split.TEST: ["test_clean"], } if self.data in ["all", "clean360"]: urls[tfds.Split.TRAIN].append("train_clean360") if self.data == "all": urls[tfds.Split.TRAIN].extend(["train_clean360", "train_other500"]) urls[tfds.Split.VALIDATION].append("dev_other") urls[tfds.Split.TEST].append("test_other") urls = { split: [_DL_URLS[name] for name in names ] for split, names in urls.items() } return urls
Returns download urls for this config.
def _list_templates(settings):
    for idx, option in enumerate(settings.config.get("project_templates"), start=1):
        puts(" {0!s:5} {1!s:36}".format(
            colored.yellow("[{0}]".format(idx)),
            colored.cyan(option.get("name"))
        ))
        if option.get("url"):
            puts(" {0}\n".format(option.get("url")))
List templates from settings.
def psql(self, *psqlargs):
    db, env = self.get_db_args_env()
    args = [
        '-v', 'ON_ERROR_STOP=on',
        '-d', db['name'],
        '-h', db['host'],
        '-U', db['user'],
        '-w', '-A', '-t'
    ] + list(psqlargs)
    stdout, stderr = External.run('psql', args, capturestd=True, env=env)
    if stderr:
        log.warn('stderr: %s', stderr)
    log.debug('stdout: %s', stdout)
    return stdout
Run a psql command
def to_one_str(cls, value, *args, **kwargs):
    if kwargs.get('wrapper'):
        return cls._wrapper_to_one_str(value)
    return _es.to_dict_str(value)
Convert single record's values to str
def add_moving_summary(*args, **kwargs): decay = kwargs.pop('decay', 0.95) coll = kwargs.pop('collection', MOVING_SUMMARY_OPS_KEY) summ_coll = kwargs.pop('summary_collections', None) assert len(kwargs) == 0, "Unknown arguments: " + str(kwargs) ctx = get_current_tower_context() if ctx is not None and not ctx.is_main_training_tower: return [] graph = tf.get_default_graph() try: control_flow_ctx = graph._get_control_flow_context() if control_flow_ctx is not None and control_flow_ctx.IsXLAContext(): return except Exception: pass if tf.get_variable_scope().reuse is True: logger.warn("add_moving_summary() called under reuse=True scope, ignored.") return [] for x in args: assert isinstance(x, (tf.Tensor, tf.Variable)), x assert x.get_shape().ndims == 0, \ "add_moving_summary() only accepts scalar tensor! Got one with {}".format(x.get_shape()) ema_ops = [] for c in args: name = re.sub('tower[0-9]+/', '', c.op.name) with tf.name_scope(None): if not c.dtype.is_floating: c = tf.cast(c, tf.float32) with _enter_vs_reuse_ns('EMA') as vs: ema_var = tf.get_variable(name, shape=c.shape, dtype=c.dtype, initializer=tf.constant_initializer(), trainable=False) ns = vs.original_name_scope with tf.name_scope(ns): ema_op = moving_averages.assign_moving_average( ema_var, c, decay, zero_debias=True, name=name + '_EMA_apply') ema_ops.append(ema_op) with tf.name_scope(None): tf.summary.scalar( name + '-summary', ema_op, collections=summ_coll) if coll is not None: for op in ema_ops: tf.add_to_collection(coll, op) return ema_ops
Summarize the moving average for scalar tensors. This function is a no-op if not calling from main training tower. Args: args: scalar tensors to summarize decay (float): the decay rate. Defaults to 0.95. collection (str or None): the name of the collection to add EMA-maintaining ops. The default will work together with the default :class:`MovingAverageSummary` callback. summary_collections ([str]): the names of collections to add the summary op. Default is TF's default (`tf.GraphKeys.SUMMARIES`). Returns: [tf.Tensor]: list of tensors returned by assign_moving_average, which can be used to maintain the EMA.
def _transform_delta(f: Formula, formula2AtomicFormula):
    t = type(f)
    if t == PLNot:
        # Recurse into the negated subformula rather than f itself,
        # which would otherwise recurse forever.
        return PLNot(_transform_delta(f.f, formula2AtomicFormula))
    elif t == PLAnd or t == PLOr or t == PLImplies or t == PLEquivalence:
        return t([_transform_delta(subf, formula2AtomicFormula)
                  for subf in f.formulas])
    elif t == PLTrue or t == PLFalse:
        return f
    else:
        return formula2AtomicFormula[f]
From a Propositional Formula to a Propositional Formula with non-propositional subformulas replaced with a "freezed" atomic formula.
def react(self, emojiname):
    self._client.react_to_message(
        emojiname=emojiname,
        channel=self._body['channel'],
        timestamp=self._body['ts'])
React to a message using the web api
def is_address_in_network(network, address):
    try:
        network = netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)

    try:
        address = netaddr.IPAddress(address)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Address (%s) is not in correct presentation format" %
                         address)

    if address in network:
        return True
    else:
        return False
Determine whether the provided address is within a network range. :param network (str): CIDR presentation format. For example, '192.168.1.0/24'. :param address: An individual IPv4 or IPv6 address without a net mask or subnet prefix. For example, '192.168.1.1'. :returns boolean: Flag indicating whether address is in network.
def process_dividends(self, next_session, asset_finder, adjustment_reader): position_tracker = self.position_tracker held_sids = set(position_tracker.positions) if held_sids: cash_dividends = adjustment_reader.get_dividends_with_ex_date( held_sids, next_session, asset_finder ) stock_dividends = ( adjustment_reader.get_stock_dividends_with_ex_date( held_sids, next_session, asset_finder ) ) position_tracker.earn_dividends( cash_dividends, stock_dividends, ) self._cash_flow( position_tracker.pay_dividends( next_session, ), )
Process dividends for the next session. This will earn us any dividends whose ex-date is the next session as well as paying out any dividends whose pay-date is the next session
def maybe_new(cls, values, use_comma=True):
    if len(values) == 1:
        return values[0]
    else:
        return cls(values, use_comma=use_comma)
If `values` contains only one item, return that item. Otherwise, return a List as normal.
def remove_container(self, container, v=False, link=False, force=False):
    params = {'v': v, 'link': link, 'force': force}
    res = self._delete(
        self._url("/containers/{0}", container), params=params
    )
    self._raise_for_status(res)
Remove a container. Similar to the ``docker rm`` command. Args: container (str): The container to remove v (bool): Remove the volumes associated with the container link (bool): Remove the specified link and not the underlying container force (bool): Force the removal of a running container (uses ``SIGKILL``) Raises: :py:class:`docker.errors.APIError` If the server returns an error.
def convert(self, value, view): is_mapping = isinstance(self.template, MappingTemplate) for candidate in self.allowed: try: if is_mapping: if isinstance(candidate, Filename) and \ candidate.relative_to: next_template = candidate.template_with_relatives( view, self.template ) next_template.subtemplates[view.key] = as_template( candidate ) else: next_template = MappingTemplate({view.key: candidate}) return view.parent.get(next_template)[view.key] else: return view.get(candidate) except ConfigTemplateError: raise except ConfigError: pass except ValueError as exc: raise ConfigTemplateError(exc) self.fail( u'must be one of {0}, not {1}'.format( repr(self.allowed), repr(value) ), view )
Ensure that the value follows at least one template.
def createsnippet(self, project_id, title, file_name, code, visibility_level=0):
    data = {'id': project_id, 'title': title,
            'file_name': file_name, 'code': code}

    if visibility_level in [0, 10, 20]:
        data['visibility_level'] = visibility_level

    request = requests.post(
        '{0}/{1}/snippets'.format(self.projects_url, project_id),
        data=data, verify=self.verify_ssl, auth=self.auth,
        headers=self.headers, timeout=self.timeout)

    if request.status_code == 201:
        return request.json()
    else:
        return False
Creates a snippet :param project_id: project id to create the snippet under :param title: title of the snippet :param file_name: filename for the snippet :param code: content of the snippet :param visibility_level: snippets can be either private (0), internal (10) or public (20) :return: the created snippet (JSON) if successful, False if failed
def __join_connections(self): interval_s = nsq.config.client.CONNECTION_CLOSE_AUDIT_WAIT_S graceful_wait_s = nsq.config.client.CONNECTION_QUIT_CLOSE_TIMEOUT_S graceful = False while graceful_wait_s > 0: if not self.__connections: break connected_list = [c.is_connected for (n, c, g) in self.__connections] if any(connected_list) is False: graceful = True break gevent.sleep(interval_s) graceful_wait_s -= interval_s if graceful is False: connected_list = [c for (n, c, g) in self.__connections if c.is_connected] _logger.error("We were told to terminate, but not all " "connections were stopped: [%s]", connected_list)
Wait for all connections to close. There are no side-effects here. We just want to try and leave -after- everything has closed, in general.
def find(self, selector, **kwargs): self.debug_log("Finding element with selector: %s" % selector) elements = self.find_all(selector, **kwargs) if len(elements): self.debug_log("find (%s): Element found" % (selector)) return elements[0] else: self.debug_log("find (%s): No element found" % (selector)) return None
Find an element with a selector Args: selector (str): the selector used to find the element Kwargs: wait_until_present (bool) wait_until_visible (bool) raise_exception (bool) Returns: None if no element was found, or a proxy_element if an element was found Raises: this function might raise an exception depending on the raise_exception kwarg or the config proxy_driver:raise_exception
def ValidatePassword(self, password):
    password = to_aes_key(password)
    return hashlib.sha256(password).digest() == self.LoadStoredData('PasswordHash')
Validates if the provided password matches with the stored password. Args: password (string): a password. Returns: bool: the provided password matches with the stored password.
def pad(segment, size):
    for i in range(size - len(segment)):
        segment.append(0)
    assert len(segment) == size
Add zeroes to a segment until it reaches a certain size. :param segment: the segment to pad :param size: the size to which to pad the segment
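A quick usage sketch for pad(); the import path is a placeholder. Note from the code above that it mutates the list in place and the trailing assert fails if the segment is already longer than `size`.

# Usage sketch; the import path is a placeholder, not from the source.
from segments import pad  # assumption: module defining pad()

segment = [1, 2, 3]
pad(segment, 8)           # pads in place with zeroes
print(segment)            # [1, 2, 3, 0, 0, 0, 0, 0]

# A segment longer than `size` gets nothing removed, so the
# assert in pad() would raise AssertionError in that case.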
def process_response_params( self, params: Sequence[ExtensionParameter], accepted_extensions: Sequence["Extension"], ) -> PerMessageDeflate: if any(other.name == self.name for other in accepted_extensions): raise NegotiationError(f"Received duplicate {self.name}") ( server_no_context_takeover, client_no_context_takeover, server_max_window_bits, client_max_window_bits, ) = _extract_parameters(params, is_server=False) if self.server_no_context_takeover: if not server_no_context_takeover: raise NegotiationError("Expected server_no_context_takeover") if self.client_no_context_takeover: if not client_no_context_takeover: client_no_context_takeover = True if self.server_max_window_bits is None: pass else: if server_max_window_bits is None: raise NegotiationError("Expected server_max_window_bits") elif server_max_window_bits > self.server_max_window_bits: raise NegotiationError("Unsupported server_max_window_bits") if self.client_max_window_bits is None: if client_max_window_bits is not None: raise NegotiationError("Unexpected client_max_window_bits") elif self.client_max_window_bits is True: pass else: if client_max_window_bits is None: client_max_window_bits = self.client_max_window_bits elif client_max_window_bits > self.client_max_window_bits: raise NegotiationError("Unsupported client_max_window_bits") return PerMessageDeflate( server_no_context_takeover, client_no_context_takeover, server_max_window_bits or 15, client_max_window_bits or 15, self.compress_settings, )
Process response parameters. Return an extension instance.
def roundrect(surface, rect, color, rounding=5, unit=PIXEL): if unit == PERCENT: rounding = int(min(rect.size) / 2 * rounding / 100) rect = pygame.Rect(rect) color = pygame.Color(*color) alpha = color.a color.a = 0 pos = rect.topleft rect.topleft = 0, 0 rectangle = pygame.Surface(rect.size, SRCALPHA) circle = pygame.Surface([min(rect.size) * 3] * 2, SRCALPHA) pygame.draw.ellipse(circle, (0, 0, 0), circle.get_rect(), 0) circle = pygame.transform.smoothscale(circle, (rounding, rounding)) rounding = rectangle.blit(circle, (0, 0)) rounding.bottomright = rect.bottomright rectangle.blit(circle, rounding) rounding.topright = rect.topright rectangle.blit(circle, rounding) rounding.bottomleft = rect.bottomleft rectangle.blit(circle, rounding) rectangle.fill((0, 0, 0), rect.inflate(-rounding.w, 0)) rectangle.fill((0, 0, 0), rect.inflate(0, -rounding.h)) rectangle.fill(color, special_flags=BLEND_RGBA_MAX) rectangle.fill((255, 255, 255, alpha), special_flags=BLEND_RGBA_MIN) return surface.blit(rectangle, pos)
Draw an antialiased round rectangle on the surface. surface : destination rect : rectangle color : rgb or rgba radius : 0 <= radius <= 1 :source: http://pygame.org/project-AAfilledRoundedRect-2349-.html
def set_mode(self, mode, custom_mode=0, custom_sub_mode=0):
    mav_autopilot = self.field('HEARTBEAT', 'autopilot', None)
    if mav_autopilot == mavlink.MAV_AUTOPILOT_PX4:
        self.set_mode_px4(mode, custom_mode, custom_sub_mode)
    else:
        self.set_mode_apm(mode)
set arbitrary flight mode
def attention_bias_batch(batch_coordinates_q, batch_coordinates_k=None, condition_fn=None): if batch_coordinates_k is None: batch_coordinates_k = batch_coordinates_q def to_float(bc): bc = tf.squeeze(bc, 1) bc = tf.to_float(bc) return bc bc_v = tf.expand_dims(to_float(batch_coordinates_q), 1) bc_h = tf.expand_dims(to_float(batch_coordinates_k), 0) bias_batch = bc_h - bc_v bias_batch = condition_fn(bias_batch) bias_batch *= -1e9 return bias_batch
Generate a mask to prevent the batch to attend to each others. Args: batch_coordinates_q: Int-like Tensor of shape [length_q, 1] containing the coordinates of the batches batch_coordinates_k: Int-like Tensor of shape [length_k, 1] containing the coordinates of the batches. If None, do self-attention. condition_fn: Callable defining the attention mask. Returns: Float-like Tensor of shape [length_q, length_k] containing either 0 or -infinity (-1e9).
def _load_sequences_to_reference_gene(self, g_id, force_rerun=False): protein_seqs_pickle_path = op.join(self.sequences_by_gene_dir, '{}_protein_withseqs.pckl'.format(g_id)) if ssbio.utils.force_rerun(flag=force_rerun, outfile=protein_seqs_pickle_path): protein_pickle_path = self.gene_protein_pickles[g_id] protein_pickle = ssbio.io.load_pickle(protein_pickle_path) for strain, info in self.strain_infodict.items(): strain_sequences = SeqIO.index(info['genome_path'], 'fasta') strain_gene_functional = info['functional_genes'][g_id] if strain_gene_functional: strain_gene_key = self.df_orthology_matrix.at[g_id, strain] new_id = '{}_{}'.format(g_id, strain) if protein_pickle.sequences.has_id(new_id): continue protein_pickle.load_manual_sequence(seq=strain_sequences[strain_gene_key], ident=new_id, set_as_representative=False) protein_pickle.save_pickle(outfile=protein_seqs_pickle_path) return g_id, protein_seqs_pickle_path
Load orthologous strain sequences to reference Protein object, save as new pickle
def execute_operation( self, operation: OperationDefinitionNode, root_value: Any ) -> Optional[AwaitableOrValue[Any]]: type_ = get_operation_root_type(self.schema, operation) fields = self.collect_fields(type_, operation.selection_set, {}, set()) path = None try: result = ( self.execute_fields_serially if operation.operation == OperationType.MUTATION else self.execute_fields )(type_, root_value, path, fields) except GraphQLError as error: self.errors.append(error) return None except Exception as error: error = GraphQLError(str(error), original_error=error) self.errors.append(error) return None else: if isawaitable(result): async def await_result(): try: return await result except GraphQLError as error: self.errors.append(error) except Exception as error: error = GraphQLError(str(error), original_error=error) self.errors.append(error) return await_result() return result
Execute an operation. Implements the "Evaluating operations" section of the spec.
def compose_arrays(a1, a2, firstfield='etag'): assert len(a1) == len(a2), (len(a1), len(a2)) if a1.dtype.names is None and len(a1.shape) == 1: a1 = numpy.array(a1, numpy.dtype([(firstfield, a1.dtype)])) fields1 = [(f, a1.dtype.fields[f][0]) for f in a1.dtype.names] if a2.dtype.names is None: assert len(a2.shape) == 2, a2.shape width = a2.shape[1] fields2 = [('value%d' % i, a2.dtype) for i in range(width)] composite = numpy.zeros(a1.shape, numpy.dtype(fields1 + fields2)) for f1 in dict(fields1): composite[f1] = a1[f1] for i in range(width): composite['value%d' % i] = a2[:, i] return composite fields2 = [(f, a2.dtype.fields[f][0]) for f in a2.dtype.names] composite = numpy.zeros(a1.shape, numpy.dtype(fields1 + fields2)) for f1 in dict(fields1): composite[f1] = a1[f1] for f2 in dict(fields2): composite[f2] = a2[f2] return composite
Compose composite arrays by generating an extended datatype containing all the fields. The two arrays must have the same length.
def _get_uri(self):
    if not self.service.exists():
        logging.warning("Service does not yet exist.")
    return self.service.settings.data['uri']
Will return the uri for an existing instance.
def get_window(self):
    wfrequencies = self._get_indices() / self.duration
    xfrequencies = wfrequencies * self.qprime / self.frequency

    norm = self.ntiles / (self.duration * self.sampling) * (
        315 * self.qprime / (128 * self.frequency)) ** (1/2.)
    return (1 - xfrequencies ** 2) ** 2 * norm
Generate the bi-square window for this row Returns ------- window : `numpy.ndarray`
def get_first_lang():
    request_lang = request.headers.get('Accept-Language').split(',')
    if request_lang:
        lang = locale.normalize(request_lang[0]).split('.')[0]
    else:
        lang = False
    return lang
Get the first lang of Accept-Language Header.
def add_route(self, handler, uri, methods=frozenset({'GET'}), host=None, strict_slashes=False): stream = False if hasattr(handler, 'view_class'): http_methods = ( 'GET', 'POST', 'PUT', 'HEAD', 'OPTIONS', 'PATCH', 'DELETE') methods = set() for method in http_methods: _handler = getattr(handler.view_class, method.lower(), None) if _handler: methods.add(method) if hasattr(_handler, 'is_stream'): stream = True if isinstance(handler, self.composition_view_class): methods = handler.handlers.keys() for _handler in handler.handlers.values(): if hasattr(_handler, 'is_stream'): stream = True break self.route(uri=uri, methods=methods, host=host, strict_slashes=strict_slashes, stream=stream)(handler) return handler
A helper method to register class instance or functions as a handler to the application url routes. :param handler: function or class instance :param uri: path of the URL :param methods: list or tuple of methods allowed, these are overridden if using a HTTPMethodView :param host: :return: function or class instance
def add_file(self, name="", hashalg="", hash="", comClasses=None,
             typelibs=None, comInterfaceProxyStubs=None, windowClasses=None):
    self.files.append(File(name, hashalg, hash, comClasses, typelibs,
                           comInterfaceProxyStubs, windowClasses))
Shortcut for manifest.files.append
def _replace_scalars_in_kwargs(self, kwargs):
    _check_type('kwargs', kwargs, dict)
    new_kwargs = {}
    for (name, value) in iteritems(kwargs):
        if isinstance(value, list):
            new_kwargs[name] = self._replace_scalars_in_args(value)
        elif isinstance(value, dict):
            new_kwargs[name] = self._replace_scalars_in_kwargs(value)
        elif isinstance(value, string_types):
            new_kwargs[name] = self._replace_scalar(value)
        else:
            new_kwargs[name] = value
    return new_kwargs
Replace scalars in keyed arguments dictionary
def status(reset=None):
    global RECEIVED_INDICATION_DICT
    for host, count in six.iteritems(RECEIVED_INDICATION_DICT):
        print('Host %s Received %s indications' % (host, count))
    if reset:
        for host in RECEIVED_INDICATION_DICT:
            RECEIVED_INDICATION_DICT[host] = 0
            print('Host %s Reset: Received %s indications'
                  % (host, RECEIVED_INDICATION_DICT[host]))
        print('counts reset to 0')
Show status of indications received. If optional reset attribute is True, reset the counter.
def sort_kate_imports(add_imports=(), remove_imports=()): document = kate.activeDocument() view = document.activeView() position = view.cursorPosition() selection = view.selectionRange() sorter = SortImports(file_contents=document.text(), add_imports=add_imports, remove_imports=remove_imports, settings_path=os.path.dirname(os.path.abspath(str(document.url().path())))) document.setText(sorter.output) position.setLine(position.line() + sorter.length_change) if selection: start = selection.start() start.setLine(start.line() + sorter.length_change) end = selection.end() end.setLine(end.line() + sorter.length_change) selection.setRange(start, end) view.setSelection(selection) view.setCursorPosition(position)
Sorts imports within Kate while maintaining cursor position and selection, even if length of file changes.
def get_or_create_in_transaction_wrapper(tsession, model, values, missing_columns=[], variable_columns=[],
                                         updatable_columns=[], only_use_supplied_columns=False, read_only=False):
    return get_or_create_in_transaction(tsession, model, values,
                                        missing_columns=missing_columns,
                                        variable_columns=variable_columns,
                                        updatable_columns=updatable_columns,
                                        only_use_supplied_columns=only_use_supplied_columns,
                                        read_only=read_only)
This function can be used to determine which calling method is spending time in get_or_create_in_transaction when profiling the database API. Switch out calls to get_or_create_in_transaction to get_or_create_in_transaction_wrapper in the suspected functions to determine where the pain lies.
def draw_points(self, *points):
    point_array = ffi.new('SDL_Point[]', len(points))
    for i, p in enumerate(points):
        point_array[i] = p._ptr[0]
    check_int_err(lib.SDL_RenderDrawPoints(self._ptr, point_array, len(points)))
Draw multiple points on the current rendering target. Args: *points (Point): The points to draw. Raises: SDLError: If an error is encountered.
def move(self, x, y):
    SetWindowPos(self._hwnd, None, x, y, 0, 0, SWP_NOSIZE)
Move window top-left corner to position
def pages_breadcrumb(context, page, url='/'):
    lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
    page = get_page_from_string_or_id(page, lang)
    pages_navigation = None
    if page:
        pages_navigation = page.get_ancestors()

    context.update({'pages_navigation': pages_navigation, 'page': page})
    return context
Render a breadcrumb like menu. Override ``pages/breadcrumb.html`` if you want to change the design. :param page: the current page :param url: not used anymore
def register_created(cls, key, image):
    if key in cls._stock:
        logger.info('Warning, replacing resource ' + str(key))
    cls._stock[key] = {'type': 'created', 'image': image}
    logger.info('%s registered as %s' % ('data', key))
Register an already created image using key
def get_params(self):
    return self.timeout, self.xonxoff, self.rtscts, self.baudrate
Get parameters as a tuple. :return: timeout, xonxoff, rtscts, baudrate
def copy_dir( src_fs, src_path, dst_fs, dst_path, walker=None, on_copy=None, workers=0, ): on_copy = on_copy or (lambda *args: None) walker = walker or Walker() _src_path = abspath(normpath(src_path)) _dst_path = abspath(normpath(dst_path)) def src(): return manage_fs(src_fs, writeable=False) def dst(): return manage_fs(dst_fs, create=True) from ._bulk import Copier with src() as _src_fs, dst() as _dst_fs: with _src_fs.lock(), _dst_fs.lock(): _thread_safe = is_thread_safe(_src_fs, _dst_fs) with Copier(num_workers=workers if _thread_safe else 0) as copier: _dst_fs.makedir(_dst_path, recreate=True) for dir_path, dirs, files in walker.walk(_src_fs, _src_path): copy_path = combine(_dst_path, frombase(_src_path, dir_path)) for info in dirs: _dst_fs.makedir(info.make_path(copy_path), recreate=True) for info in files: src_path = info.make_path(dir_path) dst_path = info.make_path(copy_path) copier.copy(_src_fs, src_path, _dst_fs, dst_path) on_copy(_src_fs, src_path, _dst_fs, dst_path)
Copy a directory from one filesystem to another. Arguments: src_fs (FS or str): Source filesystem (instance or URL). src_path (str): Path to a directory on the source filesystem. dst_fs (FS or str): Destination filesystem (instance or URL). dst_path (str): Path to a directory on the destination filesystem. walker (~fs.walk.Walker, optional): A walker object that will be used to scan for files in ``src_fs``. Set this if you only want to consider a sub-set of the resources in ``src_fs``. on_copy (callable, optional): A function callback called after a single file copy is executed. Expected signature is ``(src_fs, src_path, dst_fs, dst_path)``. workers (int): Use ``worker`` threads to copy data, or ``0`` (default) for a single-threaded copy.
def calculate_anim(infiles, org_lengths): logger.info("Running ANIm") logger.info("Generating NUCmer command-lines") deltadir = os.path.join(args.outdirname, ALIGNDIR["ANIm"]) logger.info("Writing nucmer output to %s", deltadir) if not args.skip_nucmer: joblist = anim.generate_nucmer_jobs( infiles, args.outdirname, nucmer_exe=args.nucmer_exe, filter_exe=args.filter_exe, maxmatch=args.maxmatch, jobprefix=args.jobprefix, ) if args.scheduler == "multiprocessing": logger.info("Running jobs with multiprocessing") if args.workers is None: logger.info("(using maximum number of available " + "worker threads)") else: logger.info("(using %d worker threads, if available)", args.workers) cumval = run_mp.run_dependency_graph( joblist, workers=args.workers, logger=logger ) logger.info("Cumulative return value: %d", cumval) if 0 < cumval: logger.warning( "At least one NUCmer comparison failed. " + "ANIm may fail." ) else: logger.info("All multiprocessing jobs complete.") else: logger.info("Running jobs with SGE") logger.info("Jobarray group size set to %d", args.sgegroupsize) run_sge.run_dependency_graph( joblist, logger=logger, jgprefix=args.jobprefix, sgegroupsize=args.sgegroupsize, sgeargs=args.sgeargs, ) else: logger.warning("Skipping NUCmer run (as instructed)!") logger.info("Processing NUCmer .delta files.") results = anim.process_deltadir(deltadir, org_lengths, logger=logger) if results.zero_error: if not args.skip_nucmer and args.scheduler == "multiprocessing": if 0 < cumval: logger.error( "This has possibly been a NUCmer run failure, " + "please investigate" ) logger.error(last_exception()) sys.exit(1) else: logger.error( "This is possibly due to a NUCmer comparison " + "being too distant for use. Please consider " + "using the --maxmatch option." ) logger.error( "This is alternatively due to NUCmer run " + "failure, analysis will continue, but please " + "investigate." ) if not args.nocompress: logger.info("Compressing/deleting %s", deltadir) compress_delete_outdir(deltadir) return results
Returns ANIm result dataframes for files in input directory. - infiles - paths to each input file - org_lengths - dictionary of input sequence lengths, keyed by sequence Finds ANI by the ANIm method, as described in Richter et al (2009) Proc Natl Acad Sci USA 106: 19126-19131 doi:10.1073/pnas.0906412106. All FASTA format files (selected by suffix) in the input directory are compared against each other, pairwise, using NUCmer (which must be in the path). NUCmer output is stored in the output directory. The NUCmer .delta file output is parsed to obtain an alignment length and similarity error count for every unique region alignment between the two organisms, as represented by the sequences in the FASTA files. These are processed to give matrices of aligned sequence lengths, average nucleotide identity (ANI) percentages, coverage (aligned percentage of whole genome), and similarity error cound for each pairwise comparison.
def doc(self):
    return self._pv.doc or inspect.getdoc(
        getattr(type(self._target), self._pv.property, None)) or ''
Docstring of property on target or override specified on PV-object.
def read_csv_from_file(filename): logger_csvs.info("enter read_csv_from_file") d = {} l = [] try: logger_csvs.info("open file: {}".format(filename)) with open(filename, 'r') as f: r = csv.reader(f, delimiter=',') for idx, col in enumerate(next(r)): d[idx] = [] d = cast_values_csvs(d, idx, col) for row in r: for idx, col in enumerate(row): d = cast_values_csvs(d, idx, col) for idx, col in d.items(): l.append(col) except FileNotFoundError as e: print('CSV FileNotFound: ' + filename) logger_csvs.warn("read_csv_to_columns: FileNotFound: {}, {}".format(filename, e)) logger_csvs.info("exit read_csv_from_file") return l
Opens the target CSV file and creates a dictionary with one list for each CSV column. :param str filename: :return list of lists: column values
def dumps(self):
    string = ""
    if self.row_height is not None:
        row_height = Command('renewcommand', arguments=[
            NoEscape(r'\arraystretch'),
            self.row_height])
        string += row_height.dumps() + '%\n'
    if self.col_space is not None:
        col_space = Command('setlength', arguments=[
            NoEscape(r'\tabcolsep'),
            self.col_space])
        string += col_space.dumps() + '%\n'
    return string + super().dumps()
Turn the LaTeX object into a string in LaTeX format.
def _convert_bin_to_datelike_type(bins, dtype):
    if is_datetime64tz_dtype(dtype):
        bins = to_datetime(bins.astype(np.int64),
                           utc=True).tz_convert(dtype.tz)
    elif is_datetime_or_timedelta_dtype(dtype):
        bins = Index(bins.astype(np.int64), dtype=dtype)
    return bins
Convert bins to a DatetimeIndex or TimedeltaIndex if the original dtype is datelike Parameters ---------- bins : list-like of bins dtype : dtype of data Returns ------- bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is datelike
def words(self, quantity: int = 5) -> List[str]:
    words = self._data['words'].get('normal')
    words_list = [self.random.choice(words) for _ in range(quantity)]
    return words_list
Generate a list of random words. :param quantity: Quantity of words. Default is 5. :return: Word list. :Example: [science, network, god, octopus, love]
def offers(self, cursor=None, order='asc', limit=10, sse=False):
    return self.horizon.account_offers(self.address, cursor=cursor,
                                       order=order, limit=limit, sse=sse)
Retrieve the offers JSON from this instance's Horizon server. Retrieve the offers JSON response for the account associated with this :class:`Address`. :param cursor: A paging token, specifying where to start returning records from. When streaming this can be set to "now" to stream object created since your request time. :type cursor: int, str :param str order: The order in which to return rows, "asc" or "desc". :param int limit: Maximum number of records to return. :param bool sse: Use server side events for streaming responses.
def _getframe(level=0): if level < 0: level = 0 try: raise except: _, _, traceback = sys.exc_info() frame = traceback.tb_frame while ~level: frame = frame.f_back if frame is None: break level -= 1 finally: sys.exc_clear() if frame is None: raise ValueError('call stack is not deep enough') return frame
A reimplementation of `sys._getframe`. `sys._getframe` is a private function, and isn't guaranteed to exist in all versions and implementations of Python. This function is about 2 times slower than the native implementation. It relies on the assumption that the traceback objects have `tb_frame` attributes holding proper frame objects. :param level: The number of levels deep in the stack to return the frame from. Defaults to `0`. :returns: A frame object `levels` deep from the top of the stack.
def partition_distance(cx, cy): n = np.size(cx) _, cx = np.unique(cx, return_inverse=True) _, cy = np.unique(cy, return_inverse=True) _, cxy = np.unique(cx + cy * 1j, return_inverse=True) cx += 1 cy += 1 cxy += 1 Px = np.histogram(cx, bins=np.max(cx))[0] / n Py = np.histogram(cy, bins=np.max(cy))[0] / n Pxy = np.histogram(cxy, bins=np.max(cxy))[0] / n Hx = -np.sum(Px * np.log(Px)) Hy = -np.sum(Py * np.log(Py)) Hxy = -np.sum(Pxy * np.log(Pxy)) Vin = (2 * Hxy - Hx - Hy) / np.log(n) Min = 2 * (Hx + Hy - Hxy) / (Hx + Hy) return Vin, Min
This function quantifies the distance between pairs of community partitions with information theoretic measures. Parameters ---------- cx : Nx1 np.ndarray community affiliation vector X cy : Nx1 np.ndarray community affiliation vector Y Returns ------- VIn : Nx1 np.ndarray normalized variation of information MIn : Nx1 np.ndarray normalized mutual information Notes ----- (Definitions: VIn = [H(X) + H(Y) - 2MI(X,Y)]/log(n) MIn = 2MI(X,Y)/[H(X)+H(Y)] where H is entropy, MI is mutual information and n is number of nodes)
def _canceller_for(self, elts, event):
    def canceller():
        try:
            self._event_deque.remove(event)
            self._in_deque.remove(elts)
            return True
        except ValueError:
            return False

    return canceller
Obtains a cancellation function that removes elts. The returned cancellation function returns ``True`` if all elements were removed successfully from the _in_deque, and ``False`` if not.
def write_corrected(self, output, clobber=False): if self.flatcorr != 'COMPLETE': self.science = self.science / self.invflat self.err = self.err / self.invflat if self.flshcorr != 'COMPLETE': self.science = self.science + self.flash if self.darkcorr != 'COMPLETE': self.science = self.science + self.dark if (self.ampstring == 'ABCD'): tmp_1, tmp_2 = np.split(self.science, 2, axis=1) self.hdulist['sci', 1].data = tmp_1.copy() self.hdulist['sci', 2].data = tmp_2[::-1, :].copy() tmp_1, tmp_2 = np.split(self.err, 2, axis=1) self.hdulist['err', 1].data = tmp_1.copy() self.hdulist['err', 2].data = tmp_2[::-1, :].copy() else: self.hdulist['sci', 1].data = self.science.copy() self.hdulist['err', 1].data = self.err.copy() self.hdulist.writeto(output, overwrite=clobber)
Write out the destriped data.
def url(self):
    base_url = "https://lists.fedoraproject.org/archives"
    archived_at = self._get_archived_at()
    if archived_at and archived_at.startswith("<"):
        archived_at = archived_at[1:]
    if archived_at and archived_at.endswith(">"):
        archived_at = archived_at[:-1]
    if archived_at and archived_at.startswith("http"):
        return archived_at
    elif archived_at:
        return base_url + archived_at
    else:
        return None
A URL to the email in HyperKitty Returns: str or None: A relevant URL.
def require_app(app_name, api_style=False):
    iterable = (inspect.getmodule(frame[0]) for frame in inspect.stack())
    modules = [module for module in iterable if module is not None]
    if api_style:
        m = modules[2]
    else:
        m = modules[1]
    m._REQUIRED_APP = getattr(m, '_REQUIRED_APP', [])
    m._REQUIRED_APP.append(app_name)
    LOG.debug('require_app: %s is required by %s', app_name, m.__name__)
Request the application to be automatically loaded. If this is used for "api" style modules, which is imported by a client application, set api_style=True. If this is used for client application module, set api_style=False.
def name_insertion(sbjct_seq, codon_no, sbjct_nucs, aa_alt, start_offset):
    start_codon_no = codon_no - 1

    if len(sbjct_nucs) == 3:
        start_codon_no = codon_no

    start_codon = get_codon(sbjct_seq, start_codon_no, start_offset)
    end_codon = get_codon(sbjct_seq, codon_no, start_offset)
    pos_name = "p.%s%d_%s%dins%s" % (aa(start_codon), start_codon_no,
                                     aa(end_codon), codon_no, aa_alt)
    return pos_name
This function is used to name an insertion mutation based on the HGVS recommendation.
def assert_legal_arguments(kwargs): seen_layers = set() for k, v in kwargs.items(): if k not in LEGAL_ARGUMENTS: raise ValueError('Illegal argument <{0}>!'.format(k)) if k in AESTHETICS: if v in seen_layers: raise ValueError('Layer <{0}> mapped for more than a single aesthetic!'.format(v)) seen_layers.add(v) if k in VALUES: if not isinstance(v, six.string_types) and not isinstance(v, list): raise ValueError('Value <{0}> must be either string or list'.format(k)) if isinstance(v, list): if len(v) == 0: raise ValueError('Rules cannot be empty list') for rule_matcher, rule_value in v: if not isinstance(rule_matcher, six.string_types) or not isinstance(rule_value, six.string_types): raise ValueError('Rule tuple elements must be strings')
Assert that PrettyPrinter arguments are correct.

Raises
------
ValueError
    In case there are unknown arguments or a single layer is mapped to
    more than one aesthetic.
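For illustration, assuming 'color' and 'background' are among the AESTHETICS known to PrettyPrinter and 'words' is a layer name, mapping one layer to both aesthetics should fail:

# Both aesthetics point at the same layer name -> ValueError
assert_legal_arguments({'color': 'words', 'background': 'words'})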
def GetRequestFormatMode(request, method_metadata):
    if request.path.startswith("/api/v2/"):
        return JsonMode.PROTO3_JSON_MODE

    if request.args.get("strip_type_info", ""):
        return JsonMode.GRR_TYPE_STRIPPED_JSON_MODE

    for http_method, unused_url, options in method_metadata.http_methods:
        if (http_method == request.method and
                options.get("strip_root_types", False)):
            return JsonMode.GRR_ROOT_TYPES_STRIPPED_JSON_MODE

    return JsonMode.GRR_JSON_MODE
Returns JSON format mode corresponding to a given request and method.
def get_bins(self):
    if self.retrieved:
        raise errors.IllegalState('List has already been retrieved.')
    self.retrieved = True
    return objects.BinList(self._results, runtime=self._runtime)
Gets the bin list resulting from the search.

return: (osid.resource.BinList) - the bin list
raise:  IllegalState - list already retrieved
*compliance: mandatory -- This method must be implemented.*
def change_parent_of_project(self, ID, NewParrentID):
    log.info('Change parent for project %s to %s' % (ID, NewParrentID))
    data = {'parent_id': NewParrentID}
    self.put('projects/%s/change_parent.json' % ID, data)
Change parent of project.
def list(self):
    before, after = self.filename_template.split('%s', 1)
    filename_re = re.compile(r'%s(.{5,})%s$' % (re.escape(before),
                                                re.escape(after)))
    result = []
    for filename in os.listdir(self.path):
        # skip files that are still being written by an open transaction
        if filename.endswith(_fs_transaction_suffix):
            continue
        match = filename_re.match(filename)
        if match is not None:
            result.append(match.group(1))
    return result
Lists all sessions in the store.

.. versionadded:: 0.6
def preview(self, when=timezone.now(), **kwargs):
    return self.operate_on(when=when, apply=False, **kwargs)
Preview transactions, but don't actually save changes to the list.
def set_code(self, code):
    attributes = []
    strip = lambda s: s.strip('u').strip("'").strip('"')

    # Parse the attribute dicts out of the generated code string
    for attr_dict in parse_dict_strings(unicode(code).strip()[19:-1]):
        attrs = list(strip(s) for s in parse_dict_strings(attr_dict[1:-1]))
        attributes.append(dict(zip(attrs[::2], attrs[1::2])))

    if not attributes:
        return

    # The first attribute dict holds the figure attributes
    figure_attributes = attributes[0]

    for key, widget in self.figure_attributes_panel:
        try:
            obj = figure_attributes[key]
            kwargs_key = key + "_kwargs"
            if kwargs_key in figure_attributes:
                widget.set_kwargs(figure_attributes[kwargs_key])
        except KeyError:
            obj = ""
        widget.code = charts.object2code(key, obj)

    self.all_series_panel.update(attributes[1:])
Update widgets from code
def disassociate_health_monitor(self, pool, health_monitor):
    path = (self.disassociate_pool_health_monitors_path %
            {'pool': pool, 'health_monitor': health_monitor})
    return self.delete(path)
Disassociate specified load balancer health monitor and pool.
def _parse_fmt(fmt, color_key='colors', ls_key='linestyles', marker_key='marker'):
    try:
        color = mcolors.colorConverter.to_rgb(fmt)
    except ValueError:
        pass
    else:
        if fmt not in mlines.lineMarkers:
            return {color_key: color}

    result = dict()

    if fmt.find('--') >= 0:
        result[ls_key] = '--'
        fmt = fmt.replace('--', '')
    if fmt.find('-.') >= 0:
        result[ls_key] = '-.'
        fmt = fmt.replace('-.', '')
    if fmt.find(' ') >= 0:
        result[ls_key] = 'None'
        fmt = fmt.replace(' ', '')

    for c in list(fmt):
        if c in mlines.lineStyles:
            if ls_key in result:
                raise ValueError('Illegal format string; two linestyle symbols')
            result[ls_key] = c
        elif c in mlines.lineMarkers:
            if marker_key in result:
                raise ValueError('Illegal format string; two marker symbols')
            result[marker_key] = c
        elif c in mcolors.colorConverter.colors:
            if color_key in result:
                raise ValueError('Illegal format string; two color symbols')
            result[color_key] = c
        else:
            raise ValueError('Unrecognized character %c in format string' % c)

    return result
Modified from matplotlib's _process_plot_format function.
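Roughly, assuming matplotlib's mlines/mcolors are available as referenced in the body above, the helper splits a combined format string into its separate aesthetics, for example:

_parse_fmt('ro--')  # -> {'colors': 'r', 'marker': 'o', 'linestyles': '--'}
_parse_fmt('g')     # -> {'colors': (0.0, 0.5, 0.0)}, via colorConverter.to_rgb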
def add_subtask(self, task, params={}, **options):
    path = "/tasks/%s/subtasks" % (task)
    return self.client.post(path, params, **options)
Creates a new subtask and adds it to the parent task. Returns the full
record for the newly created subtask.

Parameters
----------
task : {Id} The task to add a subtask to.
[data] : {Object} Data for the request
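An illustrative call, assuming an asana.Client instance named client; the task id and payload below are made up:

subtask = client.tasks.add_subtask('12345', {'name': 'Draft release notes'})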
def get_options(silent=False, hook=True):
    options_ = {}
    if silent:
        options_['silent'] = silent
    if not hook:
        options_['hook'] = hook
    if options_:
        return '?' + urlencode(options_).lower()
    else:
        return ''
Generate a query string with the appropriate options.

:param silent: If set to true, the object will not be bumped up in the
    stream and notifications will not be generated.
:type silent: bool
:param hook: True if hooks should be executed for the change, false
    otherwise.
:type hook: bool
:return: The generated query string
:rtype: str
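A quick sketch of the expected output, following the code above:

get_options()                         # -> ''
get_options(silent=True, hook=False)  # -> '?silent=true&hook=false'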
def number_of_interactions(self, u=None, v=None, t=None):
    if t is None:
        if u is None:
            return int(self.size())
        elif u is not None and v is not None:
            if v in self._succ[u]:
                return 1
            else:
                return 0
    else:
        if u is None:
            return int(self.size(t))
        elif u is not None and v is not None:
            if v in self._succ[u]:
                if self.__presence_test(u, v, t):
                    return 1
                else:
                    return 0
Return the number of interactions between two nodes at time t.

Parameters
----------
u, v : nodes, optional (default=all interactions)
    If u and v are specified, return the number of interactions between
    u and v. Otherwise return the total number of all interactions.
t : snapshot id (default=None)
    If None, the number of edges on the flattened graph will be returned.

Returns
-------
nedges : int
    The number of interactions in the graph. If nodes u and v are
    specified, return the number of interactions between those nodes.
    If a single node is specified, return None.

See Also
--------
size

Examples
--------
>>> G = dn.DynDiGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.number_of_interactions()
3
>>> G.number_of_interactions(0,1, t=0)
1
>>> G.add_edge(3, 4, t=1)
>>> G.number_of_interactions()
4
def cmd_dhcp_discover(iface, timeout, verbose):
    conf.verb = False

    if iface:
        conf.iface = iface

    conf.checkIPaddr = False

    hw = get_if_raw_hwaddr(conf.iface)

    ether = Ether(dst="ff:ff:ff:ff:ff:ff")
    ip = IP(src="0.0.0.0", dst="255.255.255.255")
    udp = UDP(sport=68, dport=67)
    bootp = BOOTP(chaddr=hw)
    dhcp = DHCP(options=[("message-type", "discover"), "end"])

    dhcp_discover = ether / ip / udp / bootp / dhcp

    # honor the timeout argument instead of a hard-coded value
    ans, unans = srp(dhcp_discover, multi=True, timeout=timeout)

    for _, pkt in ans:
        if verbose:
            print(pkt.show())
        else:
            print(pkt.summary())
Send a DHCP request and show what devices have replied.

Note: Using '-v' you can see all the options (like DNS servers) included
on the responses.

\b
# habu.dhcp_discover
Ether / IP / UDP 192.168.0.1:bootps > 192.168.0.5:bootpc / BOOTP / DHCP
def hazard_notes(self):
    notes = []
    hazard = definition(self.hazard.keywords.get('hazard'))

    if 'notes' in hazard:
        notes += hazard['notes']

    if self.hazard.keywords['layer_mode'] == 'classified':
        if 'classified_notes' in hazard:
            notes += hazard['classified_notes']

    if self.hazard.keywords['layer_mode'] == 'continuous':
        if 'continuous_notes' in hazard:
            notes += hazard['continuous_notes']

    if self.hazard.keywords['hazard_category'] == 'single_event':
        if 'single_event_notes' in hazard:
            notes += hazard['single_event_notes']

    if self.hazard.keywords['hazard_category'] == 'multiple_event':
        if 'multi_event_notes' in hazard:
            notes += hazard['multi_event_notes']

    return notes
Get the hazard specific notes defined in definitions.

This method will do a lookup in definitions and return the hazard
definition specific notes dictionary.

This is a helper function to make it easy to get hazard specific notes
from the definitions metadata.

.. versionadded:: 3.5

:returns: A list like e.g. safe.definitions.hazard_land_cover['notes']
:rtype: list, None