Dataset preview (code/docstring pairs, 389k rows). Columns: an unnamed int64 row index (0–389k), code (strings of 26–79.6k characters), and docstring (strings of 1–46.9k characters). Rows 22,000–22,087 follow.
22,000
def _check_log_scale(base, sides, scales, coord):
    def is_log(trans):
        return (trans.__class__.__name__.startswith('log') and
                hasattr(trans, 'base'))

    base_x, base_y = base, base
    x_is_log = is_log(scales.x.trans)
    y_is_log = is_log(scales.y.trans)
    if isinstance(coord, coord_flip):
        x_is_log, y_is_log = y_is_log, x_is_log

    # 'b'/'t' mark the x-axis sides, 'l'/'r' the y-axis sides
    if 'b' in sides or 't' in sides:
        if base_x is None:
            base_x = scales.x.trans.base
        if not x_is_log:
            warnings.warn(
                "annotation_logticks for x-axis which does not have "
                "a log scale. The logticks may not make sense.",
                PlotnineWarning)
        elif x_is_log and base_x != scales.x.trans.base:
            warnings.warn(
                "The x-axis is log transformed in base {}, "
                "but the annotation_logticks are computed in base {}"
                "".format(base_x, scales.x.trans.base),
                PlotnineWarning)

    if 'l' in sides or 'r' in sides:
        if base_y is None:
            base_y = scales.y.trans.base
        if not y_is_log:
            warnings.warn(
                "annotation_logticks for y-axis which does not have "
                "a log scale. The logticks may not make sense.",
                PlotnineWarning)
        elif y_is_log and base_y != scales.y.trans.base:  # fixed: compared against scales.x before
            warnings.warn(
                "The y-axis is log transformed in base {}, "
                "but the annotation_logticks are computed in base {}"
                "".format(base_y, scales.y.trans.base),
                PlotnineWarning)

    return base_x, base_y
Check the log transforms Parameters ---------- base : float or None Base of the logarithm in which the ticks will be calculated. If ``None``, the base of the log transform the scale will be used. sides : str (default: bl) Sides onto which to draw the marks. Any combination chosen from the characters ``btlr``, for *bottom*, *top*, *left* or *right* side marks. If ``coord_flip()`` is used, these are the sides *after* the flip. scales : SimpleNamespace ``x`` and ``y`` scales. coord : coord Coordinate (e.g. coord_cartesian) system of the geom. Returns ------- out : tuple The bases (base_x, base_y) to use when generating the ticks.
22,001
def primers(dna, tm=65, min_len=10, tm_undershoot=1, tm_overshoot=3,
            end_gc=False, tm_parameters='cloning', overhangs=None,
            structure=False):
    # NOTE: the default for tm_parameters was stripped in extraction;
    # 'cloning' is assumed here.
    if not overhangs:
        overhangs = [None, None]
    templates = [dna, dna.reverse_complement()]
    primer_list = []
    for template, overhang in zip(templates, overhangs):
        primer_i = primer(template, tm=tm, min_len=min_len,
                          tm_undershoot=tm_undershoot,
                          tm_overshoot=tm_overshoot, end_gc=end_gc,
                          tm_parameters=tm_parameters, overhang=overhang,
                          structure=structure)
        primer_list.append(primer_i)
    return primer_list
Design primers for PCR amplifying any arbitrary sequence. :param dna: Input sequence. :type dna: coral.DNA :param tm: Ideal primer Tm in degrees C. :type tm: float :param min_len: Minimum primer length. :type min_len: int :param tm_undershoot: Allowed Tm undershoot. :type tm_undershoot: float :param tm_overshoot: Allowed Tm overshoot. :type tm_overshoot: float :param end_gc: Obey the 'end on G or C' rule. :type end_gc: bool :param tm_parameters: Melting temp calculator method to use. :type tm_parameters: string :param overhangs: 2-tuple of overhang sequences. :type overhangs: tuple :param structure: Evaluate each primer for structure, with warning for high structure. :type structure: bool :returns: A list of primers (the output of primer). :rtype: list
22,002
def init_layer(self):
    self.layer = self.vector.GetLayer()
    self.__features = [None] * self.nfeatures
Initialize a layer object.
22,003
def new_bundle(self, name: str, created_at: dt.datetime = None) -> models.Bundle:
    new_bundle = self.Bundle(name=name, created_at=created_at)
    return new_bundle
Create a new file bundle.
22,004
def describe_usage_plans(name=None, plan_id=None, region=None, key=None,
                         keyid=None, profile=None):
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        plans = _multi_call(conn.get_usage_plans, 'items')
        if name:
            plans = _filter_plans('name', name, plans)
        if plan_id:
            plans = _filter_plans('id', plan_id, plans)
        return {'plans': [_convert_datetime_str(plan) for plan in plans]}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
Returns a list of existing usage plans, optionally filtered to match a given plan name .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt myminion boto_apigateway.describe_usage_plans salt myminion boto_apigateway.describe_usage_plans name='usage plan name' salt myminion boto_apigateway.describe_usage_plans plan_id='usage plan id'
22,005
def _calc_thumb_filename(self, thumb_name):
    filename_split = self.name.rsplit('.', 1)
    file_name = filename_split[0]
    file_extension = self.get_thumbnail_format()
    # format string lost in extraction; '%s_%s.%s' assumed
    return '%s_%s.%s' % (file_name, thumb_name, file_extension)
Calculates the correct filename for a would-be (or potentially existing) thumbnail of the given size. NOTE: This includes the path leading up to the thumbnail, e.g. uploads/cbid_images/photo.png. size: (tuple) In the format of (width, height). Returns a string filename.
22,006
def calculate(self):
    self.rating = 0
    for f in self.factors:
        self._update_pref(f.min, f.max, self.world.tax_rate)
Calculates the estimated happiness of a person living in a world.
22,007
def values(self):
    tmp = self
    while tmp is not None:
        yield tmp.data
        tmp = tmp.next
Yield the node values in order.
22,008
def run_type(self):
    METAGGA_TYPES = {"TPSS", "RTPSS", "M06L", "MBJL", "SCAN", "MS0", "MS1", "MS2"}
    if self.parameters.get("LHFCALC", False):
        rt = "HF"
    elif self.parameters.get("METAGGA", "").strip().upper() in METAGGA_TYPES:
        rt = self.parameters["METAGGA"].strip().upper()
    elif self.parameters.get("LUSE_VDW", False):
        vdw_gga = {"RE": "DF", "OR": "optPBE", "BO": "optB88",
                   "MK": "optB86b", "ML": "DF2"}
        gga = self.parameters.get("GGA").upper()
        rt = "vdW-" + vdw_gga[gga]
    # POTCAR-symbol literal lost in extraction; an LDA-potential prefix is assumed
    elif self.potcar_symbols[0].split()[0] == "PAW_LDA":
        rt = "LDA"
    else:
        rt = "GGA"
    if self.is_hubbard:
        rt += "+U"
    return rt
Returns the run type. Currently supports LDA, GGA, vdW-DF and HF calcs. TODO: Fix for other functional types like PW91, other vdW types, etc.
22,009
def check_rdn_deposits(raiden, user_deposit_proxy: UserDeposit):
    while True:
        rei_balance = user_deposit_proxy.effective_balance(raiden.address, "latest")
        rdn_balance = to_rdn(rei_balance)
        if rei_balance < MIN_REI_THRESHOLD:
            # parts of the f-string were lost in extraction; the warning prefix
            # is reconstructed from the surviving fragments
            click.secho(
                (
                    f"WARNING: Your account's RDN balance of {rdn_balance} is below the "
                    f"minimum threshold. Provided that you have either a monitoring "
                    f"service or a path finding service activated, your node is not "
                    f"going to be able to pay those services which may lead to denial "
                    f"of service or loss of funds."
                ),
                fg='red',
            )
        gevent.sleep(CHECK_RDN_MIN_DEPOSIT_INTERVAL)
Check periodically for RDN deposits in the user-deposits contract
22,010
def _variants_fills(fields, fills, info_types):
    if fills is None:
        fills = dict()
    for f, vcf_type in zip(fields, info_types):
        if f == 'FILTER':
            fills[f] = False
        elif f not in fills:
            if f in config.STANDARD_VARIANT_FIELDS:
                fills[f] = config.DEFAULT_VARIANT_FILL[f]
            else:
                fills[f] = config.DEFAULT_FILL_MAP[vcf_type]
    fills = tuple(fills[f] for f in fields)
    return fills
Utility function to determine fill values for variants fields with missing values.
22,011
def add_message(self, msg_content, folder, **kwargs):
    # dict keys were stripped in extraction; 'm', 'l' (folder), 'content' and
    # '_content' follow the Zimbra AddMsgRequest convention and are assumed
    content = {'m': kwargs}
    content['m']['l'] = str(folder)
    content['m']['content'] = {'_content': msg_content}
    return self.request('AddMsg', content)
Inject a message :param string msg_content: The entire message's content. :param string folder: Folder pathname (starts with '/') or folder ID
22,012
def randomize_args(self):
    args = self.spiro_args + np.random.normal(0, self.move_radius,
                                              self.spiro_args.shape)
    np.clip(args, -199, 199, args)
    while args[0] == 0 or args[1] == 0:
        args = self.spiro_args + np.random.normal(0, self.move_radius,
                                                  self.spiro_args.shape)
        np.clip(args, -199, 199, args)
    return args
Get new parameters for spirograph generation near agent's current location (*spiro_args*).
22,013
def _read_output(self, stream, callback, output_file):
    if (callback is None and output_file is None) or stream.closed:
        return False
    line = stream.readline()
    if line:
        if callback is not None:
            callback(line.decode(), self._data, self._store,
                     self._signal, self._context)
        if output_file is not None:
            output_file.write(line)
        return True
    else:
        return False
Read the output of the process, executed the callback and save the output. Args: stream: A file object pointing to the output stream that should be read. callback(callable, None): A callback function that is called for each new line of output. output_file: A file object to which the full output is written. Returns: bool: True if a line was read from the output, otherwise False.
22,014
def _get_lsun(directory, category, split_name): generator_utils.maybe_download(directory, _LSUN_DATA_FILENAME % (category, split_name), _LSUN_URL % (category, split_name))
Downloads all lsun files to directory unless they are there.
22,015
def evaluator(evaluate):
    @functools.wraps(evaluate)
    def inspyred_evaluator(candidates, args):
        fitness = []
        for candidate in candidates:
            fitness.append(evaluate(candidate, args))
        return fitness
    inspyred_evaluator.single_evaluation = evaluate
    return inspyred_evaluator
Return an inspyred evaluator function based on the given function. This function generator takes a function that evaluates only one candidate. The generator handles the iteration over each candidate to be evaluated. The given function ``evaluate`` must have the following signature:: fitness = evaluate(candidate, args) This function is most commonly used as a function decorator with the following usage:: @evaluator def evaluate(candidate, args): # Implementation of evaluation pass The generated function also contains an attribute named ``single_evaluation`` which holds the original evaluation function. In this way, the original single-candidate function can be retrieved if necessary.
22,016
def info(self, channel_name):
    channel_id = self.get_channel_id(channel_name)
    self.params.update({'channel': channel_id})
    # endpoint URL lost in extraction; reconstructed from the docstring below
    return FromUrl('https://slack.com/api/channels.info', self._requests)(data=self.params).get()
https://api.slack.com/methods/channels.info
22,017
async def resume_dialog(self, dc, reason: DialogReason, result: object): return await dc.EndDialog(result)
Method called when an instance of the dialog is being returned to from another dialog that was started by the current instance using `begin_dialog()`. If this method is NOT implemented then the dialog will be automatically ended with a call to `end_dialog()`. Any result passed from the called dialog will be passed to the current dialog's parent. :param dc: The dialog context for the current turn of conversation. :param reason: Reason why the dialog resumed. :param result: (Optional) value returned from the dialog that was called. The type of the value returned is dependent on the dialog that was called. :return:
22,018
def _years_in_date_range_within_decade(self, decade, begin_date, end_date):
    begin_year = begin_date.year
    end_year = end_date.year
    if begin_year < decade:
        begin_year = decade
    if end_year > decade + 9:
        end_year = decade + 9
    return list(range(begin_year, end_year + 1))
Return a list of years in one decade which is covered by date range.
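A quick worked example of the clipping logic, written as a standalone sketch (the free function below is hypothetical; the method above works the same way with `decade` as the decade's first year):

import datetime as dt

def years_in_decade(decade, begin_date, end_date):
    # clip the date range's years to [decade, decade + 9]
    begin_year = max(begin_date.year, decade)
    end_year = min(end_date.year, decade + 9)
    return list(range(begin_year, end_year + 1))

print(years_in_decade(1990, dt.date(1988, 6, 1), dt.date(1994, 2, 1)))
# -> [1990, 1991, 1992, 1993, 1994]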
22,019
def select(self, select, table_name, where=None, extra=None):
    self.verify_table_existence(table_name)
    return self.execute_query(
        six.text_type(Select(select, table_name, where, extra)),
        logging.getLogger().findCaller(),
    )
Send a SELECT query to the database. :param str select: Attribute for the ``SELECT`` query. :param str table_name: |arg_select_table_name| :param where: |arg_select_where| :type where: |arg_where_type| :param str extra: |arg_select_extra| :return: Result of the query execution. :rtype: sqlite3.Cursor :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error|
22,020
def chunk(self, seek=None, lenient=False):
    self.validate_signature()
    while True:
        # If the file format is correct, the next chunk length/type
        # should already be buffered in self.atchunk.
        if not self.atchunk:
            self.atchunk = self.chunklentype()
        length, chunk_type = self.atchunk
        self.atchunk = None
        data = self.file.read(length)
        if len(data) != length:
            raise ChunkError('Chunk %s too short for required %i octets.'
                             % (chunk_type, length))
        checksum = self.file.read(4)
        if len(checksum) != 4:
            raise ChunkError('Chunk %s too short for checksum.', chunk_type)
        if seek and chunk_type != seek:
            continue
        verify = zlib.crc32(strtobytes(chunk_type))
        verify = zlib.crc32(data, verify)
        # Whether the output from zlib.crc32 is signed varies by platform
        # and Python version; normalise to an unsigned 32-bit value.
        verify &= 2**32 - 1
        verify = struct.pack('!I', verify)
        if checksum != verify:
            (a, ) = struct.unpack('!I', checksum)
            (b, ) = struct.unpack('!I', verify)
            message = ("Checksum error in %s chunk: 0x%08X != 0x%08X."
                       % (chunk_type, a, b))
            if lenient:
                warnings.warn(message, RuntimeWarning)
            else:
                raise ChunkError(message)
        return chunk_type, data
Read the next PNG chunk from the input file returns a (*chunk_type*, *data*) tuple. *chunk_type* is the chunk's type as a byte string (all PNG chunk types are 4 bytes long). *data* is the chunk's data content, as a byte string. If the optional `seek` argument is specified then it will keep reading chunks until it either runs out of file or finds the chunk_type specified by the argument. Note that in general the order of chunks in PNGs is unspecified, so using `seek` can cause you to miss chunks. If the optional `lenient` argument evaluates to `True`, checksum failures will raise warnings rather than exceptions.
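A minimal usage sketch, assuming this method lives on a pypng-style Reader whose constructor takes a filename (the filename, the bytes chunk types, and the expectation that IHDR comes first are assumptions, not from the source):

import png

reader = png.Reader(filename='image.png')
chunk_type, data = reader.chunk()          # normally the IHDR chunk first
# skip ahead to the palette, tolerating checksum errors along the way
plte_type, plte_data = reader.chunk(seek=b'PLTE', lenient=True)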
22,021
def _check_section_option(self, section, option):
    if section is None:
        section = self.DEFAULT_SECTION_NAME
    elif not is_text_string(section):
        raise RuntimeError("Argument must be a string")
    if not is_text_string(option):
        raise RuntimeError("Argument must be a string")
    return section
Private method to check section and option types
22,022
def defaultAutoRangeMethods(inspector, intialItems=None):
    rangeFunctions = OrderedDict({} if intialItems is None else intialItems)
    # label literal lost in extraction; 'use all data' assumed
    rangeFunctions['use all data'] = partial(inspectorDataRange, inspector, 0.0)
    for percentage in [0.1, 0.2, 0.5, 1, 2, 5, 10, 20]:
        label = "discard {}%".format(percentage)
        rangeFunctions[label] = partial(inspectorDataRange, inspector, percentage)
    return rangeFunctions
Creates an ordered dict with default autorange methods for an inspector. :param inspector: the range methods will work on (the sliced array) of this inspector. :param intialItems: will be passed on to the OrderedDict constructor.
22,023
def get_freesasa_annotations(self, include_hetatms=False,
                             representatives_only=True, force_rerun=False):
    for g in tqdm(self.genes):
        g.protein.get_freesasa_annotations(include_hetatms=include_hetatms,
                                           representative_only=representatives_only,
                                           force_rerun=force_rerun)
Run freesasa on structures and store calculations. Annotations are stored in the protein structure's chain sequence at: ``<chain_prop>.seq_record.letter_annotations['*-freesasa']`` Args: include_hetatms (bool): If HETATMs should be included in calculations. Defaults to ``False``. representative_only (bool): If analysis should only be run on the representative structure force_rerun (bool): If calculations should be rerun even if an output file exists
22,024
def rec2csv(r, filename):
    names = r.dtype.names

    def translate(x):
        # fixed: original compared the method object (str(x).lower) instead
        # of calling it
        if x is None or str(x).lower() == "none":
            x = ""
        return str(x)

    with open(filename, "w") as csv:
        csv.write(",".join([str(x) for x in names]) + "\n")
        for data in r:
            csv.write(",".join([translate(x) for x in data]) + "\n")
    return filename
Export a recarray *r* to a CSV file *filename*
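A short usage sketch; the recarray below is hypothetical. Note how the translate helper maps None (and the string "None") to an empty CSV field:

import numpy as np

r = np.rec.fromrecords([(1, 'alpha'), (2, 'None')], names='id,label')
rec2csv(r, 'out.csv')
# out.csv:
# id,label
# 1,alpha
# 2,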
22,025
def __process_by_python(self):
    self.__scores = {}
    for k in range(self.__kmin, self.__kmax):
        clusters = self.__calculate_clusters(k)
        if len(clusters) != k:
            self.__scores[k] = float('nan')
            continue
        score = silhouette(self.__data, clusters).process().get_score()
        self.__scores[k] = sum(score) / len(score)
        if self.__scores[k] > self.__score:
            self.__score = self.__scores[k]
            self.__amount = k
! @brief Performs processing using python code.
22,026
def _prerun(self):
    self.check_required_params()
    self._set_status("RUNNING")
    logger.debug(
        "{}.PreRun: {}[{}]: running...".format(
            self.__class__.__name__, self.__class__.path, self.uuid
        ),
        extra=dict(
            kmsg=Message(
                self.uuid, entrypoint=self.__class__.path, params=self.params
            ).dump()
        )
    )
    return self.prerun()
To execute before running message
22,027
def owner(self):
    obj = javabridge.call(self.jobject, "getOwner",
                          "()Lweka/core/CapabilitiesHandler;")
    if obj is None:
        return None
    else:
        return JavaObject(jobject=obj)
Returns the owner of these capabilities, if any. :return: the owner, can be None :rtype: JavaObject
22,028
def findItems(self, data, cls=None, initpath=None, **kwargs):
    # filter on cls attrs if specified
    if cls and cls.TAG and 'tag' not in kwargs:
        kwargs['etag'] = cls.TAG
    if cls and cls.TYPE and 'type' not in kwargs:
        kwargs['type'] = cls.TYPE
    items = []
    for elem in data:
        if self._checkAttrs(elem, **kwargs):
            item = self._buildItemOrNone(elem, cls, initpath)
            if item is not None:
                items.append(item)
    return items
Load the specified data to find and build all items with the specified tag and attrs. See :func:`~plexapi.base.PlexObject.fetchItem` for more details on how this is used.
22,029
def create_apirack(self): return ApiRack( self.networkapi_url, self.user, self.password, self.user_ldap)
Get an instance of Api Rack Variables services facade.
22,030
def get_users_batch(self, ids):
    assert len(ids) <= 50
    ids_ = ','.join(ids)
    url = _USERS_BATCH.format(c_api=_C_API_BEGINNING, api=_API_VERSION,
                              ids=ids_, at=self.access_token)
    return _get_request(url)
ids: a list of user ids (at most 50) to fetch in one batch request.
22,031
def source(self):
    full_src = self._source[self._slice]

    def is_empty_or_comment(line):
        return line.strip() == '' or line.strip().startswith('#')

    filtered_src = dropwhile(is_empty_or_comment, reversed(full_src))
    return ''.join(reversed(list(filtered_src)))
Return the source code for the definition.
22,032
def not_(self):
    ret_obj = {}
    for k, v in self.obj.items():
        if not isinstance(v, dict):
            ret_obj[k] = {'$ne': v}
            continue
        num_ops = len([x for x in v if x[0] == '$'])
        if num_ops != len(v) and num_ops != 0:
            # message lost in extraction; wording assumed
            raise BadQueryException('Cannot negate a mix of operators and values')
        if num_ops == 0:
            ret_obj[k] = {'$ne': v}
            continue
        for op, value in v.items():
            k_dict = ret_obj.setdefault(k, {})
            not_dict = k_dict.setdefault('$not', {})
            not_dict[op] = value
    return QueryExpression(ret_obj)
Negates this instance's query expression using MongoDB's ``$not`` operator **Example**: ``(User.name == 'Jeff').not_()`` .. note:: Another usage is via an operator, but parens are needed to get past precedence issues: ``~ (User.name == 'Jeff')``
22,033
def minimize(grad_and_hessian_loss_fn,
             x_start,
             tolerance,
             l1_regularizer,
             l2_regularizer=None,
             maximum_iterations=1,
             maximum_full_sweeps_per_iteration=1,
             learning_rate=None,
             name=None):
    graph_deps = [
        x_start,
        l1_regularizer,
        l2_regularizer,
        maximum_iterations,
        maximum_full_sweeps_per_iteration,
        tolerance,
        learning_rate,
    ]
    with tf.compat.v1.name_scope(name, 'minimize', graph_deps):
        def _loop_cond(x_start, converged, iter_):
            del x_start
            return tf.logical_and(iter_ < maximum_iterations,
                                  tf.logical_not(converged))

        def _loop_body(x_start, converged, iter_):
            g, h_outer, h_middle = grad_and_hessian_loss_fn(x_start)
            x_start, converged, _ = minimize_one_step(
                gradient_unregularized_loss=g,
                hessian_unregularized_loss_outer=h_outer,
                hessian_unregularized_loss_middle=h_middle,
                x_start=x_start,
                l1_regularizer=l1_regularizer,
                l2_regularizer=l2_regularizer,
                maximum_full_sweeps=maximum_full_sweeps_per_iteration,
                tolerance=tolerance,
                learning_rate=learning_rate)
            return x_start, converged, iter_ + 1

        return tf.while_loop(
            cond=_loop_cond,
            body=_loop_body,
            loop_vars=[
                x_start,
                tf.zeros([], np.bool, name='converged'),
                tf.zeros([], np.int32, name='iter'),
            ])
Minimize using Hessian-informed proximal gradient descent. This function solves the regularized minimization problem ```none argmin{ Loss(x) + l1_regularizer * ||x||_1 + l2_regularizer * ||x||_2**2 : x in R^n } ``` where `Loss` is a convex C^2 function (typically, `Loss` is the negative log likelihood of a model and `x` is a vector of model coefficients). The `Loss` function does not need to be supplied directly, but this optimizer does need a way to compute the gradient and Hessian of the Loss function at a given value of `x`. The gradient and Hessian are often computationally expensive, and this optimizer calls them relatively few times compared with other algorithms. Args: grad_and_hessian_loss_fn: callable that takes as input a (batch of) `Tensor` of the same shape and dtype as `x_start` and returns the triple `(gradient_unregularized_loss, hessian_unregularized_loss_outer, hessian_unregularized_loss_middle)` as defined in the argument spec of `minimize_one_step`. x_start: (Batch of) vector-shaped, `float` `Tensor` representing the initial value of the argument to the `Loss` function. tolerance: scalar, `float` `Tensor` representing the tolerance for each optimization step; see the `tolerance` argument of `minimize_one_step`. l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1 regularization term (see equation above). l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2 regularization term (see equation above). Default value: `None` (i.e., no L2 regularization). maximum_iterations: Python integer specifying the maximum number of iterations of the outer loop of the optimizer. After this many iterations of the outer loop, the algorithm will terminate even if the return value `optimal_x` has not converged. Default value: `1`. maximum_full_sweeps_per_iteration: Python integer specifying the maximum number of sweeps allowed in each iteration of the outer loop of the optimizer. Passed as the `maximum_full_sweeps` argument to `minimize_one_step`. Default value: `1`. learning_rate: scalar, `float` `Tensor` representing a multiplicative factor used to dampen the proximal gradient descent steps. Default value: `None` (i.e., factor is conceptually `1`). name: Python string representing the name of the TensorFlow operation. The default name is `"minimize"`. Returns: x: `Tensor` of the same shape and dtype as `x_start`, representing the (batches of) computed values of `x` which minimizes `Loss(x)`. is_converged: scalar, `bool` `Tensor` indicating whether the minimization procedure converged within the specified number of iterations across all batches. Here convergence means that an iteration of the inner loop (`minimize_one_step`) returns `True` for its `is_converged` output value. iter: scalar, `int` `Tensor` indicating the actual number of iterations of the outer loop of the optimizer completed (i.e., number of calls to `minimize_one_step` before achieving convergence). #### References [1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization Paths for Generalized Linear Models via Coordinate Descent. _Journal of Statistical Software_, 33(1), 2010. https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf [2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for L1-regularized Logistic Regression. _Journal of Machine Learning Research_, 13, 2012. http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf
22,034
def to_unicode(string):
    assert isinstance(string, basestring)
    if sys.version_info[0] >= 3:
        if isinstance(string, bytes):
            return string.decode('utf-8')  # codec literal lost in extraction; utf-8 assumed
        else:
            return string
    else:
        if isinstance(string, str):
            return string.decode('utf-8')
        else:
            return string
Convert a string (bytes, str or unicode) to unicode.
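Behaviour sketch, under the assumption above that the stripped codec literal was 'utf-8':

to_unicode(b'caf\xc3\xa9')   # Python 3 bytes -> 'café'
to_unicode('café')           # already text   -> returned unchanged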
22,035
def _replace_placeholder_with(self, element):
    element._nvXxPr.nvPr._insert_ph(self._element.ph)
    self._element.addprevious(element)
    self._element.getparent().remove(self._element)
    self._element = None
Substitute *element* for this placeholder element in the shapetree. This placeholder's `._element` attribute is set to |None| and its original element is free for garbage collection. Any attribute access (including a method call) on this placeholder after this call raises |AttributeError|.
22,036
def connect(cls, host, public_key, private_key, verbose=0, use_cache=True): return cls(host, public_key, private_key, verbose, use_cache)
Connect the client with the given host and the provided credentials. Parameters ---------- host : str The Cytomine host (without protocol). public_key : str The Cytomine public key. private_key : str The Cytomine private key. verbose : int The verbosity level of the client. use_cache : bool True to use HTTP cache, False otherwise. Returns ------- client : Cytomine A connected Cytomine client.
22,037
def process_jwt(jwt):
    header, claims, _ = jwt.split('.')
    parsed_header = json_decode(base64url_decode(header))
    parsed_claims = json_decode(base64url_decode(claims))
    return parsed_header, parsed_claims
Process a JSON Web Token without verifying it. Call this before :func:`verify_jwt` if you need access to the header or claims in the token before verifying it. For example, the claims might identify the issuer such that you can retrieve the appropriate public key. :param jwt: The JSON Web Token to verify. :type jwt: str or unicode :rtype: tuple :returns: ``(header, claims)``
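A usage sketch for the key-selection workflow the docstring describes; the claim name, the lookup helper, and the follow-up verify_jwt call shape are assumptions about the surrounding API, not from the source:

header, claims = process_jwt(token)
# e.g. header == {'alg': 'RS256', 'typ': 'JWT'}
key = key_for_issuer(claims['iss'])   # hypothetical key-lookup helper
# then verify for real with the library's verify_jwt and the selected key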
22,038
def getAllViewsAsDict(self): return {p: self._plugins[p].get_views() for p in self._plugins}
Return all the stats views (dict).
22,039
def add_z(xy: np.ndarray, z: float) -> np.ndarray:
    # `insert` is numpy.insert (assumed imported as `from numpy import insert`)
    interm = insert(xy, 2, [0, 0, 0], axis=1)      # add a zero z-column
    xyz = insert(interm, 2, [0, 0, 1, z], axis=0)  # add the [0, 0, 1, z] row
    return xyz.round(11)
Turn a 2-D transform matrix into a 3-D transform matrix (scale/shift only, no rotation). :param xy: A two-dimensional transform matrix (a 3x3 numpy ndarray) in the following form: [ 1 0 x ] [ 0 1 y ] [ 0 0 1 ] :param z: a float for the z component :return: a three-dimensional transformation matrix (a 4x4 numpy ndarray) with x, y, and z from the function parameters, in the following form: [ 1 0 0 x ] [ 0 1 0 y ] [ 0 0 1 z ] [ 0 0 0 1 ]
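A worked example matching the docstring's matrices:

import numpy as np

xy = np.array([[1.0, 0.0,  2.5],
               [0.0, 1.0, -3.0],
               [0.0, 0.0,  1.0]])
add_z(xy, 4.25)
# array([[ 1.  ,  0.  ,  0.  ,  2.5 ],
#        [ 0.  ,  1.  ,  0.  , -3.  ],
#        [ 0.  ,  0.  ,  1.  ,  4.25],
#        [ 0.  ,  0.  ,  0.  ,  1.  ]])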
22,040
def get_transcript_ids(ensembl, gene_id):
    ensembl_genes = ensembl.get_genes_for_hgnc_id(gene_id)
    transcript_ids = ensembl.get_transcript_ids_for_ensembl_gene_ids(
        ensembl_genes, [gene_id])
    alt_symbols = []
    if len(transcript_ids) == 0:
        alt_symbols = ensembl.get_previous_symbol(gene_id)
        genes = [ensembl.get_genes_for_hgnc_id(symbol) for symbol in alt_symbols]
        genes = [item for sublist in genes for item in sublist]
        ensembl_genes += genes
        symbols = [gene_id] + alt_symbols
        transcript_ids = ensembl.get_transcript_ids_for_ensembl_gene_ids(
            ensembl_genes, symbols)
    return get_transcript_lengths(ensembl, transcript_ids)
gets transcript IDs for a gene. Args: ensembl: EnsemblRequest object to request data from ensembl gene_id: HGNC symbol for gene Returns: dictionary of transcript ID: transcript lengths for all transcripts for a given HGNC symbol.
22,041
def capture(self, event_type, date=None, context=None, custom=None,
            stack=None, handled=True, **kwargs):
    if event_type == "Exception":
        stack = False
    data = self._build_msg_for_logging(
        event_type, date=date, context=context, custom=custom, stack=stack,
        handled=handled, **kwargs
    )
    if data:
        self.queue(ERROR, data, flush=not handled)
        return data["id"]
Captures and processes an event and pipes it off to Client.send.
22,042
def exists(self, table_id):
    from google.api_core.exceptions import NotFound

    table_ref = self.client.dataset(self.dataset_id).table(table_id)
    try:
        self.client.get_table(table_ref)
        return True
    except NotFound:
        return False
    except self.http_error as ex:
        self.process_http_error(ex)
Check if a table exists in Google BigQuery Parameters ---------- table : str Name of table to be verified Returns ------- boolean true if table exists, otherwise false
22,043
def build_header(self, title):
    # NOTE: the header label literals were lost in extraction; the markdown
    # labels below are assumed reconstructions.
    header = ['',
              '# ' + title,
              'Author: ' + self.user,
              '',
              'Created: ' + str(self.date_created),
              'Updated: ' + str(self.date_updated),
              '',
              '',
              ]
    self.out = header + self.out
Generate the header for the Markdown file.
22,044
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None,
                        immutable=False, proxy=None, wallet_keys=None,
                        config_dir=None ):
    # NOTE: dict-key and env-var literals were stripped in extraction;
    # 'error', 'data', 'warning' and 'BLOCKSTACK_TEST' below are assumed.
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)
    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None

    if not immutable:
        # look up the mutable key record
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name,
                                               proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict
    else:
        # look up the immutable key record
        dead_pubkey_dict = client.get_immutable( blockchain_id, None,
                                                 data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv

    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"

    # remove the key data itself
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name,
                                        proxy=proxy, wallet_keys=wallet_keys )
    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name,
                                          wallet_keys=wallet_keys, proxy=proxy )

    if 'error' in result:
        return result

    # unstash the local private key
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id)
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            raise

    return result
Remove an application GPG key. Unstash the local private key. Return {'status': True, ...} on success Return {'error': ...} on error If immutable is True, then remove the data from the user's zonefile, not profile. The delete may take on the order of an hour to complete on the blockchain. A transaction ID will be returned to you on successful deletion, and it will be up to you to wait for the transaction to get confirmed.
22,045
def _timedatectl():
    ret = __salt__['cmd.run_all'](['timedatectl'], python_shell=False)
    if ret['retcode'] != 0:
        msg = 'timedatectl failed: {0}'.format(ret['stderr'])
        raise CommandExecutionError(msg)
    return ret
get the output of timedatectl
22,046
def _lincomb(self, a, x1, b, x2, out): self.tspace._lincomb(a, x1.tensor, b, x2.tensor, out.tensor)
Raw linear combination.
22,047
def format2(self, raw, out=None, scheme=''):
    string_output = 0
    if out == 'str' or self.out == 'str' or \
       isinstance(self.out, StringIO.StringIO):
        # capture the output into a string instead of a stream
        out_old = self.out
        self.out = StringIO.StringIO()
        string_output = 1
    elif out is not None:
        self.out = out

    # fast return of the unmodified input for the NoColor scheme
    if scheme == 'NoColor':
        error = False
        self.out.write(raw)
        if string_output:
            return raw, error
        else:
            return None, error

    # local shorthands
    colors = self.color_table[scheme].colors
    self.colors = colors

    # remove trailing whitespace and normalize tabs
    self.raw = raw.expandtabs().rstrip()

    # store line offsets in self.lines
    self.lines = [0, 0]
    pos = 0
    raw_find = self.raw.find
    lines_append = self.lines.append
    while 1:
        pos = raw_find('\n', pos) + 1
        if not pos:
            break
        lines_append(pos)
    lines_append(len(self.raw))

    # parse the source and write it
    self.pos = 0
    text = StringIO.StringIO(self.raw)

    error = False
    try:
        for atoken in generate_tokens(text.readline):
            self(*atoken)
    except tokenize.TokenError as ex:
        msg = ex.args[0]
        line = ex.args[1][0]
        self.out.write("%s\n\n*** ERROR: %s%s%s\n" %
                       (colors[token.ERRORTOKEN],
                        msg, self.raw[self.lines[line]:],
                        colors.normal))
        error = True
    self.out.write(colors.normal + '\n')
    if string_output:
        output = self.out.getvalue()
        self.out = out_old
        return (output, error)
    return (None, error)
Parse and send the colored source. If out and scheme are not specified, the defaults (given to constructor) are used. out should be a file-type object. Optionally, out can be given as the string 'str' and the parser will automatically return the output in a string.
22,048
def _ref_prop_matches(prop, target_classname, ref_classname, resultclass_names,
                      role):
    assert prop.type == 'reference'
    if prop.reference_class.lower() == target_classname.lower():
        if resultclass_names and ref_classname not in resultclass_names:
            return False
        if role and prop.name.lower() != role:
            return False
        return True
    return False
Test filters for a reference property Returns `True` if matches the criteria. Returns `False` if it does not match. The match criteria are: - target_classname == prop_reference_class - if result_classes are not None, ref_classname is in result_classes - If role is not None, prop name matches role
22,049
def _make_tuple(self, env):
    t = runtime.Tuple(self, env, dict2tuple)
    schema = schema_spec_from_tuple(t)
    t.attach_schema(schema)
    return t
Instantiate the Tuple based on this TupleNode.
22,050
def select(self):
    if sys.platform == 'win32':  # platform literal lost in extraction; 'win32' assumed
        self.dc.SelectObject(self.bitmap)
        self.IsSelected = True
Select the current bitmap into this wxDC instance
22,051
def volume_present(name, volume_name=None, volume_id=None, instance_name=None,
                   instance_id=None, device=None, size=None, snapshot_id=None,
                   volume_type=None, iops=None, encrypted=False, kms_key_id=None,
                   region=None, key=None, keyid=None, profile=None):
    # NOTE: most string literals were stripped in extraction; the dict keys
    # and messages below are reconstructed from the surviving fragments and
    # may differ in detail from the original.
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    old_dict = {}
    new_dict = {}
    running_states = ('running', 'stopped')  # state names assumed

    if not salt.utils.data.exactly_one((volume_name, volume_id)):
        raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id' "
                                  "must be provided.")
    if not salt.utils.data.exactly_one((instance_name, instance_id)):
        raise SaltInvocationError("Exactly one of 'instance_name' or "
                                  "'instance_id' must be provided.")
    if device is None:
        raise SaltInvocationError("Parameter 'device' is required.")
    args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
    if instance_name:
        instance_id = __salt__['boto_ec2.get_id'](
            name=instance_name, in_states=running_states, **args)
        if not instance_id:
            raise SaltInvocationError(
                'Instance with Name {0} not found.'.format(instance_name))

    instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id,
                                                    return_objs=True, **args)
    instance = instances[0]
    if volume_name:
        filters = {}
        filters.update({'tag:Name': volume_name})
        vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args)
        if len(vols) > 1:
            msg = ("More than one volume matched volume name {0}, "
                   "can't continue in state {1}".format(volume_name, name))
            raise SaltInvocationError(msg)
        if len(vols) < 1:
            if __opts__['test']:
                ret['comment'] = ('The volume with name {0} is set to be created '
                                  'and attached on {1}({2}).'.format(
                                      volume_name, instance_id, device))
                ret['result'] = None
                return ret
            _rt = __salt__['boto_ec2.create_volume'](
                zone_name=instance.placement, size=size, snapshot_id=snapshot_id,
                volume_type=volume_type, iops=iops, encrypted=encrypted,
                kms_key_id=kms_key_id, wait_for_creation=True, **args)
            if 'result' in _rt:
                volume_id = _rt['result']
            else:
                raise SaltInvocationError(
                    'Error creating volume with name {0}.'.format(volume_name))
            _rt = __salt__['boto_ec2.set_volumes_tags'](
                filters={'volume_ids': [volume_id]},
                tags={'Name': volume_name}, **args)
            if _rt['success'] is False:
                raise SaltInvocationError(
                    'Error updating requested volume {0} with name {1}. '
                    '{2}'.format(volume_id, volume_name, _rt['comment']))
        else:
            volume_id = vols[0]

    vols = __salt__['boto_ec2.get_all_volumes'](volume_ids=[volume_id],
                                                return_objs=True, **args)
    if len(vols) < 1:
        raise SaltInvocationError('Volume {0} do not exist'.format(volume_id))
    vol = vols[0]
    if vol.zone != instance.placement:
        raise SaltInvocationError(
            'Volume {0} in {1} cannot attach to instance {2} in {3}.'.format(
                volume_id, vol.zone, instance_id, instance.placement))
    attach_data = vol.attach_data
    if attach_data is not None and attach_data.instance_id is not None:
        if instance_id == attach_data.instance_id and device == attach_data.device:
            ret['comment'] = 'The volume {0} is attached on {1}({2}).'.format(
                volume_id, instance_id, device)
            return ret
        else:
            if __opts__['test']:
                ret['comment'] = ('The volume {0} is set to be detached from '
                                  '{1}({2} and attached on {3}({4}).'.format(
                                      volume_id, attach_data.instance_id,
                                      attach_data.device, instance_id, device))
                ret['result'] = None
                return ret
            if __salt__['boto_ec2.detach_volume'](volume_id=volume_id,
                                                  wait_for_detachement=True,
                                                  **args):
                ret['comment'] = 'Volume {0} is detached from {1}({2}).'.format(
                    volume_id, attach_data.instance_id, attach_data.device)
                old_dict['instance_id'] = attach_data.instance_id
                old_dict['device'] = attach_data.device
            else:
                raise SaltInvocationError(
                    ('The volume {0} is already attached on instance {1}({2}).'
                     ' Failed to detach').format(volume_id,
                                                 attach_data.instance_id,
                                                 attach_data.device))
    if __opts__['test']:
        ret['comment'] = 'The volume {0} is set to be attached on {1}({2}).'.format(
            volume_id, instance_id, device)
        ret['result'] = None
        return ret
    if __salt__['boto_ec2.attach_volume'](volume_id=volume_id,
                                          instance_id=instance_id,
                                          device=device, **args):
        ret['comment'] = ' '.join([
            ret['comment'],
            'Volume {0} is attached on {1}({2}).'.format(volume_id,
                                                         instance_id, device)])
        new_dict['instance_id'] = instance_id
        new_dict['device'] = device
        ret['changes'] = {'old': old_dict, 'new': new_dict}
    else:
        ret['comment'] = 'Error attaching volume {0} to instance {1}({2}).'.format(
            volume_id, instance_id, device)
        ret['result'] = False
    return ret
Ensure the EC2 volume is present and attached. .. name State definition name. volume_name The Name tag value for the volume. If no volume with that matching name tag is found, a new volume will be created. If multiple volumes are matched, the state will fail. volume_id Resource ID of the volume. Exclusive with 'volume_name'. instance_name Attach volume to instance with this Name tag. Exclusive with 'instance_id'. instance_id Attach volume to instance with this ID. Exclusive with 'instance_name'. device The device on the instance through which the volume is exposed (e.g. /dev/sdh) size The size of the new volume, in GiB. If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. snapshot_id The snapshot ID from which the new Volume will be created. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. volume_type The type of the volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. Valid volume types for AWS can be found here: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html iops The provisioned IOPS you want to associate with this volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. encrypted Specifies whether the volume should be encrypted. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. kms_key_id If encrypted is True, this KMS Key ID may be specified to encrypt volume with this key. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
22,052
def parse(data=None, template=None, data_file=None, template_file=None,
          interp=None, debug=False, predefines=True, int3=True,
          keep_successful=False, printf=True):
    if data is None and data_file is None:
        raise Exception("No input data was specified")
    if data is not None and data_file is not None:
        raise Exception("Only one input data may be specified")

    if isinstance(data, six.string_types):
        data = six.StringIO(data)
    if data_file is not None:
        data = open(os.path.expanduser(data_file), "rb")

    if template is None and template_file is None:
        raise Exception("No template specified!")
    if template is not None and template_file is not None:
        raise Exception("Only one template may be specified!")

    orig_filename = "string"
    if template_file is not None:
        orig_filename = template_file
        try:
            with open(os.path.expanduser(template_file), "r") as f:
                template = f.read()
        except Exception as e:
            raise Exception("Could not open template file '{}'".format(template_file))

    # the caller may supply their own interpreter instance
    if interp is None:
        interp = pfp.interp.PfpInterp(debug=debug, parser=PARSER, int3=int3)

    data = BitwrappedStream(data)
    dom = interp.parse(data, template, predefines=predefines,
                       orig_filename=orig_filename,
                       keep_successful=keep_successful, printf=printf)

    # close the data stream if a data_file was specified
    if data_file is not None:
        data.close()

    return dom
Parse the data stream using the supplied template. The data stream WILL NOT be automatically closed. :data: Input data, can be either a string or a file-like object (StringIO, file, etc) :template: template contents (str) :data_file: PATH to the data to be used as the input stream :template_file: template file path :interp: the interpreter to be used (a default one will be created if ``None``) :debug: if debug information should be printed while interpreting the template (false) :predefines: if built-in type information should be inserted (true) :int3: if debugger breaks are allowed while interpreting the template (true) :keep_successful: return any successfully parsed data instead of raising an error. If an error occurred and ``keep_successful`` is True, then ``_pfp__error`` will contain the exception object :printf: if ``False``, all calls to ``Printf`` (:any:`pfp.native.compat_interface.Printf`) will be noops. (default=``True``) :returns: pfp DOM
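A minimal usage sketch based on the docstring; the file paths and the trivial template contents are placeholders, not from the source:

import pfp

# parse a file on disk against a 010-style template file
dom = pfp.parse(data_file='image.png', template_file='png_template.bt')

# or parse an in-memory string against an inline template
dom = pfp.parse(data='\x01\x02', template='uchar a; uchar b;')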
22,053
def get_current_price(crypto, fiat, services=None, convert_to=None,
                      helper_prices=None, **modes):
    # NOTE: dict-key and mode-name literals were stripped in extraction;
    # 'crypto', 'fiat', 'convert_to', 'current_price', '*',
    # 'report_services' and the composite crypto list are assumed.
    fiat = fiat.lower()
    args = {'crypto': crypto, 'fiat': fiat, 'convert_to': convert_to}
    if not services:
        services = get_optimal_services(crypto, 'current_price')

    if fiat in services:
        # try services that quote this fiat directly
        try_services = services[fiat]
        result = _try_price_fetch(try_services, args, modes)
        if not isinstance(result, Exception):
            return result

    if '*' in services:
        # then try the wildcard (any fiat) services
        try_services = services['*']
        result = _try_price_fetch(try_services, args, modes)
        if not isinstance(result, Exception):
            return result

    def _do_composite_price_fetch(crypto, convert_crypto, fiat, helpers, modes):
        before = modes.get('report_services', False)
        modes['report_services'] = True
        services1, converted_price = get_current_price(crypto, convert_crypto, **modes)
        if not helpers or convert_crypto not in helpers[fiat]:
            services2, fiat_price = get_current_price(convert_crypto, fiat, **modes)
        else:
            services2, fiat_price = helpers[fiat][convert_crypto]
        modes['report_services'] = before

        if modes.get('report_services', False):
            serv = CompositeService(services1, services2, convert_crypto)
            return [serv], converted_price * fiat_price
        else:
            return converted_price * fiat_price

    all_composite_cryptos = ['btc', 'ltc', 'doge', 'uno']
    if crypto in all_composite_cryptos:
        all_composite_cryptos.remove(crypto)

    for composite_attempt in all_composite_cryptos:
        if composite_attempt in services and services[composite_attempt]:
            result = _do_composite_price_fetch(
                crypto, composite_attempt, fiat, helper_prices, modes
            )
            if not isinstance(result, Exception):
                return result

    raise result
High level function for getting current exchange rate for a cryptocurrency. If the fiat value is not explicitly defined, it will try the wildcard service. if that does not work, it tries converting to an intermediate cryptocurrency if available.
22,054
def add_email_address(self, email, hidden=None):
    existing_emails = get_value(self.obj, 'email_addresses', [])
    found_email = next(
        (existing_email for existing_email in existing_emails
         if existing_email.get('value') == email),
        None
    )
    if found_email is None:
        new_email = {'value': email}
        if hidden is not None:
            new_email['hidden'] = hidden
        self._append_to('email_addresses', new_email)
    elif hidden is not None:
        found_email['hidden'] = hidden
Add email address. Args: :param email: email of the author. :type email: string :param hidden: if email is public or not. :type hidden: boolean
22,055
def brokers(self): return set(self._brokers.values()) or set(self._bootstrap_brokers.values())
Get all BrokerMetadata Returns: set: {BrokerMetadata, ...}
22,056
def bfx(value, msb, lsb):
    mask = bitmask((msb, lsb))
    return (value & mask) >> lsb
! @brief Extract a value from a bitfield.
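A worked example, assuming bitmask((msb, lsb)) builds a mask with bits msb..lsb (inclusive) set:

# bits [10:8] of 0x0700 are 0b111
assert bfx(0x0700, 10, 8) == 0x7
# bits [3:0] of 0xA5 are 0x5
assert bfx(0xA5, 3, 0) == 0x5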
22,057
def _to_bel_lines_header(graph) -> Iterable[str]:
    # comment format string lost in extraction; wording assumed
    yield '# This document was created by PyBEL v{} and bel-resources v{} on {}\n'.format(
        VERSION, bel_resources.constants.VERSION, time.asctime()
    )
    yield from make_knowledge_header(
        namespace_url=graph.namespace_url,
        namespace_patterns=graph.namespace_pattern,
        annotation_url=graph.annotation_url,
        annotation_patterns=graph.annotation_pattern,
        annotation_list=graph.annotation_list,
        **graph.document
    )
Iterate the lines of a BEL graph's corresponding BEL script's header. :param pybel.BELGraph graph: A BEL graph
22,058
def _lu_reconstruct_assertions(lower_upper, perm, validate_args):
    assertions = []

    message = 'Input `lower_upper` must have at least 2 dimensions.'
    if lower_upper.shape.ndims is not None:
        if lower_upper.shape.ndims < 2:
            raise ValueError(message)
    elif validate_args:
        assertions.append(
            tf.compat.v1.assert_rank_at_least(lower_upper, rank=2, message=message))

    message = '`rank(lower_upper)` must equal `rank(perm) + 1`.'
    if lower_upper.shape.ndims is not None and perm.shape.ndims is not None:
        if lower_upper.shape.ndims != perm.shape.ndims + 1:
            raise ValueError(message)
    elif validate_args:
        assertions.append(
            tf.compat.v1.assert_rank(
                lower_upper, rank=tf.rank(perm) + 1, message=message))

    message = '`lower_upper` must be square.'
    if lower_upper.shape[:-2].is_fully_defined():
        if lower_upper.shape[-2] != lower_upper.shape[-1]:
            raise ValueError(message)
    elif validate_args:
        m, n = tf.split(tf.shape(input=lower_upper)[-2:], num_or_size_splits=2)
        assertions.append(tf.compat.v1.assert_equal(m, n, message=message))

    return assertions
Returns list of assertions related to `lu_reconstruct` assumptions.
22,059
def is_model_mpttmeta_subclass(node):
    if node.name != 'MPTTMeta' or not isinstance(node.parent, ClassDef):
        return False
    # The six qualified parent-class name literals were lost in extraction;
    # the MPTT model paths below are assumed reconstructions.
    parents = ('django.db.models.base.Model',
               '.Model',
               'django.db.models.Model',
               'mptt.models.MPTTModel',
               '.MPTTModel',
               'mptt.models.base.MPTTModel')
    return node_is_subclass(node.parent, *parents)
Checks that node is derived from the MPTTMeta class.
22,060
def _remap_input(self, operation, path, *args, **kw):
    if operation in self.write_ops and not self._ok(path):
        self._violation(operation, os.path.realpath(path), *args, **kw)
    return path
Called for path inputs
22,061
def from_path_by_criterion(dir_path, criterion, keepboth=False):
    if keepboth:
        fc_yes, fc_no = FileCollection(), FileCollection()
        for winfile in FileCollection.yield_all_winfile(dir_path):
            if criterion(winfile):
                fc_yes.files.setdefault(winfile.abspath, winfile)
            else:
                fc_no.files.setdefault(winfile.abspath, winfile)
        return fc_yes, fc_no
    else:
        fc = FileCollection()
        for winfile in FileCollection.yield_all_winfile(dir_path):
            if criterion(winfile):
                fc.files.setdefault(winfile.abspath, winfile)
        return fc
Create a new FileCollection, and select some files from ``dir_path``. How to construct your own criterion function:: def filter_image(winfile): if winfile.ext in [".jpg", ".png", ".bmp"]: return True else: return False fc = FileCollection.from_path_by_criterion(dir_path, filter_image) :param dir_path: path of a directory :type dir_path: string :param criterion: customize filter function :type criterion: function :param keepboth: if True, returns two file collections, one is files with criterion=True, another is False. :type keepboth: boolean (Translated from Chinese:) Directly select all files under dir_path and build a FileCollection according to the rules in criterion.
22,062
def pkt_check(*args, func=None):
    func = func or inspect.stack()[2][3]
    for var in args:
        # NOTE: the packet-field key literals were stripped in extraction;
        # the names below are assumed reconstructions.
        dict_check(var, func=func)
        dict_check(var.get('frame'), func=func)
        enum_check(var.get('protocol'), func=func)
        real_check(var.get('timestamp'), func=func)
        ip_check(var.get('src'), var.get('dst'), func=func)
        bool_check(var.get('syn'), var.get('ack'), func=func)
        int_check(var.get('srcport'), var.get('dstport'), var.get('index'), func=func)
Check if arguments are valid packets.
22,063
def fit(self, index, n_nodes, tau_matrix, previous_tree, edges=None):
    self.level = index + 1
    self.n_nodes = n_nodes
    self.tau_matrix = tau_matrix
    self.previous_tree = previous_tree
    self.edges = edges or []
    if not self.edges:
        if self.level == 1:
            self.u_matrix = previous_tree
            self._build_first_tree()
        else:
            self._build_kth_tree()
        self.prepare_next_tree()
    self.fitted = True
Fits tree object. Args: :param index: index of the tree :param n_nodes: number of nodes in the tree :tau_matrix: kendall's tau matrix of the data :previous_tree: tree object of previous level :type index: int :type n_nodes: int :type tau_matrix: np.ndarray of size n_nodes*n_nodes
22,064
def validate_empty_values(self, data):
    if self.read_only:
        return (True, self.get_default())

    if data is empty:
        if getattr(self.root, 'partial', False):
            # When doing partial validation, omitted fields should be skipped.
            raise SkipField()
        if self.required:
            self.fail('required')
        return (True, self.get_default())

    if data is None:
        if not self.allow_null:
            self.fail('null')
        return (True, None)

    return (False, data)
Validate empty values, and either: * Raise `ValidationError`, indicating invalid data. * Raise `SkipField`, indicating that the field should be ignored. * Return (True, data), indicating an empty value that should be returned without any further validation being applied. * Return (False, data), indicating a non-empty value, that should have validation applied as normal.
22,065
def collect_gaps(blast, use_subject=False):
    key = lambda x: x.sstart if use_subject else x.qstart
    blast.sort(key=key)
    for a, b in zip(blast, blast[1:]):
        if use_subject:
            if a.sstop < b.sstart:
                yield b.sstart - a.sstop
        else:
            if a.qstop < b.qstart:
                yield b.qstart - a.qstop
Collect the gaps between adjacent HSPs in the BLAST file.
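A self-contained sketch with hypothetical HSP records (only the four coordinate attributes the generator reads are needed):

from collections import namedtuple

HSP = namedtuple('HSP', 'qstart qstop sstart sstop')
hsps = [HSP(1, 100, 1, 100), HSP(150, 300, 120, 270)]
print(list(collect_gaps(hsps)))                     # [50]  (query-coordinate gap)
print(list(collect_gaps(hsps, use_subject=True)))   # [20]  (subject-coordinate gap)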
22,066
def activate(request, activation_key,
             template_name='accounts/activation_fail.html',
             success_url=None, extra_context=None):
    user = AccountsSignup.objects.activate_user(activation_key)
    if user:
        # sign the user in
        auth_user = authenticate(identification=user.email, check_password=False)
        login(request, auth_user)
        if accounts_settings.ACCOUNTS_USE_MESSAGES:
            # success-message wording lost in extraction; assumed
            messages.success(request,
                             _('Your account has been activated and you have been signed in.'),
                             fail_silently=True)
        if success_url:
            redirect_to = success_url % {'username': user.username}
        else:
            redirect_to = reverse('accounts_profile_detail',
                                  kwargs={'username': user.username})
        return redirect(redirect_to)
    else:
        if not extra_context:
            extra_context = dict()
        return ExtraContextTemplateView.as_view(template_name=template_name,
                                                extra_context=extra_context)(request)
Activate a user with an activation key. The key is a SHA1 string. When the SHA1 is found with an :class:`AccountsSignup`, the :class:`User` of that account will be activated. After a successful activation the view will redirect to ``success_url``. If the SHA1 is not found, the user will be shown the ``template_name`` template displaying a fail message. :param activation_key: String of a SHA1 string of 40 characters long. A SHA1 is always 160bit long, with 4 bits per character this makes it --160/4-- 40 characters long. :param template_name: String containing the template name that is used when the ``activation_key`` is invalid and the activation fails. Defaults to ``accounts/activation_fail.html``. :param success_url: String containing the URL where the user should be redirected to after a successful activation. Will replace ``%(username)s`` with string formatting if supplied. If ``success_url`` is left empty, will direct to ``accounts_profile_detail`` view. :param extra_context: Dictionary containing variables which could be added to the template context. Default to an empty dictionary.
22,067
def newDocText(self, content):
    ret = libxml2mod.xmlNewDocText(self._o, content)
    if ret is None:
        raise treeError('xmlNewDocText() failed')
    __tmp = xmlNode(_obj=ret)
    return __tmp
Creation of a new text node within a document.
22,068
def create_info_endpoint(self, name, data):
    # make sure data is serializable
    data = make_serializable(data)

    # create a generic restful resource that serves the static info data
    class InfoBase(Resource):
        @staticmethod
        def get():
            return data

    def info_factory(name):
        # subclass so flask-restful sees a unique class name per endpoint
        class NewClass(InfoBase):
            pass
        NewClass.__name__ = "{}_{}".format(name, InfoBase.__name__)
        return NewClass

    # route and log-message literals lost in extraction; assumed below
    path = '/info/{}'.format(name)
    self.api.add_resource(info_factory(name), path)
    logger.info('Registered informational resource at {}'.format(path))
    logger.debug('Endpoint {} will serve:\n{}'.format(path, data))
Create an endpoint to serve info GET requests.
22,069
async def set_tz(self):
    settings = await self.api.account.settings.get()
    tz = settings.time_zone.tzinfo_name
    os.environ['TZ'] = tz
    time.tzset()
set the environment timezone to the timezone set in your twitter settings
22,070
def _set_opts(self, schema=None, **options):
    if schema is not None:
        self.schema(schema)
    for k, v in options.items():
        if v is not None:
            self.option(k, v)
Set named options (filter out those the value is None)
22,071
def compare(string1, string2):
    # izip comes from itertools on Python 2 (use zip on Python 3)
    if len(string1) != len(string2):
        return False
    result = True
    # always walk the full string so comparison time does not depend
    # on where the first mismatch occurs
    for c1, c2 in izip(string1, string2):
        result &= c1 == c2
    return result
Compare two strings while protecting against timing attacks :param str string1: the first string :param str string2: the second string :returns: True if the strings are equal, False if not :rtype: :obj:`bool`
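Usage sketch; on Python 3 one would use zip (or prefer hmac.compare_digest outright):

compare('s3cret', 's3cret')   # True
compare('s3cret', 's3cres')   # False
compare('s3cret', 'short')    # False (length mismatch returns early)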
22,072
def get_cost_per_mol(self, comp):
    comp = comp if isinstance(comp, Composition) else Composition(comp)
    decomp = self.get_lowest_decomposition(comp)
    return sum(k.energy_per_atom * v * comp.num_atoms
               for k, v in decomp.items())
Get best estimate of minimum cost/mol based on known data Args: comp: Composition as a pymatgen.core.structure.Composition Returns: float of cost/mol
22,073
def prune_neighbors(self):
    def _neighbor_check(neighbors, valid):
        if not neighbors == neighbors:  # NaN check
            return np.nan
        valid_keys = set(valid) & set(neighbors.keys())
        d = dict([(k, v) for k, v in neighbors.items() if k in valid_keys])
        return d

    fixed = self.copy()
    valid = self.get_valid_cell_indecies()
    # NOTE: column-name literals were stripped in extraction; 'cell_index',
    # 'neighbors', 'valid_cell_indecies' and 'new_neighbors' are assumed.
    valid = pd.DataFrame(self).merge(valid, on=self.frame_columns).\
        set_index(self.frame_columns + ['cell_index'])
    valid = valid.apply(lambda x: _neighbor_check(x['neighbors'],
                                                  x['valid_cell_indecies']), 1).\
        reset_index().rename(columns={0: 'new_neighbors'})
    fixed = fixed.merge(valid, on=self.frame_columns + ['cell_index']).\
        drop(columns='neighbors').rename(columns={'new_neighbors': 'neighbors'})
    fixed.microns_per_pixel = self.microns_per_pixel
    fixed.db = self.db
    return fixed
If the CellDataFrame has been subsetted, some of the cell-cell contacts may no longer be part of the dataset. This prunes those no-longer-existent connections. Returns: CellDataFrame: A CellDataFrame with only valid cell-cell contacts
22,074
def html_error_template():
    import mako.template
    # The (long) raw template source string was lost in extraction and is
    # elided here; see the docstring below for its behaviour and options.
    return mako.template.Template(r"""...""",
                                  output_encoding=sys.getdefaultencoding(),
                                  encoding_errors='htmlentityreplace')
Provides a template that renders a stack trace in an HTML format, providing an excerpt of code as well as substituting source template filenames, line numbers and code for that of the originating source template, as applicable. The template's default ``encoding_errors`` value is ``'htmlentityreplace'``. The template has two options. With the ``full`` option disabled, only a section of an HTML document is returned. With the ``css`` option disabled, the default stylesheet won't be included.
22,075
def data(self, index, role):
    if role == Qt.DisplayRole and \
       index.row() < len(self.words):
        text = self.words[index.row()]
        typed = text[:len(self._typedText)]
        canComplete = text[len(self._typedText):
                           len(self._typedText) + len(self.canCompleteText)]
        rest = text[len(self._typedText) + len(self.canCompleteText):]
        if canComplete:
            # highlight the completable part; the original HTML markup string
            # was lost in extraction and is reconstructed here
            return '<html>' \
                   '%s' \
                   '<font color="#e80000">%s</font>' \
                   '%s' \
                   '</html>' % (typed, canComplete, rest)
        else:
            return typed + rest
    else:
        return None
QAbstractItemModel method implementation
22,076
def kill_pane(self, pane):
    assert isinstance(pane, Pane)
    if not pane.process.is_terminated:
        pane.process.kill()
    self.arrangement.remove_pane(pane)
Kill the given pane, and remove it from the arrangement.
22,077
def _getconf(self, rscpath, logger=None, conf=None):
    result = None
    resource = self.pathresource(rscpath=rscpath, logger=logger)
    if resource is not None:
        for cname in self._cnames(resource=resource):
            category = Category(name=cname)
            if result is None:
                result = Configuration()
            result += category
            for param in self._params(resource=resource, cname=cname):
                if conf is not None:
                    confparam = None
                    if cname in conf and param.name in conf[cname]:
                        confparam = conf[cname][param.name]
                    else:
                        confparam = conf.param(pname=param.name)
                    if confparam is not None:
                        svalue = param.svalue
                        param.update(confparam)
                        if svalue is not None:
                            param.svalue = svalue
                            param.resolve()
                category += param
    return result
Get specific conf from one driver path. :param str rscpath: resource path. :param Logger logger: logger to use.
22,078
def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    from boto.mturk.connection import MTurkConnection
    return MTurkConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
:type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.mturk.connection.MTurkConnection` :return: A connection to MTurk
22,079
def delete_editor(userid):
    url = _make_del_account_url(userid)
    return _process_resp(url,
                         get_sea_resource(url),
                         _is_editor_deleted)
:param userid: a string representing the user's UW NetID :return: True if request is successful, False otherwise. raise DataFailureException or a corresponding TrumbaException if the request failed or an error code has been returned.
22,080
def send_to_observer(self, msg, frm):
    logger.debug("{} sending message to observer: {}".
                 format(self, (msg, frm)))
    self._observer.append_input(msg, frm)
Send the message to the observer. :param msg: the message to send :param frm: the name of the node which sent this `msg`
22,081
def subsample(self, proposals, targets):
    labels, regression_targets = self.prepare_targets(proposals, targets)
    sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)

    proposals = list(proposals)
    for labels_per_image, regression_targets_per_image, proposals_per_image in zip(
        labels, regression_targets, proposals
    ):
        proposals_per_image.add_field("labels", labels_per_image)
        proposals_per_image.add_field(
            "regression_targets", regression_targets_per_image
        )

    for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
        zip(sampled_pos_inds, sampled_neg_inds)
    ):
        img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)
        proposals_per_image = proposals[img_idx][img_sampled_inds]
        proposals[img_idx] = proposals_per_image

    self._proposals = proposals
    return proposals
This method performs the positive/negative sampling, and return the sampled proposals. Note: this function keeps a state. Arguments: proposals (list[BoxList]) targets (list[BoxList])
22,082
def get_item_project(self, eitem):
    project = None
    eitem_project = {}
    ds_name = self.get_connector_name()
    if ds_name not in self.prjs_map:
        return eitem_project
    # field-name literal lost in extraction; 'hashtags_analyzed' assumed
    for tag in eitem['hashtags_analyzed']:
        tags2project = CaseInsensitiveDict(self.prjs_map[ds_name])
        if tag in tags2project:
            project = tags2project[tag]
            break
    if project is None:
        project = DEFAULT_PROJECT
    eitem_project = {"project": project}
    eitem_project.update(self.add_project_levels(project))
    return eitem_project
Get project mapping enrichment field. Twitter mappings are pretty special, so this needs a special implementation.
22,083
def ListNames(self):
    if not self.IsDirectory():
        return
    if self.hive is None:
        for name in dir(winreg):
            if name.startswith("HKEY_"):
                yield name
        return
    try:
        with OpenKey(self.hive, self.local_path) as key:
            (self.number_of_keys, self.number_of_values,
             self.last_modified) = QueryInfoKey(key)
            found_keys = set()
            for i in range(self.number_of_keys):
                try:
                    key_name = EnumKey(key, i)
                    found_keys.add(key_name)
                    yield key_name
                except OSError:
                    pass
            for i in range(self.number_of_values):
                try:
                    name, unused_value, unused_value_type = EnumValue(key, i)
                    if name not in found_keys:
                        yield name
                except OSError:
                    pass
    except OSError as e:
        raise IOError("Unable to list key %s: %s" % (self.key_name, e))
List the names of all keys and values.
22,084
def print_help(self):
    # NOTE: the format-string literals were stripped in extraction; the
    # templates and section titles below are assumed reconstructions.
    print('%s %s' % (self._title or self._name, self._version or ''))
    if self._usage:
        print('Usage: %s' % self._usage)
    else:
        cmd = self._name
        if hasattr(self, '_parent') and isinstance(self._parent, Command):
            cmd = '%s %s' % (self._parent._name, cmd)
        if self._command_list:
            usage = '%s <command> [option]' % cmd
        else:
            usage = '%s [option]' % cmd
        pos = ' '.join(['<%s>' % name for name in self._positional_list])
        print('Usage: %s %s' % (usage, pos))

    arglen = max(len(o.name) for o in self._option_list)
    arglen += 2

    self.print_title('Options:')
    for o in self._option_list:
        print('  %s %s' % (_pad(o.name, arglen), o.description or ''))
    print()

    if self._command_list:
        self.print_title('Commands:')
        for cmd in self._command_list:
            if isinstance(cmd, Command):
                name = _pad(cmd._name, arglen)
                desc = cmd._description or ''
                print('  %s %s' % (_pad(name, arglen), desc))
        print()

    if self._help_footer:
        print(self._help_footer)
        print()
    return self
Print the help menu.
22,085
def _complete_type_chain(self, symbol, fullsymbol): target, targmod = self._get_chain_parent_symbol(symbol, fullsymbol) if target is None: return {} result = {} if symbol != "": if self.context.el_call != "sub": for mkey in target.members: if self._symbol_in(symbol, mkey): result[mkey] = target.members[mkey] for ekey in target.executables: if (self._symbol_in(symbol, ekey)): if self.context.el_call == "sub": if (isinstance(target.executables[ekey], Subroutine)): result[ekey] = target.executables[ekey] else: if (isinstance(target.executables[ekey], Function)): result[ekey] = target.executables[ekey] else: if self.context.el_call != "sub": result.update(target.members) subdict = {k: target.executables[k] for k in target.executables if isinstance(target.executables[k].target, Function)} result.update(subdict) else: subdict = {k: target.executables[k] for k in target.executables if isinstance(target.executables[k].target, Subroutine)} result.update(subdict) return result
Suggests completion for the end of a type chain.
22,086
def device_query_update(self, query_id, body, **kwargs):
    # The dict key and the get() key were stripped from the source; the
    # names below follow the usual swagger-codegen pattern and the
    # docstring's `asynchronous=True` convention, so treat them as
    # assumptions.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.device_query_update_with_http_info(query_id, body, **kwargs)
    else:
        data = self.device_query_update_with_http_info(query_id, body, **kwargs)
        return data
Update a device query # noqa: E501 Update a specific device query. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.device_query_update(query_id, body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str query_id: (required) :param DeviceQueryPostPutRequest body: Device query update object. (required) :return: DeviceQuery If the method is called asynchronously, returns the request thread.
22,087
def compute_Pi_V_given_J(self, CDR3_seq, V_usage_mask, J_usage_mask):
    Pi_V_given_J = [np.zeros((4, len(CDR3_seq)*3)) for i in J_usage_mask]
    alignment_lengths = []
    for V_in in V_usage_mask:
        try:
            cutV_gen_seg = self.cutV_genomic_CDR3_segs[V_in]
        except IndexError:
            # Message assumed; the original print literal was stripped.
            print('Check provided V usage mask. Index not found in genomic V alleles; skipping.')
            continue
        current_alignment_length = self.max_nt_to_aa_alignment_left(CDR3_seq, cutV_gen_seg)
        alignment_lengths += [current_alignment_length]
        current_Pi_V = np.zeros((4, len(CDR3_seq)*3))
        if current_alignment_length > 0:
            current_Pi_V[:, :current_alignment_length] = self.PVdelV_nt_pos_vec[V_in][:, :current_alignment_length]
            for pos in range(1, current_alignment_length, 3):
                # Floor division keeps the codon index an int under Python 3.
                current_Pi_V[:, pos] = self.PVdelV_2nd_nt_pos_per_aa_vec[V_in][CDR3_seq[pos // 3]][:, pos]
            for j, J_in in enumerate(J_usage_mask):
                Pi_V_given_J[j][:, :current_alignment_length] += self.PVJ[V_in, J_in]*current_Pi_V[:, :current_alignment_length]
    return Pi_V_given_J, max(alignment_lengths)
Compute Pi_V conditioned on J. This function returns the Pi array from the model factors of the V genomic contributions, P(V, J)*P(delV|V). This corresponds to V(J)_{x_1}. For clarity in parsing the algorithm implementation, we include which instance attributes are used in the method as 'parameters.' Parameters ---------- CDR3_seq : str CDR3 sequence composed of 'amino acids' (single character symbols each corresponding to a collection of codons as given by codons_dict). V_usage_mask : list Indices of the V alleles to be considered in the Pgen computation J_usage_mask : list Indices of the J alleles to be considered in the Pgen computation self.cutV_genomic_CDR3_segs : list of strings List of all the V genomic nucleotide sequences trimmed to begin at the conserved C residue and with the maximum number of palindromic insertions appended. self.PVdelV_nt_pos_vec : list of ndarrays For each V allele, format P(delV|V) into the correct form for a Pi array or V(J)_{x_1}. This is only done for the first and last position in each codon. self.PVdelV_2nd_nt_pos_per_aa_vec : list of dicts For each V allele, and each 'amino acid', format P(V)*P(delV|V) for positions in the middle of a codon into the correct form for a Pi array or V(J)_{x_1} given the 'amino acid'. self.PVJ : ndarray Joint probability distribution of V and J, P(V, J). Returns ------- Pi_V_given_J : list List of (4, 3L) ndarrays corresponding to V(J)_{x_1}. max_V_align: int Maximum alignment of the CDR3_seq to any genomic V allele allowed by V_usage_mask.
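A toy numpy sketch (not OLGA itself) of the accumulation in the final loop above: each V allele's Pi array is weighted by the joint P(V, J) and summed into the per-J output. Shapes and probabilities here are made up.

import numpy as np

L = 4  # toy CDR3 length in 'amino acids'
PVJ = np.array([[0.6], [0.4]])  # 2 V alleles x 1 J allele
rng = np.random.default_rng(0)
Pi_V = [rng.random((4, L * 3)) for _ in range(2)]  # per-V position weights

Pi_V_given_J = [np.zeros((4, L * 3))]
for v in range(2):
    Pi_V_given_J[0] += PVJ[v, 0] * Pi_V[v]
print(Pi_V_given_J[0].shape)  # (4, 12)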
22,088
def validate_url(self, url): url_path = to_bytes_safe(url.path) url_path = urllib.parse.quote(url_path, safe=b"/%") url_query = to_bytes_safe(url.query) url_query = urllib.parse.quote(url_query, safe=b"?=&") url = urllib.parse.ParseResult(url.scheme, url.netloc, url_path, url.params, url_query, url.fragment) has_hostname = url.hostname is not None and len(url.hostname) > 0 has_http_scheme = url.scheme in ("http", "https") has_path = not len(url.path) or url.path.startswith("/") if not (has_hostname and has_http_scheme and has_path): raise NotSupported("invalid url: %s" % repr(url)) return url
Validate the :class:`~urllib.parse.ParseResult` object. This method makes sure that :meth:`~brownant.app.BrownAnt.parse_url` works as expected even when it meets an unexpected URL string. :param url: the parsed url. :type url: :class:`~urllib.parse.ParseResult`
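A standalone sketch of the acceptance check, using urllib.parse directly on one URL that passes and one that fails the scheme test; the URLs are arbitrary examples.

from urllib.parse import urlparse

for raw in ("https://example.com/a b?q=1", "ftp://example.com/file"):
    url = urlparse(raw)
    ok = (url.hostname and url.scheme in ("http", "https")
          and (not url.path or url.path.startswith("/")))
    print(raw, "->", "ok" if ok else "rejected")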
22,089
def _persisted_last_epoch(self) -> int:
    epoch_number = 0
    self._make_sure_dir_exists()
    for x in os.listdir(self.model_config.checkpoint_dir()):
        # The regex literal was stripped from the source; a
        # 'checkpoint_<epoch>.data' naming scheme is assumed here.
        match = re.match(r'checkpoint_(\d+)\.data', x)
        if match:
            idx = int(match[1])
            if idx > epoch_number:
                epoch_number = idx
    return epoch_number
Return the number of the last epoch already calculated.
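A standalone sketch of the filename scan, assuming checkpoints are named 'checkpoint_<epoch>.data' (the regex literal was stripped in the code above, so the pattern is an assumption).

import re

files = ["checkpoint_3.data", "checkpoint_12.data", "notes.txt"]
last = 0
for x in files:
    match = re.match(r'checkpoint_(\d+)\.data', x)
    if match:
        last = max(last, int(match[1]))
print(last)  # 12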
22,090
def init(FILE):
    try:
        cfg.read(FILE)
        global _loaded
        _loaded = True
    except Exception:
        # A bare `except:` also swallowed SystemExit/KeyboardInterrupt, so
        # catch Exception instead. NB: if `cfg` is a ConfigParser, read()
        # silently skips missing files, so this mainly catches parse errors.
        file_not_found_message(FILE)
Read config file :param FILE: Absolute path to config file (incl. filename) :type FILE: str
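A minimal sketch of what a successful init enables, assuming `cfg` above is a standard ConfigParser instance; the section and option names are made up.

import configparser

cfg = configparser.ConfigParser()
cfg.read_string("[server]\nhost = localhost\nport = 8080\n")
print(cfg.get("server", "host"))     # localhost
print(cfg.getint("server", "port"))  # 8080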
22,091
def list_themes(directory=None): repo = require_repo(directory) path = os.path.join(repo, themes_dir) return os.listdir(path) if os.path.isdir(path) else None
Gets a list of the installed themes.
22,092
def fire_lasers( self, modules: Optional[List[str]] = None, verbose_report: bool = False, transaction_count: Optional[int] = None, ) -> Report: all_issues = [] SolverStatistics().enabled = True exceptions = [] for contract in self.contracts: StartTime() try: sym = SymExecWrapper( contract, self.address, self.strategy, dynloader=DynLoader( self.eth, storage_loading=self.onchain_storage_access, contract_loading=self.dynld, ), max_depth=self.max_depth, execution_timeout=self.execution_timeout, create_timeout=self.create_timeout, transaction_count=transaction_count, modules=modules, compulsory_statespace=False, enable_iprof=self.enable_iprof, ) issues = fire_lasers(sym, modules) except KeyboardInterrupt: log.critical("Keyboard Interrupt") issues = retrieve_callback_issues(modules) except Exception: log.critical( "Exception occurred, aborting analysis. Please report this issue to the Mythril GitHub page.\n" + traceback.format_exc() ) issues = retrieve_callback_issues(modules) exceptions.append(traceback.format_exc()) for issue in issues: issue.add_code_info(contract) all_issues += issues log.info("Solver statistics: \n{}".format(str(SolverStatistics()))) source_data = Source() source_data.get_source_from_contracts_list(self.contracts) report = Report(verbose_report, contracts=self.contracts, exceptions=exceptions) for issue in all_issues: report.append_issue(issue) return report
:param modules: The analysis modules which should be executed :param verbose_report: Gives out the transaction sequence of the vulnerability :param transaction_count: The amount of transactions to be executed :return: The Report class which contains the all the issues/vulnerabilities
22,093
def finalize(self):
    super(TransposedConsumer, self).finalize()
    # Wrap in list() so the transposition is materialized under Python 3,
    # where map() returns a lazy iterator.
    self.result = list(map(list, zip(*self.result)))
Finalize for PathConsumer; transposes the accumulated result so rows become columns.
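The transposition idiom in isolation; note that under Python 3 `map` is lazy, hence the `list(...)` wrapper added in the code above.

rows = [[1, 2, 3], [4, 5, 6]]
cols = list(map(list, zip(*rows)))
print(cols)  # [[1, 4], [2, 5], [3, 6]]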
22,094
def handler(event, context):
    # Assumptions: the stripped event key is 'Records' (the usual Lambda
    # event shape), and the original's undefined `update_records` was meant
    # to be the `records` deserialized on the previous line.
    records = deserialize_records(event['Records'])
    capture_update_records(records)
Historical {{cookiecutter.technology_name}} event collector. This collector is responsible for processing Cloudwatch events and polling events.
22,095
def get_mac(self):
    # NOTE: this entry was garbled during extraction; only the struct
    # format fragments ('16sH14s', '6B8x') and the '%02X' hex formatting
    # survived. The body below is a plausible reconstruction, not the
    # library's original code: send a discovery-style packet, read the
    # reply, and format the 6-byte MAC as colon-separated hex. The
    # `device_id` and `sock` attributes are assumptions.
    packet = struct.pack('!16sH14s', self.device_id, 0, b'\x00' * 14)
    self.sock.send(packet)
    response = self.sock.recv(1024)
    mac = struct.unpack('!6B8x', response[:14])
    return ':'.join(['%02X' % i for i in mac])
Obtain the device's MAC address.
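A runnable sketch of the two fragments that survived extraction — unpacking six MAC bytes with struct ('6B8x': six unsigned bytes, eight pad bytes) and hex-formatting them. The payload bytes are made up.

import struct

payload = bytes([0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x42]) + b'\x00' * 8
mac = struct.unpack('!6B8x', payload)
print(':'.join(['%02X' % i for i in mac]))  # DE:AD:BE:EF:00:42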
22,096
def setPermanences(self, segments, presynapticCellsBySource, permanence): permanences = np.repeat(np.float32(permanence), len(segments)) for source, connections in self.connectionsBySource.iteritems(): if source in presynapticCellsBySource: connections.matrix.setElements(segments, presynapticCellsBySource[source], permanences)
Set the permanence of a specific set of synapses. Any synapses that don't exist will be initialized. Any existing synapses will be overwritten. Conceptually, this method takes a list of [segment, presynapticCell] pairs and initializes their permanence. For each segment, one synapse is added (although one might be added for each "source"). To add multiple synapses to a segment, include it in the list multiple times. The total number of affected synapses is len(segments)*number_of_sources*1. @param segments (numpy array) One segment for each synapse that should be added @param presynapticCellsBySource (dict of numpy arrays) One presynaptic cell for each segment. Example: {"customInputName1": np.array([42, 69])} @param permanence (float) The permanence to assign the synapse
22,097
def load_indexed_audio(self, indexed_audio_file_abs_path): with open(indexed_audio_file_abs_path, "rb") as f: self.__timestamps = pickle.load(f)
Parameters ---------- indexed_audio_file_abs_path : str
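A minimal sketch of the round trip this loader expects, assuming the indexed file is simply a pickled timestamps object; the dict structure and path are assumptions.

import os
import pickle
import tempfile

timestamps = {"hello": [(0.0, 0.4)], "world": [(0.5, 0.9)]}
path = os.path.join(tempfile.gettempdir(), "indexed_audio.pkl")
with open(path, "wb") as f:
    pickle.dump(timestamps, f)
with open(path, "rb") as f:
    print(pickle.load(f))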
22,098
def forward(self, inputs, context, inference=False): self.inference = inference enc_context, enc_len, hidden = context hidden = self.init_hidden(hidden) x = self.embedder(inputs) x, h, attn, scores = self.att_rnn(x, hidden[0], enc_context, enc_len) self.append_hidden(h) x = torch.cat((x, attn), dim=2) x = self.dropout(x) x, h = self.rnn_layers[0](x, hidden[1]) self.append_hidden(h) for i in range(1, len(self.rnn_layers)): residual = x x = torch.cat((x, attn), dim=2) x = self.dropout(x) x, h = self.rnn_layers[i](x, hidden[i + 1]) self.append_hidden(h) x = x + residual x = self.classifier(x) hidden = self.package_hidden() return x, scores, [enc_context, enc_len, hidden]
Execute the decoder. :param inputs: tensor with inputs to the decoder :param context: state of encoder, encoder sequence lengths and hidden state of decoder's LSTM layers :param inference: if True stores and repackages hidden state
22,099
def search_kv_store(self, key):
    # The request keys and the response field were stripped from the
    # source; 'action'/'key' and 'value' below are assumptions, not a
    # confirmed API.
    data = {
        'action': 'search',
        'key': key
    }
    return self.post_json(self.make_url("/useragent-kv"), data)['value']
Search for a key in the key-value store. :param key: string :rtype: string
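A hedged local stand-in for the remote key-value store, since the request keys and the response field read above were stripped from the source; 'value' is an assumed field name mimicking the service's JSON reply.

store = {"user-42": "Mozilla/5.0"}

def search_kv_store(key):
    response = {"value": store.get(key, "")}  # stand-in for the JSON reply
    return response["value"]

print(search_kv_store("user-42"))  # Mozilla/5.0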