Unnamed: 0 (int64): values 0 to 389k
code (string): lengths 26 to 79.6k
docstring (string): lengths 1 to 46.9k
385,100
def idxstats(in_bam, data):
    index(in_bam, data["config"], check_timestamp=False)
    AlignInfo = collections.namedtuple("AlignInfo", ["contig", "length", "aligned", "unaligned"])
    samtools = config_utils.get_program("samtools", data["config"])
    idxstats_out = subprocess.check_output([samtools, "idxstats", in_bam]).decode()
    out = []
    for line in idxstats_out.split("\n"):
        if line.strip():
            contig, length, aligned, unaligned = line.split("\t")
            out.append(AlignInfo(contig, int(length), int(aligned), int(unaligned)))
    return out
Return BAM index stats for the given file, using samtools idxstats.
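For context, `samtools idxstats` emits one tab-separated line per contig (reference name, sequence length, mapped reads, unmapped reads), which is what the parsing loop above splits on. A small illustration with made-up counts:

    line = "chr1\t248956422\t1020304\t1523"   # example idxstats row, values are illustrative
    contig, length, aligned, unaligned = line.split("\t")
    # becomes AlignInfo("chr1", 248956422, 1020304, 1523) after the int() conversions above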
385,101
def handle_jmespath_query(self, args): continue_flag = False query_symbol = SELECT_SYMBOL[] symbol_len = len(query_symbol) try: if len(args) == 1: if args[0] == query_symbol: result = self.last.result elif args[0].startswith(query_symbol): result = jmespath.search(args[0][symbol_len:], self.last.result) print(json.dumps(result, sort_keys=True, indent=2), file=self.output) elif args[0].startswith(query_symbol): print(("Usage Error: " + os.linesep + "1. Use {0} stand-alone to display previous result with optional filtering " "(Ex: {0}[jmespath query])" + os.linesep + "OR:" + os.linesep + "2. Use {0} to query the previous result for argument values " "(Ex: group show --name {0}[jmespath query])").format(query_symbol), file=self.output) else: def jmespath_query(match): if match.group(0) == query_symbol: return str(self.last.result) query_result = jmespath.search(match.group(0)[symbol_len:], self.last.result) return str(query_result) def sub_result(arg): escaped_symbol = re.escape(query_symbol) return json.dumps(re.sub(r % escaped_symbol, jmespath_query, arg)) cmd_base = .join(map(sub_result, args)) self.cli_execute(cmd_base) continue_flag = True except (jmespath.exceptions.ParseError, CLIError) as e: print("Invalid Query Input: " + str(e), file=self.output) continue_flag = True return continue_flag
handles the jmespath query for injection or printing
385,102
def _parse_engine(self): if self._parser.has_option(, ): engine = str(self._parser.get(, )) else: engine = ENGINE_DROPBOX assert isinstance(engine, str) if engine not in [ENGINE_DROPBOX, ENGINE_GDRIVE, ENGINE_COPY, ENGINE_ICLOUD, ENGINE_BOX, ENGINE_FS]: raise ConfigError(.format(engine)) return str(engine)
Parse the storage engine in the config. Returns: str
385,103
def do_a(self, line): index, value_string = self.index_and_value_from_line(line) if index and value_string: try: self.application.apply_update(opendnp3.Analog(float(value_string)), index) except ValueError: print()
Send the Master an AnalogInput (group 32) value. Command syntax is: a index value
385,104
def p_var_decl(p):
    for vardata in p[2]:
        SYMBOL_TABLE.declare_variable(vardata[0], vardata[1], p[3])
    p[0] = None
var_decl : DIM idlist typedef
385,105
def _check_args(logZ, f, x, samples, weights): if logZ is None: logZ = [0] f = [f] samples = [samples] weights = [weights] logZ = numpy.array(logZ, dtype=) if len(logZ.shape) is not 1: raise ValueError("logZ should be a 1D array") x = numpy.array(x, dtype=) if len(x.shape) is not 1: raise ValueError("x should be a 1D array") if len(logZ) != len(f): raise ValueError("len(logZ) = %i != len(f)= %i" % (len(logZ), len(f))) for func in f: if not callable(func): raise ValueError("first argument f must be function" "(or list of functions) of two variables") if len(logZ) != len(samples): raise ValueError("len(logZ) = %i != len(samples)= %i" % (len(logZ), len(samples))) samples = [numpy.array(s, dtype=) for s in samples] for s in samples: if len(s.shape) is not 2: raise ValueError("each set of samples should be a 2D array") if len(logZ) != len(weights): raise ValueError("len(logZ) = %i != len(weights)= %i" % (len(logZ), len(weights))) weights = [numpy.array(w, dtype=) if w is not None else numpy.ones(len(s), dtype=) for w, s in zip(weights, samples)] for w, s in zip(weights, samples): if len(w.shape) is not 1: raise ValueError("each set of weights should be a 1D array") if len(w) != len(s): raise ValueError("len(w) = %i != len(s) = %i" % (len(s), len(w))) return logZ, f, x, samples, weights
Sanity-check the arguments for :func:`fgivenx.drivers.compute_samples`. Parameters ---------- f, x, samples, weights: see arguments for :func:`fgivenx.drivers.compute_samples`
385,106
def flag_to_list(flagval, flagtype): if flagtype == : return [int(_) for _ in flagval.split() if _] elif flagtype == : return [float(_) for _ in flagval.split() if _] elif flagtype == : return [_ for _ in flagval.split() if _] else: raise Exception("incorrect type")
Convert a string of comma-separated tf flags to a list of values.
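The type tokens and the split delimiter were stripped from this row by extraction; a minimal usage sketch, assuming the elided literals were "int"/"float"/"str" and a comma delimiter:

    # Hypothetical calls (the actual literals are elided in the code above):
    flag_to_list("1,2,3", "int")        # -> [1, 2, 3]
    flag_to_list("0.5,,2.0", "float")   # -> [0.5, 2.0]   empty entries are skipped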
385,107
def save(self): self._check_custom_fields() if not self._changes: return None for tag in self._remap_to_id: self._remap_tag_to_tag_id(tag, self._changes) for tag, type in self._field_type.items(): try: raw_data = self._changes[tag] except: continue if type == : try: self._changes[tag] = raw_data.strftime() except AttributeError: continue if type == : try: self._changes[tag] = raw_data.strftime() except AttributeError: continue try: self._update(self._changes) except: raise else: self._changes.clear()
Save all changes on this item (if any) back to Redmine.
385,108
def get_relation_cnt(self):
    ctr = cx.Counter()
    for ntgpad in self.associations:
        if ntgpad.Extension is not None:
            ctr += ntgpad.Extension.get_relations_cnt()
    return ctr
Return a Counter containing all relations contained in the Annotation Extensions.
385,109
def _build_loss(self, lstm_outputs): batch_size = self.options[] unroll_steps = self.options[] n_tokens_vocab = self.options[] def _get_next_token_placeholders(suffix): name = + suffix id_placeholder = tf.placeholder(DTYPE_INT, shape=(batch_size, unroll_steps), name=name) return id_placeholder self.next_token_id = _get_next_token_placeholders() if self.bidirectional: self.next_token_id_reverse = _get_next_token_placeholders( ) softmax_dim = self.options[][] if self.share_embedding_softmax: self.softmax_W = self.embedding_weights with tf.variable_scope(), tf.device(): softmax_init = tf.random_normal_initializer(0.0, 1.0 / np.sqrt(softmax_dim)) if not self.share_embedding_softmax: self.softmax_W = tf.get_variable( , [n_tokens_vocab, softmax_dim], dtype=DTYPE, initializer=softmax_init ) self.softmax_b = tf.get_variable( , [n_tokens_vocab], dtype=DTYPE, initializer=tf.constant_initializer(0.0)) self.individual_train_losses = [] self.individual_eval_losses = [] if self.bidirectional: next_ids = [self.next_token_id, self.next_token_id_reverse] else: next_ids = [self.next_token_id] for id_placeholder, lstm_output_flat in zip(next_ids, lstm_outputs): next_token_id_flat = tf.reshape(id_placeholder, [-1, 1]) with tf.control_dependencies([lstm_output_flat]): sampled_losses = tf.nn.sampled_softmax_loss(self.softmax_W, self.softmax_b, next_token_id_flat, lstm_output_flat, self.options[], self.options[], num_true=1) output_scores = tf.matmul( lstm_output_flat, tf.transpose(self.softmax_W) ) + self.softmax_b losses = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=output_scores, labels=tf.squeeze(next_token_id_flat, squeeze_dims=[1]) ) sampled_losses = tf.reshape(sampled_losses, [self.options[], -1]) losses = tf.reshape(losses, [self.options[], -1]) self.individual_train_losses.append(tf.reduce_mean(sampled_losses, axis=1)) self.individual_eval_losses.append(tf.reduce_mean(losses, axis=1)) if self.bidirectional: self.total_train_loss = 0.5 * (self.individual_train_losses[0] + self.individual_train_losses[1]) self.total_eval_loss = 0.5 * (self.individual_eval_losses[0] + self.individual_eval_losses[1]) else: self.total_train_loss = self.individual_train_losses[0] self.total_eval_loss = self.individual_eval_losses[0]
Create: self.total_loss: total loss op for training self.softmax_W, softmax_b: the softmax variables self.next_token_id / _reverse: placeholders for gold input
385,110
def get_extra_context(site, ctx): ctx[] = site ctx[] = feeds = site.active_feeds.order_by() def get_mod_chk(k): mod, chk = ( (max(vals) if vals else None) for vals in ( filter(None, it.imap(op.attrgetter(k), feeds)) for k in [, ] ) ) chk = chk or datetime(1970, 1, 1, 0, 0, 0, 0, timezone.utc) ctx[], ctx[] = mod or chk, chk return ctx[k] for k in , : ctx[k] = lambda: get_mod_chk(k) ctx[] = ctx[] =\ .format(settings.STATIC_URL, site.template)
Returns extra data useful to the templates.
385,111
def copyFileToHdfs(localFilePath, hdfsFilePath, hdfsClient, override=True): if not os.path.exists(localFilePath): raise Exception() if os.path.isdir(localFilePath): raise Exception() if hdfsClient.exists(hdfsFilePath): if override: hdfsClient.delete(hdfsFilePath) else: return False try: hdfsClient.copy_from_local(localFilePath, hdfsFilePath) return True except Exception as exception: nni_log(LogType.Error, .format(localFilePath, hdfsFilePath, str(exception))) return False
Copy a local file to HDFS directory
385,112
def simple_response(self, status, msg=): status = str(status) proto_status = % (self.server.protocol, status) content_length = % len(msg) content_type = buf = [ proto_status.encode(), content_length.encode(), content_type.encode(), ] if status[:3] in (, ): self.close_connection = True if self.response_protocol == : buf.append(b) else: status = buf.append(CRLF) if msg: if isinstance(msg, six.text_type): msg = msg.encode() buf.append(msg) try: self.conn.wfile.write(EMPTY.join(buf)) except socket.error as ex: if ex.args[0] not in errors.socket_errors_to_ignore: raise
Write a simple response back to the client.
385,113
def redirectURL(self, realm, return_to=None, immediate=False): message = self.getMessage(realm, return_to, immediate) return message.toURL(self.endpoint.server_url)
Returns a URL with an encoded OpenID request. The resulting URL is the OpenID provider's endpoint URL with parameters appended as query arguments. You should redirect the user agent to this URL. OpenID 2.0 endpoints also accept POST requests, see C{L{shouldSendRedirect}} and C{L{formMarkup}}. @param realm: The URL (or URL pattern) that identifies your web site to the user when she is authorizing it. @type realm: str @param return_to: The URL that the OpenID provider will send the user back to after attempting to verify her identity. Not specifying a return_to URL means that the user will not be returned to the site issuing the request upon its completion. @type return_to: str @param immediate: If True, the OpenID provider is to send back a response immediately, useful for behind-the-scenes authentication attempts. Otherwise the OpenID provider may engage the user before providing a response. This is the default case, as the user may need to provide credentials or approve the request before a positive response can be sent. @type immediate: bool @returns: The URL to redirect the user agent to. @returntype: str
385,114
def is_response(cls, response):
    if response.body:
        if cls.is_file(response.body):
            return True
Return whether the document is likely to be a Sitemap.
385,115
def check_update_J(self):
    self._J_update_counter += 1
    update = self._J_update_counter >= self.update_J_frequency
    return update & (not self._fresh_JTJ)
Checks if the full J should be updated. Right now, just updates after update_J_frequency loops
385,116
def parse_json_date(value):
    if not value:
        return None
    return datetime.datetime.strptime(value, JSON_DATETIME_FORMAT).replace(tzinfo=pytz.UTC)
Parses an ISO8601 formatted datetime from a string value
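The JSON_DATETIME_FORMAT constant is not shown in this row; a sketch assuming a millisecond-precision ISO8601 format string:

    import datetime
    import pytz
    JSON_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"   # assumption, not taken from the source
    value = "2019-03-01T12:30:00.000Z"
    datetime.datetime.strptime(value, JSON_DATETIME_FORMAT).replace(tzinfo=pytz.UTC)
    # -> datetime.datetime(2019, 3, 1, 12, 30, tzinfo=<UTC>)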
385,117
def _initializer_for(self, raw_name: str, cooked_name: str, prefix: Optional[str]) -> List[str]: mt_val = self._ebnf.mt_value(self._typ) rval = [] if is_valid_python(raw_name): if prefix: rval.append(f"self.{raw_name} = {prefix}.{raw_name}") else: cons = raw_name rval.append(f"self.{raw_name} = {cons}") elif is_valid_python(cooked_name): if prefix: rval.append(f"setattr(self, , getattr({prefix}, )") else: cons = f"{cooked_name} if {cooked_name} is not {mt_val} else _kwargs.get(, {mt_val})" rval.append(f"setattr(self, , {cons})") else: getter = f"_kwargs.get(, {mt_val})" if prefix: rval.append(f"setattr(self, , getattr({prefix}, )") else: rval.append(f"setattr(self, , {getter})") return rval
Create an initializer entry for the entry :param raw_name: name unadjusted for python compatibility. :param cooked_name: name that may or may not be python compatible :param prefix: owner of the element - used when objects passed as arguments :return: Initialization statements
385,118
def columns(self): if self.df.empty: return None columns = [] sample_size = min(INFER_COL_TYPES_SAMPLE_SIZE, len(self.df.index)) sample = self.df if sample_size: sample = self.df.sample(sample_size) for col in self.df.dtypes.keys(): db_type_str = ( self._type_dict.get(col) or self.db_type(self.df.dtypes[col]) ) column = { : col, : self.agg_func(self.df.dtypes[col], col), : db_type_str, : self.is_date(self.df.dtypes[col], db_type_str), : self.is_dimension(self.df.dtypes[col], col), } if not db_type_str or db_type_str.upper() == : v = sample[col].iloc[0] if not sample[col].empty else None if isinstance(v, str): column[] = elif isinstance(v, int): column[] = elif isinstance(v, float): column[] = elif isinstance(v, (datetime, date)): column[] = column[] = True column[] = False if ( column[] == and self.datetime_conversion_rate(sample[col]) > INFER_COL_TYPES_THRESHOLD): column.update({ : True, : False, : None, }) if not column[]: column.pop(, None) columns.append(column) return columns
Provides metadata about columns for data visualization. :return: dict, with the fields name, type, is_date, is_dim and agg.
385,119
def get_num_chunks(length, chunksize):
    n_chunks = int(math.ceil(length / chunksize))
    return n_chunks
r""" Returns the number of chunks that a list will be split into given a chunksize. Args: length (int): chunksize (int): Returns: int: n_chunks CommandLine: python -m utool.util_progress --exec-get_num_chunks:0 Example0: >>> # ENABLE_DOCTEST >>> from utool.util_progress import * # NOQA >>> length = 2000 >>> chunksize = 256 >>> n_chunks = get_num_chunks(length, chunksize) >>> result = ('n_chunks = %s' % (six.text_type(n_chunks),)) >>> print(result) n_chunks = 8
385,120
def create( cls, api_key=None, idempotency_key=None, stripe_account=None, **params ): url = cls.class_url() headers = populate_headers(idempotency_key) return make_request( cls, , url, stripe_account=stripe_account, headers=headers, params=params)
Return a deferred.
385,121
def register_func_list(self, func_and_handler):
    for func, handler in func_and_handler:
        self._function_dispatch.register(func, handler)
    self.dispatch.cache_clear()
Register a function to determine if the handler should be used for the type.
385,122
def candidate(self, cand_func, args=None, kwargs=None, name=, context=None): self._candidates.append({ : cand_func, : args or [], : kwargs or {}, : name, : context or {}, })
Adds a candidate function to an experiment. Can be used multiple times for multiple candidates. :param callable cand_func: your candidate function :param iterable args: positional arguments to pass to your function :param dict kwargs: keyword arguments to pass to your function :param string name: a name for your observation :param dict context: observation-specific context
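A hedged usage sketch; the experiment object and candidate function names below are made up for illustration:

    experiment.candidate(new_sort_implementation,
                         args=[unsorted_list],
                         name="timsort-variant",
                         context={"build": "canary"})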
385,123
def safe_unicode(obj, *args): try: return unicode(obj, *args) except UnicodeDecodeError: ascii_text = str(obj).encode() try: return unicode(ascii_text) except NameError: return obj
return the unicode representation of obj
385,124
def libvlc_media_new_path(p_instance, path): f = _Cfunctions.get(, None) or \ _Cfunction(, ((1,), (1,),), class_result(Media), ctypes.c_void_p, Instance, ctypes.c_char_p) return f(p_instance, path)
Create a media for a certain file path. See L{libvlc_media_release}. @param p_instance: the instance. @param path: local filesystem path. @return: the newly created media or NULL on error.
385,125
def http_time(time): return formatdate(timeval=mktime(time.timetuple()), localtime=False, usegmt=True)
Formats a datetime as an RFC 1123 compliant string.
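An illustration of the RFC 1123 output; note that mktime() interprets the naive datetime in the local timezone, so the exact GMT string depends on where it runs:

    import datetime
    from email.utils import formatdate
    from time import mktime
    dt = datetime.datetime(2019, 1, 1, 12, 0, 0)
    formatdate(timeval=mktime(dt.timetuple()), localtime=False, usegmt=True)
    # e.g. 'Tue, 01 Jan 2019 12:00:00 GMT' on a machine whose local time is UTC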
385,126
def get_cameras(self): response = api.request_homescreen(self) try: all_cameras = {} for camera in response[]: camera_network = str(camera[]) camera_name = camera[] camera_id = camera[] camera_info = {: camera_name, : camera_id} if camera_network not in all_cameras: all_cameras[camera_network] = [] all_cameras[camera_network].append(camera_info) return all_cameras except KeyError: _LOGGER.error("Initialization failue. Could not retrieve cameras.") return {}
Retrieve a camera list for each onboarded network.
385,127
def sensoryCompute(self, activeMinicolumns, learn):
    inputParams = {
        "activeColumns": activeMinicolumns,
        "basalInput": self.getLocationRepresentation(),
        "basalGrowthCandidates": self.getLearnableLocationRepresentation(),
        "learn": learn
    }
    self.L4.compute(**inputParams)
    locationParams = {
        "anchorInput": self.L4.getActiveCells(),
        "anchorGrowthCandidates": self.L4.getWinnerCells(),
        "learn": learn,
    }
    for module in self.L6aModules:
        module.sensoryCompute(**locationParams)
    return (inputParams, locationParams)
@param activeMinicolumns (numpy array) List of indices of minicolumns to activate. @param learn (bool) If True, the two layers should learn this association. @return (tuple of dicts) Data for logging/tracing.
385,128
def start_background_task(self, target, *args, **kwargs): return self.server.start_background_task(target, *args, **kwargs)
Start a background task using the appropriate async model. This is a utility function that applications can use to start a background task using the method that is compatible with the selected async mode. :param target: the target function to execute. :param args: arguments to pass to the function. :param kwargs: keyword arguments to pass to the function. This function returns an object compatible with the `Thread` class in the Python standard library. The `start()` method on this object is already called by this function.
385,129
def _build_url(self, endpoint): try: path = self.endpoints[endpoint] except KeyError: msg = raise ValueError(msg.format(endpoint)) absolute_url = urljoin(self.target, path) return absolute_url
Builds the absolute URL using the target and desired endpoint.
385,130
def create_ebnf_parser(files): flag = False for belspec_fn in files: if config["bel"]["lang"]["specification_github_repo"]: tmpl_fn = get_ebnf_template() ebnf_fn = belspec_fn.replace(".yaml", ".ebnf") if not os.path.exists(ebnf_fn) or os.path.getmtime(belspec_fn) > os.path.getmtime(ebnf_fn): with open(belspec_fn, "r") as f: belspec = yaml.load(f, Loader=yaml.SafeLoader) tmpl_dir = os.path.dirname(tmpl_fn) tmpl_basename = os.path.basename(tmpl_fn) bel_major_version = belspec["version"].split(".")[0] env = jinja2.Environment( loader=jinja2.FileSystemLoader(tmpl_dir) ) template = env.get_template(tmpl_basename) relations_list = [ (relation, belspec["relations"]["info"][relation]["abbreviation"]) for relation in belspec["relations"]["info"] ] relations_list = sorted(list(itertools.chain(*relations_list)), key=len, reverse=True) functions_list = [ (function, belspec["functions"]["info"][function]["abbreviation"]) for function in belspec["functions"]["info"] if belspec["functions"]["info"][function]["type"] == "primary" ] functions_list = sorted(list(itertools.chain(*functions_list)), key=len, reverse=True) modifiers_list = [ (function, belspec["functions"]["info"][function]["abbreviation"]) for function in belspec["functions"]["info"] if belspec["functions"]["info"][function]["type"] == "modifier" ] modifiers_list = sorted(list(itertools.chain(*modifiers_list)), key=len, reverse=True) created_time = datetime.datetime.now().strftime("%B %d, %Y - %I:%M:%S%p") ebnf = template.render( functions=functions_list, m_functions=modifiers_list, relations=relations_list, bel_version=belspec["version"], bel_major_version=bel_major_version, created_time=created_time, ) with open(ebnf_fn, "w") as f: f.write(ebnf) parser_fn = ebnf_fn.replace(".ebnf", "_parser.py") parser = tatsu.to_python_sourcecode(ebnf, filename=parser_fn) flag = True with open(parser_fn, "wt") as f: f.write(parser) if flag: importlib.invalidate_caches()
Create EBNF files and EBNF-based parsers
385,131
def reply_count(self, url, mode=5, after=0): sql = [, , , , , ] return dict(self.db.execute(sql, [url, mode, mode, after]).fetchall())
Return comment count for main thread and all reply threads for one url.
385,132
def check_venv(self): if self.zappa: venv = self.zappa.get_current_venv() else: venv = Zappa.get_current_venv() if not venv: raise ClickException( click.style("Zappa", bold=True) + " requires an " + click.style("active virtual environment", bold=True, fg="red") + "!\n" + "Learn more about virtual environments here: " + click.style("http://docs.python-guide.org/en/latest/dev/virtualenvs/", bold=False, fg="cyan"))
Ensure we're inside a virtualenv.
385,133
def findExtNum(self, extname=None, extver=1): extnum = None extname = extname.upper() if not self._isSimpleFits: for ext in self._image: if (hasattr(ext,) and in ext._extension and (ext.extname == extname) and (ext.extver == extver)): extnum = ext.extnum else: log.info("Image is simple fits") return extnum
Find the extension number of the give extname and extver.
385,134
def get(self, block=True, timeout=None): return self._queue.get(block, timeout)
Get item from underlying queue.
385,135
def add(self, user, password):
    if self.__contains__(user):
        raise UserExists
    self.new_users[user] = self._encrypt_password(password) + "\n"
Adds a user with password
385,136
def load_aead(self, public_id): connection = self.engine.connect() trans = connection.begin() try: s = sqlalchemy.select([self.aead_table]).where( (self.aead_table.c.public_id == public_id) & self.aead_table.c.keyhandle.in_([kh[1] for kh in self.key_handles])) result = connection.execute(s) for row in result: kh_int = row[] aead = pyhsm.aead_cmd.YHSM_GeneratedAEAD(None, kh_int, ) aead.data = row[] aead.nonce = row[] return aead except Exception as e: trans.rollback() raise Exception("No AEAD in DB for public_id %s (%s)" % (public_id, str(e))) finally: connection.close()
Loads AEAD from the specified database.
385,137
def evaluate(ref_time, ref_freqs, est_time, est_freqs, **kwargs): scores = collections.OrderedDict() (scores[], scores[], scores[], scores[], scores[], scores[], scores[], scores[], scores[], scores[], scores[], scores[], scores[], scores[]) = util.filter_kwargs( metrics, ref_time, ref_freqs, est_time, est_freqs, **kwargs) return scores
Evaluate two multipitch (multi-f0) transcriptions, where the first is treated as the reference (ground truth) and the second as the estimate to be evaluated (prediction). Examples -------- >>> ref_time, ref_freq = mir_eval.io.load_ragged_time_series('ref.txt') >>> est_time, est_freq = mir_eval.io.load_ragged_time_series('est.txt') >>> scores = mir_eval.multipitch.evaluate(ref_time, ref_freq, ... est_time, est_freq) Parameters ---------- ref_time : np.ndarray Time of each reference frequency value ref_freqs : list of np.ndarray List of np.ndarrays of reference frequency values est_time : np.ndarray Time of each estimated frequency value est_freqs : list of np.ndarray List of np.ndarrays of estimate frequency values kwargs Additional keyword arguments which will be passed to the appropriate metric or preprocessing functions. Returns ------- scores : dict Dictionary of scores, where the key is the metric name (str) and the value is the (float) score achieved.
385,138
def Enable(self, value):
    "enable or disable all top menus"
    for i in range(self.GetMenuCount()):
        self.EnableTop(i, value)
enable or disable all top menus
385,139
def map_reduce(self, map_function, data, function_kwargs=None, chunk_size=None, data_length=None): if data_length is None: data_length = len(data) if not chunk_size: chunk_size = self.calculate_best_chunk_size(data_length) chunk_generator = self.partition(data, chunk_size=chunk_size) map_kwargs = {"map_function": map_function, "kwargs": function_kwargs} if hasattr(self, "progressbar_title"): total_number_of_expected_results = math.ceil(data_length / chunk_size) result = tqdm(self.distribute(_function_with_partly_reduce, chunk_generator, map_kwargs), total=total_number_of_expected_results, desc=self.progressbar_title, disable=self.disable_progressbar) else: result = self.distribute(_function_with_partly_reduce, chunk_generator, map_kwargs), result = list(itertools.chain.from_iterable(result)) return result
This method contains the core functionality of the DistributorBaseClass class. It maps the map_function to each element of the data and reduces the results to return a flattened list. How the jobs are calculated, is determined by the classes :func:`tsfresh.utilities.distribution.DistributorBaseClass.distribute` method, which can distribute the jobs in multiple threads, across multiple processing units etc. To not transport each element of the data individually, the data is split into chunks, according to the chunk size (or an empirical guess if none is given). By this, worker processes not tiny but adequate sized parts of the data. :param map_function: a function to apply to each data item. :type map_function: callable :param data: the data to use in the calculation :type data: iterable :param function_kwargs: parameters for the map function :type function_kwargs: dict of string to parameter :param chunk_size: If given, chunk the data according to this size. If not given, use an empirical value. :type chunk_size: int :param data_length: If the data is a generator, you have to set the length here. If it is none, the length is deduced from the len of the data. :type data_length: int :return: the calculated results :rtype: list
385,140
def dense(x, output_dim, reduced_dims=None, expert_dims=None, use_bias=True, activation=None, master_dtype=tf.float32, slice_dtype=tf.float32, variable_dtype=None, name=None): if variable_dtype is None: variable_dtype = mtf.VariableDType(master_dtype, slice_dtype, x.dtype) if expert_dims is None: expert_dims = [] if reduced_dims is None: reduced_dims = x.shape.dims[-1:] w_shape = mtf.Shape(expert_dims + reduced_dims + [output_dim]) output_shape = mtf.Shape( [d for d in x.shape.dims if d not in reduced_dims] + [output_dim]) with tf.variable_scope(name, default_name="dense"): stddev = mtf.list_product(d.size for d in reduced_dims) ** -0.5 w = mtf.get_variable( x.mesh, "kernel", w_shape, initializer=tf.random_normal_initializer(stddev=stddev), dtype=variable_dtype) w = mtf.cast(w, x.dtype) y = mtf.einsum([x, w], output_shape) if use_bias: b = mtf.get_variable( x.mesh, "bias", mtf.Shape(expert_dims + [output_dim]), initializer=tf.zeros_initializer(), dtype=variable_dtype) y += b if activation is not None: y = activation(y) return y
Dense layer doing (kernel*x + bias) computation. Args: x: a mtf.Tensor of shape [..., reduced_dims]. output_dim: a mtf.Dimension reduced_dims: an optional list of mtf.Dimensions of x to be reduced. If omitted, we reduce the last dimension. expert_dims: an optional list of mtf.Dimension which represent different experts. Different experts get different weights. use_bias: a boolean, whether to add bias. activation: an optional function from mtf.Tensor to mtf.Tensor master_dtype: a tf.dtype (deprecated - use variable_dtype) slice_dtype: a tf.dtype (deprecated - use variable_dtype) variable_dtype: a mtf.VariableDType name: a string. variable scope. Returns: a mtf.Tensor of shape [..., output_dim].
385,141
def reset_course_favorites(self): path = {} data = {} params = {} self.logger.debug("DELETE /api/v1/users/self/favorites/courses with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/users/self/favorites/courses".format(**path), data=data, params=params, no_data=True)
Reset course favorites. Reset the current user's course favorites to the default automatically generated list of enrolled courses
385,142
def changes(self): report = {} for k, k_changes in self._changes.items(): if len(k_changes) == 1: report[k] = k_changes[0] else: first = k_changes[0] last = k_changes[-1] if first.old_value != last.new_value or first.old_raw_str_value != last.new_raw_str_value: report[k] = _Change( first.old_value, last.new_value, first.old_raw_str_value, last.new_raw_str_value, ) return report
Returns a mapping of items to their effective change objects which include the old values and the new. The mapping includes only items whose value or raw string value has changed in the context.
385,143
def get_single_score(self, point, centroids=None, sd=None): normalised_point = array(point) / array(sd) observation_score = { : point, : normalised_point.tolist(), } distances = [ euclidean(normalised_point, centroid) for centroid in centroids ] return int(distances.index(min(distances)))
Get a single score: a wrapper around the result of classifying a Point against a group of centroids. Attributes: observation_score (dict): Original received point and normalised point. :Example: >>> { "original": [0.40369016, 0.65217912], "normalised": [1.65915104, 3.03896181]} nearest_cluster (int): Index of the nearest cluster. If distances match, then the lowest numbered cluster wins. distances (list (float)): List of distances from the Point to each cluster centroid. E.g: >>> [2.38086238, 0.12382605, 2.0362993, 1.43195021] centroids (list (list (float))): A list of the current centroids when queried. E.g: >>> [ [0.23944831, 1.12769265], [1.75621978, 3.11584191], [2.65884563, 1.26494783], [0.39421099, 2.36783733] ] :param point: the point to classify :type point: pandas.DataFrame :param centroids: the centroids :type centroids: np.array :param sd: the standard deviation :type sd: np.array :return score: the score for a given observation :rtype score: int
385,144
def _graph_wrap(func, graph):
    @wraps(func)
    def _wrapped(*args, **kwargs):
        with graph.as_default():
            return func(*args, **kwargs)
    return _wrapped
Constructs function encapsulated in the graph.
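A minimal sketch using the TensorFlow 1.x-style graph API (assumes `wraps` comes from functools and a TF build where `tf.Graph` is available):

    import tensorflow as tf
    g = tf.Graph()
    build_const = _graph_wrap(lambda: tf.constant(1.0), g)
    op = build_const()   # the constant is created inside g rather than the default graph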
385,145
def path_components(path):
    components = []
    while True:
        (new_path, tail) = os.path.split(path)
        components.append(tail)
        if new_path == path:
            break
        path = new_path
    components.append(new_path)
    components.reverse()
    return components
Return the individual components of a given file path string (for the local operating system). Taken from https://stackoverflow.com/q/21498939/438386
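Tracing the loop on a POSIX absolute path shows the output order, including the empty component produced when os.path.split() reaches the root:

    path_components("/usr/local/bin")
    # -> ['/', '', 'usr', 'local', 'bin']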
385,146
def make_forecasting_frame(x, kind, max_timeshift, rolling_direction): n = len(x) if isinstance(x, pd.Series): t = x.index else: t = range(n) df = pd.DataFrame({"id": ["id"] * n, "time": t, "value": x, "kind": kind}) df_shift = roll_time_series(df, column_id="id", column_sort="time", column_kind="kind", rolling_direction=rolling_direction, max_timeshift=max_timeshift) def mask_first(x): result = np.ones(len(x)) result[-1] = 0 return result mask = df_shift.groupby([])[].transform(mask_first).astype(bool) df_shift = df_shift[mask] return df_shift, df["value"][1:]
Takes a singular time series x and constructs a DataFrame df and target vector y that can be used for a time series forecasting task. The returned df will contain, for every time stamp in x, the last max_timeshift data points as a new time series, such can be used to fit a time series forecasting model. See :ref:`forecasting-label` for a detailed description of the rolling process and how the feature matrix and target vector are derived. The returned time series container df, will contain the rolled time series as a flat data frame, the first format from :ref:`data-formats-label`. When x is a pandas.Series, the index will be used as id. :param x: the singular time series :type x: np.array or pd.Series :param kind: the kind of the time series :type kind: str :param rolling_direction: The sign decides, if to roll backwards (if sign is positive) or forwards in "time" :type rolling_direction: int :param max_timeshift: If not None, shift only up to max_timeshift. If None, shift as often as possible. :type max_timeshift: int :return: time series container df, target vector y :rtype: (pd.DataFrame, pd.Series)
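A hedged usage sketch in the tsfresh style described above (series values and the kind label are illustrative):

    import pandas as pd
    x = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
    df_shift, y = make_forecasting_frame(x, kind="price",
                                         max_timeshift=3,
                                         rolling_direction=1)
    # df_shift is a flat id/time/value/kind frame of rolled windows, y the shifted targets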
385,147
def refactor_use_function(self, offset): try: refactor = UseFunction(self.project, self.resource, offset) except RefactoringError as e: raise Fault( .format(e), code=400 ) return self._get_changes(refactor)
Use the function at point wherever possible.
385,148
def addGenotypePhenotypeSearchOptions(parser): parser.add_argument( "--phenotype_association_set_id", "-s", default=None, help="Only return associations from this phenotype_association_set.") parser.add_argument( "--feature_ids", "-f", default=None, help="Only return associations for these features.") parser.add_argument( "--phenotype_ids", "-p", default=None, help="Only return associations for these phenotypes.") parser.add_argument( "--evidence", "-E", default=None, help="Only return associations to this evidence.")
Adds options to a g2p searches command line parser.
385,149
def execute_command_in_dir(command, directory, verbose=DEFAULTS[], prefix="Output: ", env=None): if os.name == : directory = os.path.normpath(directory) print_comment("Executing: (%s) in directory: %s" % (command, directory), verbose) if env is not None: print_comment("Extra env variables %s" % (env), verbose) try: if os.name == : return_string = subprocess.check_output(command, cwd=directory, shell=True, env=env, close_fds=False) else: return_string = subprocess.check_output(command, cwd=directory, shell=True, stderr=subprocess.STDOUT, env=env, close_fds=True) return_string = return_string.decode("utf-8") print_comment( % \ (prefix,return_string.replace(,+prefix)), verbose) return return_string except AttributeError: print_comment_v() return_string = subprocess.Popen(command, cwd=directory, shell=True, stdout=subprocess.PIPE).communicate()[0] return return_string except subprocess.CalledProcessError as e: print_comment_v(%e) print_comment_v(%(prefix,e.output.decode().replace(,+prefix))) return None except: print_comment_v(%e) return None print_comment("Finished execution", verbose)
Execute a command in specific working directory
385,150
def as_nddata(self, nddata_class=None): "Return a version of ourself as an astropy.nddata.NDData object" if nddata_class is None: from astropy.nddata import NDData nddata_class = NDData ahdr = self.get_header() header = OrderedDict(ahdr.items()) data = self.get_mddata() wcs = None if hasattr(self, ) and self.wcs is not None: wcs = self.wcs.wcs ndd = nddata_class(data, wcs=wcs, meta=header) return ndd
Return a version of ourself as an astropy.nddata.NDData object
385,151
def options(self, context, module_options): self.contype = self.port = 5900 self.password = None if not in module_options: context.log.error() exit(1) if in module_options: self.contype = module_options[] if in module_options: self.port = int(module_options[]) self.password = module_options[] self.ps_script1 = obfs_ps_script() self.ps_script2 = obfs_ps_script()
CONTYPE Specifies the VNC connection type, choices are: reverse, bind (default: reverse). PORT VNC Port (default: 5900) PASSWORD Specifies the connection password.
385,152
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error): line = clean_lines.elided[linenum] match = Search(pattern, line) if not match: return False context = line[0:match.start(1) - 1] if Match(r, context): return False if linenum > 0: for i in xrange(linenum - 1, max(0, linenum - 5), -1): context = clean_lines.elided[i] + context if Match(r, context): return False if context.endswith() or context.endswith(): return False if Match(r, remainder): return False if Match(r, line[0:match.start(0)]): return False (cast_type, match.group(1))) return True
Checks for a C-style cast by looking for the pattern. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. cast_type: The string for the C++ cast to recommend. This is either reinterpret_cast, static_cast, or const_cast, depending. pattern: The regular expression used to find C-style casts. error: The function to call with any errors found. Returns: True if an error was emitted. False otherwise.
385,153
def add_request_handler_chain(self, request_handler_chain): if request_handler_chain is None or not isinstance( request_handler_chain, GenericRequestHandlerChain): raise DispatchException( "Request Handler Chain is not a GenericRequestHandlerChain " "instance") self._request_handler_chains.append(request_handler_chain)
Checks the type before adding it to the request_handler_chains instance variable. :param request_handler_chain: Request Handler Chain instance. :type request_handler_chain: RequestHandlerChain :raises: :py:class:`ask_sdk_runtime.exceptions.DispatchException` if a null input is provided or if the input is of invalid type
385,154
def delete_servers(*servers, **options): *** test = options.pop(, False) commit = options.pop(, True) return __salt__[](, servers=servers, test=test, commit=commit, inherit_napalm_device=napalm_device)
Removes NTP servers configured on the device. :param servers: list of IP Addresses/Domain Names to be removed as NTP servers :param test (bool): discard loaded config. By default ``test`` is False (will not discard the changes) :param commit (bool): commit loaded config. By default ``commit`` is True (will commit the changes). Useful when the user does not want to commit after each change, but after a couple. By default this function will commit the config changes (if any). To load without committing, use the ``commit`` option. For dry run use the ``test`` argument. CLI Example: .. code-block:: bash salt '*' ntp.delete_servers 8.8.8.8 time.apple.com salt '*' ntp.delete_servers 172.17.17.1 test=True # only displays the diff salt '*' ntp.delete_servers 192.168.0.1 commit=False # preserves the changes, but does not commit
385,155
def makedirs(self, path, mode=0x777):
    "Super-mkdir: create a leaf directory and all intermediate ones."
    self.directory_create(path, mode, [library.DirectoryCreateFlag.parents])
Super-mkdir: create a leaf directory and all intermediate ones.
385,156
def is_provider_configured(opts, provider, required_keys=(), log_message=True, aliases=()): if in provider: alias, driver = provider.split() if alias not in opts[]: return False if driver not in opts[][alias]: return False for key in required_keys: if opts[][alias][driver].get(key, None) is None: if log_message is True: return opts[][alias][driver] for alias, drivers in six.iteritems(opts[]): for driver, provider_details in six.iteritems(drivers): if driver != provider and driver not in aliases: continue ) skip_provider = True break if skip_provider: continue return provider_details return False
Check and return the first matching and fully configured cloud provider configuration.
385,157
def addFeature(self, f, conflict="error", missing="other"): OPTIONS = ["error", "ignore", "me", "other"] assert missing in OPTIONS, "Invalid value in `missing`." assert conflict in OPTIONS, "Invalid value in `missing`." if f.prop not in self.props and missing == "error": raise Exception("Property has not set.") elif f.prop not in self.props and missing in ["ignore", "first"]: return if isinstance(f.value, int) or isinstance(f.value, float): if f.operator == "=": inter1 = (f, f) elif f.operator[0] == "<": inter1 = (None, f) elif f.operator[0] == ">": inter1 = (f, None) inter0 = self.props.get(f.prop, (None, None)) try: self.props[f.prop] = Features._applyInter(inter0, inter1, conflict) except Exception as e: raise RADLParseException("%s. Involved features: %s" % (e, [str(f0) for f0 in inter0]), line=f.line) elif isinstance(f, SoftFeatures): self.props.setdefault(f.prop, []).append(f) elif f.operator == "contains": if f.prop in self.props and f.value.getValue("name") in self.props[f.prop]: feature = self.props[f.prop][f.value.getValue("name")].clone() for f0 in f.value.features: feature.value.addFeature(f0, conflict, missing) self.props[f.prop][f.value.getValue("name")] = feature else: self.props.setdefault(f.prop, {})[f.value.getValue("name")] = f else: value0 = self.props.get(f.prop, None) if not value0 or (conflict == "other"): self.props[f.prop] = f elif value0.value != f.value and conflict == "error": raise RADLParseException("Conflict adding `%s` because `%s` is already set and conflict is" " %s" % (f, value0, conflict), line=f.line)
Add a feature. Args: - f(Feature): feature to add. - conflict(str): if a property hasn't compatible values/constrains, do: - ``"error"``: raise exception. - ``"ignore"``: go on. - ``"me"``: keep the old value. - ``"other"``: set the passed value. - missing(str): if a property has not been set yet, do: - ``"error"``: raise exception. - ``"ignore"``: do nothning. - ``"me"``: do nothing. - ``"other"``: set the passed value.
385,158
def _compute_include_paths(self, target): paths = OrderedSet() paths.add(os.path.join(get_buildroot(), target.target_base)) def collect_paths(dep): if not dep.has_sources(): return paths.add(os.path.join(get_buildroot(), dep.target_base)) collect_paths(target) target.walk(collect_paths) return paths
Computes the set of paths that thrifty uses to lookup imports. The IDL files under these paths are not compiled, but they are required to compile downstream IDL files. :param target: the JavaThriftyLibrary target to compile. :return: an ordered set of directories to pass along to thrifty.
385,159
def edit_team_push_restrictions(self, *teams): assert all(isinstance(element, (str, unicode)) or isinstance(element, (str, unicode)) for element in teams), teams headers, data = self._requester.requestJsonAndCheck( "POST", self.protection_url + "/restrictions/teams", input=teams )
:calls: `POST /repos/:owner/:repo/branches/:branch/protection/restrictions <https://developer.github.com/v3/repos/branches>`_ :teams: list of strings
385,160
def grant_user_access(self, user, db_names, strict=True): user = utils.get_name(user) uri = "/%s/%s/databases" % (self.uri_base, user) db_names = self._get_db_names(db_names, strict=strict) dbs = [{"name": db_name} for db_name in db_names] body = {"databases": dbs} try: resp, resp_body = self.api.method_put(uri, body=body) except exc.NotFound as e: raise exc.NoSuchDatabaseUser("User does not exist." % user)
Gives access to the databases listed in `db_names` to the user. You may pass in either a single db or a list of dbs. If any of the databases do not exist, a NoSuchDatabase exception will be raised, unless you specify `strict=False` in the call.
385,161
def beta_diversity(self, metric="braycurtis", rank="auto"): if metric not in ("jaccard", "braycurtis", "cityblock"): raise OneCodexException( "For beta diversity, metric must be one of: jaccard, braycurtis, cityblock" ) if self._guess_normalized(): raise OneCodexException("Beta diversity requires unnormalized read counts.") df = self.to_df(rank=rank, normalize=False) counts = [] for c_id in df.index: counts.append(df.loc[c_id].tolist()) return skbio.diversity.beta_diversity(metric, counts, df.index.tolist())
Calculate the diversity between two communities. Parameters ---------- metric : {'jaccard', 'braycurtis', 'cityblock'} The distance metric to calculate. rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional Analysis will be restricted to abundances of taxa at the specified level. Returns ------- skbio.stats.distance.DistanceMatrix, a distance matrix.
385,162
def read_json(self): with reading_ancillary_files(self): metadata = super(ImpactLayerMetadata, self).read_json() if in metadata: for provenance_step in metadata[]: try: title = provenance_step[] if in title: self.append_if_provenance_step( provenance_step[], provenance_step[], provenance_step[], provenance_step[] ) else: self.append_provenance_step( provenance_step[], provenance_step[], provenance_step[], ) except KeyError: pass if in metadata: self.summary_data = metadata[] return metadata
read metadata from json and set all the found properties. :return: the read metadata :rtype: dict
385,163
def angleDiff(angle1, angle2, take_smaller=True): a = np.arctan2(np.sin(angle1 - angle2), np.cos(angle1 - angle2)) if isinstance(a, np.ndarray) and take_smaller: a = np.abs(a) ab = np.abs(np.pi - a) with np.errstate(invalid=): i = a > ab a[i] = ab[i] return a
Smallest difference between two angles. Code from http://stackoverflow.com/questions/1878907/the-smallest-difference-between-2-angles
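A quick numeric check of the wrap-around behaviour (scalar inputs skip the take_smaller branch):

    import numpy as np
    angleDiff(0.9 * np.pi, -0.9 * np.pi)
    # -> about -0.63 rad, i.e. 0.2*pi the short way round rather than 1.8*pi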
385,164
def get_default_jvm_opts(tmp_dir=None, parallel_gc=False):
    opts = ["-XX:+UseSerialGC"] if not parallel_gc else []
    if tmp_dir:
        opts.append("-Djava.io.tmpdir=%s" % tmp_dir)
    return opts
Retrieve default JVM tuning options Avoids issues with multiple spun up Java processes running into out of memory errors. Parallel GC can use a lot of cores on big machines and primarily helps reduce task latency and responsiveness which are not needed for batch jobs. https://github.com/bcbio/bcbio-nextgen/issues/532#issuecomment-50989027 https://wiki.csiro.au/pages/viewpage.action?pageId=545034311 http://stackoverflow.com/questions/9738911/javas-serial-garbage-collector-performing-far-better-than-other-garbage-collect However, serial GC causes issues with Spark local runs so we use parallel for those cases: https://github.com/broadinstitute/gatk/issues/3605#issuecomment-332370070
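The resulting option lists for the two GC modes (temporary directory path is illustrative):

    get_default_jvm_opts("/scratch/tmp")
    # -> ['-XX:+UseSerialGC', '-Djava.io.tmpdir=/scratch/tmp']
    get_default_jvm_opts("/scratch/tmp", parallel_gc=True)
    # -> ['-Djava.io.tmpdir=/scratch/tmp']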
385,165
def get_structure_by_id(self, cod_id, **kwargs):
    r = requests.get("http://www.crystallography.net/cod/%s.cif" % cod_id)
    return Structure.from_str(r.text, fmt="cif", **kwargs)
Queries the COD for a structure by id. Args: cod_id (int): COD id. kwargs: All kwargs supported by :func:`pymatgen.core.structure.Structure.from_str`. Returns: A Structure.
385,166
def dec(self,*args,**kwargs): _check_roSet(self,kwargs,) radec= self._radec(*args,**kwargs) return radec[:,1]
NAME: dec PURPOSE: return the declination INPUT: t - (optional) time at which to get dec obs=[X,Y,Z] - (optional) position of observer (in kpc) (default=Object-wide default) OR Orbit object that corresponds to the orbit of the observer Y is ignored and always assumed to be zero ro= distance in kpc corresponding to R=1. (default=Object-wide default) OUTPUT: dec(t) HISTORY: 2011-02-23 - Written - Bovy (NYU)
385,167
def record_process(self, process, prg=): self._log(self.logFileProcess, force_to_string(process), prg)
log a process or program - log a physical program (.py, .bat, .exe)
385,168
def put_container(self, path): path = make_path(path) container = self for segment in path: try: container = container._values[segment] if not isinstance(container, ValueTree): raise ValueError() except KeyError: valuetree = ValueTree() container._values[segment] = valuetree container = valuetree
Creates a container at the specified path, creating any necessary intermediate containers. :param path: str or Path instance :raises ValueError: A component of path is a field name.
385,169
def MAKE_WPARAM(wParam):
    wParam = ctypes.cast(wParam, LPVOID).value
    if wParam is None:
        wParam = 0
    return wParam
Convert arguments to the WPARAM type. Used automatically by SendMessage, PostMessage, etc. You shouldn't need to call this function.
385,170
def _build_dependent_model_list(self, obj_schema): dep_models_list = [] if obj_schema: obj_schema[] = obj_schema.get(, ) if obj_schema[] == : dep_models_list.extend(self._build_dependent_model_list(obj_schema.get(, {}))) else: ref = obj_schema.get() if ref: ref_obj_model = ref.split("/")[-1] ref_obj_schema = self._models().get(ref_obj_model) dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema)) dep_models_list.extend([ref_obj_model]) else: properties = obj_schema.get() if properties: for _, prop_obj_schema in six.iteritems(properties): dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema)) return list(set(dep_models_list))
Helper function to build the list of models the given object schema is referencing.
385,171
def set_learning_rate(self, lr): if not isinstance(self._optimizer, opt.Optimizer): raise UserWarning("Optimizer has to be defined before its learning " "rate is mutated.") else: self._optimizer.set_learning_rate(lr)
Sets a new learning rate of the optimizer. Parameters ---------- lr : float The new learning rate of the optimizer.
385,172
def unquote(s): res = s.split() if len(res) == 1: return s s = res[0] for item in res[1:]: try: s += _hextochr[item[:2]] + item[2:] except KeyError: s += + item except UnicodeDecodeError: s += chr(int(item[:2], 16)) + item[2:] return s
unquote('abc%20def') -> 'abc def'.
385,173
def print_multi_line(content, force_single_line, sort_key): global last_output_lines global overflow_flag global is_atty if not is_atty: if isinstance(content, list): for line in content: print(line) elif isinstance(content, dict): for k, v in sorted(content.items(), key=sort_key): print("{}: {}".format(k, v)) else: raise TypeError("Excepting types: list, dict. Got: {}".format(type(content))) return columns, rows = get_terminal_size() lines = lines_of_content(content, columns) if force_single_line is False and lines > rows: overflow_flag = True elif force_single_line is True and len(content) > rows: overflow_flag = True print("\b" * columns, end="") if isinstance(content, list): for line in content: _line = preprocess(line) print_line(_line, columns, force_single_line) elif isinstance(content, dict): for k, v in sorted(content.items(), key=sort_key): _k, _v = map(preprocess, (k, v)) print_line("{}: {}".format(_k, _v), columns, force_single_line) else: raise TypeError("Excepting types: list, dict. Got: {}".format(type(content))) print(" " * columns * (last_output_lines - lines), end="") print(magic_char * (max(last_output_lines, lines)-1), end="") sys.stdout.flush() last_output_lines = lines
The 'sort_key' parameter is only available in 'dict' mode.
385,174
def uploadFiles(self): for each_file in self.filesToSync: self.uploadFile(each_file["name"], each_file["ispickle"], each_file["at_home"])
Uploads all the files in 'filesToSync'
385,175
def _compute_closed_central_moments(self, central_from_raw_exprs, n_counter, k_counter): n_species = len([None for pm in k_counter if pm.order == 1]) covariance_matrix = sp.Matrix(n_species, n_species, lambda x,y: self._get_covariance_symbol(n_counter,x,y)) positive_n_counter = [n for n in n_counter if n.order > 1] out_mat = [self._compute_one_closed_central_moment(n, covariance_matrix) for n in positive_n_counter ] return sp.Matrix(out_mat)
Computes parametric expressions (e.g. in terms of mean, variance, covariances) for all central moments up to max_order + 1 order. :param central_from_raw_exprs: :param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments :type n_counter: list[:class:`~means.core.descriptors.Moment`] :param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments :type k_counter: list[:class:`~means.core.descriptors.Moment`] :return: a vector of parametric expression for central moments
385,176
def compute_lower_upper_errors(sample, num_sigma=1): if num_sigma > 3: raise ValueError("Number of sigma-constraints restircted to three. %s not valid" % num_sigma) num = len(sample) num_threshold1 = int(round((num-1)*0.833)) num_threshold2 = int(round((num-1)*0.977249868)) num_threshold3 = int(round((num-1)*0.998650102)) median = np.median(sample) sorted_sample = np.sort(sample) if num_sigma > 0: upper_sigma1 = sorted_sample[num_threshold1-1] lower_sigma1 = sorted_sample[num-num_threshold1-1] else: return median, [[]] if num_sigma > 1: upper_sigma2 = sorted_sample[num_threshold2-1] lower_sigma2 = sorted_sample[num-num_threshold2-1] else: return median, [[median-lower_sigma1, upper_sigma1-median]] if num_sigma > 2: upper_sigma3 = sorted_sample[num_threshold3-1] lower_sigma3 = sorted_sample[num-num_threshold3-1] return median, [[median-lower_sigma1, upper_sigma1-median], [median-lower_sigma2, upper_sigma2-median], [median-lower_sigma3, upper_sigma3-median]] else: return median, [[median-lower_sigma1, upper_sigma1-median], [median-lower_sigma2, upper_sigma2-median]]
computes the upper and lower sigma from the median value. This functions gives good error estimates for skewed pdf's :param sample: 1-D sample :return: median, lower_sigma, upper_sigma
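A sanity check on synthetic Gaussian data; the recovered widths should be roughly the input sigma, with exact values wobbling from sample to sample:

    import numpy as np
    sample = np.random.normal(loc=5.0, scale=2.0, size=100000)
    median, [(low1, up1)] = compute_lower_upper_errors(sample, num_sigma=1)
    # median ~ 5.0, low1 and up1 each roughly 2.0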
385,177
def last(args, dbtype=None): p = OptionParser(last.__doc__) p.add_option("--dbtype", default="nucl", choices=("nucl", "prot"), help="Molecule type of subject database") p.add_option("--path", help="Specify LAST path") p.add_option("--mask", default=False, action="store_true", help="Invoke -c in lastdb") p.add_option("--format", default="BlastTab", choices=("TAB", "MAF", "BlastTab", "BlastTab+"), help="Output format") p.add_option("--minlen", default=0, type="int", help="Filter alignments by how many bases match") p.add_option("--minid", default=0, type="int", help="Minimum sequence identity") p.set_cpus() p.set_params() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) subject, query = args path = opts.path cpus = opts.cpus if not dbtype: dbtype = opts.dbtype getpath = lambda x: op.join(path, x) if path else x lastdb_bin = getpath("lastdb") lastal_bin = getpath("lastal") subjectdb = subject.rsplit(".", 1)[0] run_lastdb(infile=subject, outfile=subjectdb + ".prj", mask=opts.mask, \ lastdb_bin=lastdb_bin, dbtype=dbtype) u = 2 if opts.mask else 0 cmd = "{0} -u {1}".format(lastal_bin, u) cmd += " -P {0} -i3G".format(cpus) cmd += " -f {0}".format(opts.format) cmd += " {0} {1}".format(subjectdb, query) minlen = opts.minlen minid = opts.minid extra = opts.extra assert minid != 100, "Perfect match not yet supported" mm = minid / (100 - minid) if minlen: extra += " -e{0}".format(minlen) if minid: extra += " -r1 -q{0} -a{0} -b{0}".format(mm) if extra: cmd += " " + extra.strip() lastfile = get_outfile(subject, query, suffix="last") sh(cmd, outfile=lastfile)
%prog database.fasta query.fasta Run LAST by calling LASTDB and LASTAL. LAST program available: <http://last.cbrc.jp> Works with LAST-719.
385,178
def record(self): if not self._initialized: raise pycdlibexception.PyCdlibInternalError() return struct.pack(self.FMT, self.extent_length, self.log_block_num, self.part_ref_num, self.impl_use)
A method to generate the string representing this UDF Long AD. Parameters: None. Returns: A string representing this UDF Long AD.
385,179
def _getID(self): id = [] for key in self._sqlPrimary: value = self.__dict__[key] if isinstance(value, Forgetter): value.save() try: (value,) = value._getID() except: raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value) id.append(value) return id
Get the ID values as a tuple annotated by sqlPrimary
385,180
def getSystemVariable(self, remote, name): if self._server is not None: return self._server.getSystemVariable(remote, name)
Get single system variable from CCU / Homegear
385,181
def point_image_value(image, xy, scale=1): return getinfo(ee.Image(image).reduceRegion( reducer=ee.Reducer.first(), geometry=ee.Geometry.Point(xy), scale=scale))
Extract the output value from a calculation at a point
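A hedged Earth Engine usage sketch; the asset id and band name below are assumptions for illustration, and `getinfo` is taken to be the module's own wrapper around getInfo():

    img = ee.Image("USGS/SRTMGL1_003")                   # 30 m SRTM elevation, single band
    point_image_value(img, xy=(-106.8, 39.2), scale=30)
    # -> {'elevation': <value at that point>}  (keys follow the image's band names)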
385,182
def rename_categories(self, new_categories, inplace=False): inplace = validate_bool_kwarg(inplace, ) cat = self if inplace else self.copy() if isinstance(new_categories, ABCSeries): msg = ("Treating Series as a list-like and using " "the values. In a future version, will " "treat Series like a dictionary.\n" "For dict-like, use \n" "For list-like, use .") warn(msg, FutureWarning, stacklevel=2) new_categories = list(new_categories) if is_dict_like(new_categories): cat.categories = [new_categories.get(item, item) for item in cat.categories] elif callable(new_categories): cat.categories = [new_categories(item) for item in cat.categories] else: cat.categories = new_categories if not inplace: return cat
Rename categories. Parameters ---------- new_categories : list-like, dict-like or callable * list-like: all items must be unique and the number of items in the new categories must match the existing number of categories. * dict-like: specifies a mapping from old categories to new. Categories not contained in the mapping are passed through and extra categories in the mapping are ignored. .. versionadded:: 0.21.0 * callable : a callable that is called on all items in the old categories and whose return values comprise the new categories. .. versionadded:: 0.23.0 .. warning:: Currently, Series are considered list like. In a future version of pandas they'll be considered dict-like. inplace : bool, default False Whether or not to rename the categories inplace or return a copy of this categorical with renamed categories. Returns ------- cat : Categorical or None With ``inplace=False``, the new categorical is returned. With ``inplace=True``, there is no return value. Raises ------ ValueError If new categories are list-like and do not have the same number of items than the current categories or do not validate as categories See Also -------- reorder_categories add_categories remove_categories remove_unused_categories set_categories Examples -------- >>> c = pd.Categorical(['a', 'a', 'b']) >>> c.rename_categories([0, 1]) [0, 0, 1] Categories (2, int64): [0, 1] For dict-like ``new_categories``, extra keys are ignored and categories not in the dictionary are passed through >>> c.rename_categories({'a': 'A', 'c': 'C'}) [A, A, b] Categories (2, object): [A, b] You may also provide a callable to create the new categories >>> c.rename_categories(lambda x: x.upper()) [A, A, B] Categories (2, object): [A, B]
385,183
def create_expanded_design_for_mixing(design, draw_list, mixing_pos, rows_to_mixers):
    if len(mixing_pos) != len(draw_list):
        msg = "mixing_pos == {}".format(mixing_pos)
        msg_2 = "len(draw_list) == {}".format(len(draw_list))
        raise ValueError(msg + "\n" + msg_2)

    num_draws = draw_list[0].shape[1]
    orig_num_vars = design.shape[1]

    arrays_for_mixing = design[:, mixing_pos]
    expanded_design = np.concatenate((design, arrays_for_mixing), axis=1).copy()
    design_3d = np.repeat(expanded_design[:, None, :], repeats=num_draws, axis=1)

    for pos, idx in enumerate(mixing_pos):
        rel_draws = draw_list[pos]
        rel_long_draws = rows_to_mixers.dot(rel_draws)
        design_3d[:, :, orig_num_vars + pos] *= rel_long_draws
    return design_3d
Parameters ---------- design : 2D ndarray. All elements should be ints, floats, or longs. Each row corresponds to an available alternative for a given individual. There should be one column per index coefficient being estimated. draw_list : list of 2D ndarrays. All numpy arrays should have the same number of columns (`num_draws`) and the same number of rows (`num_mixers`). All elements of the numpy arrays should be ints, floats, or longs. Should have as many elements as there are elements in `mixing_pos`. mixing_pos : list of ints. Each element should denote a column in design whose associated index coefficient is being treated as a random variable. rows_to_mixers : 2D scipy sparse array. All elements should be zeros and ones. Will map the rows of the design matrix to the particular units that the mixing is being performed over. Note that in the case of panel data, this matrix will be different from `rows_to_obs`. Returns ------- design_3d : 3D numpy array. Each slice of the third dimension will contain a copy of the design matrix corresponding to a given draw of the random variables being mixed over.
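A minimal usage sketch of the function above, assuming it is in scope and using toy data invented for illustration (numpy and scipy are the only dependencies):

import numpy as np
from scipy.sparse import csr_matrix

# 4 alternative rows, 3 index coefficients; coefficient 2 is mixed over 2 units with 5 draws.
design = np.arange(12, dtype=float).reshape(4, 3)
mixing_pos = [2]
num_mixers, num_draws = 2, 5
draw_list = [np.random.normal(size=(num_mixers, num_draws))]
# Rows 0-1 belong to mixing unit 0, rows 2-3 to unit 1.
rows_to_mixers = csr_matrix(np.array([[1, 0], [1, 0], [0, 1], [0, 1]], dtype=float))

design_3d = create_expanded_design_for_mixing(design, draw_list, mixing_pos, rows_to_mixers)
print(design_3d.shape)  # (4, 5, 4): rows x draws x (original columns + one mixed copy)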
385,184
def tag(self, sbo):
    sbo_name = "-".join(sbo.split("-")[:-1])
    find = GetFromInstalled(sbo_name).name()
    if find_package(sbo, self.meta.pkg_path):
        paint = self.meta.color["GREEN"]
        self.count_ins += 1
        if "--rebuild" in self.flag:
            self.count_upg += 1
    elif sbo_name == find:
        paint = self.meta.color["YELLOW"]
        self.count_upg += 1
    else:
        paint = self.meta.color["RED"]
        self.count_uni += 1
    return paint
Tag with color green if package already installed, color yellow for packages to upgrade and color red if not installed.
385,185
def register_variable(self, v, key, eternal=True):
    if type(key) is not tuple:
        raise TypeError("Variable tracking key must be a tuple")
    if eternal:
        self.eternal_tracked_variables[key] = v
    else:
        self.temporal_tracked_variables = dict(self.temporal_tracked_variables)
        ctrkey = key + (None,)
        ctrval = self.temporal_tracked_variables.get(ctrkey, 0) + 1
        self.temporal_tracked_variables[ctrkey] = ctrval
        tempkey = key + (ctrval,)
        self.temporal_tracked_variables[tempkey] = v
Register a value with the variable tracking system :param v: The BVS to register :param key: A tuple to register the variable under :param eternal: Whether this is an eternal variable, default True. If False, an incrementing counter will be appended to the key.
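A small, hypothetical sketch of the temporal bookkeeping described above, using a plain dict in place of the plugin's state (only the counter logic is mirrored; names and values are illustrative):

tracked = {}

def track_temporal(tracked, key, v):
    # key + (None,) stores a counter; key + (n,) stores the n-th value registered under key.
    ctrkey = key + (None,)
    ctrval = tracked.get(ctrkey, 0) + 1
    tracked[ctrkey] = ctrval
    tracked[key + (ctrval,)] = v

track_temporal(tracked, ('mem', 0x1000), 'bvs_a')
track_temporal(tracked, ('mem', 0x1000), 'bvs_b')
# tracked: {('mem', 4096, None): 2, ('mem', 4096, 1): 'bvs_a', ('mem', 4096, 2): 'bvs_b'}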
385,186
def publish_extensions(self, handler):
    if isinstance(self.media_content, list):
        [PyRSS2Gen._opt_element(handler, "media:content", mc_element)
         for mc_element in self.media_content]
    else:
        PyRSS2Gen._opt_element(handler, "media:content", self.media_content)
    # Attribute-name literals restored from the attributes accessed in each branch.
    if hasattr(self, 'media_title'):
        PyRSS2Gen._opt_element(handler, "media:title", self.media_title)
    if hasattr(self, 'media_text'):
        PyRSS2Gen._opt_element(handler, "media:text", self.media_text)
Publish the Media RSS Feed elements as XML.
385,187
def _count(self, X, Y):
    self.feature_count_ += safe_sparse_dot(Y.T, X)
    self.class_count_ += Y.sum(axis=0)
Count and smooth feature occurrences.
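The update above is a one-hot class matrix multiplied against the feature matrix; a plain numpy illustration of the same arithmetic (dense stand-in for safe_sparse_dot, with made-up data):

import numpy as np

X = np.array([[1, 0, 2],     # per-sample feature counts
              [0, 3, 1],
              [2, 1, 0]])
Y = np.array([[1, 0],        # one-hot class membership (samples 0 and 2 in class 0)
              [0, 1],
              [1, 0]])
feature_count = Y.T @ X      # (n_classes, n_features) totals: [[3, 1, 2], [0, 3, 1]]
class_count = Y.sum(axis=0)  # samples per class: [2, 1]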
385,188
def get_commit_req(self):
    if not self.commit_req:
        self.commit_req = datastore.CommitRequest()
        self.commit_req.transaction = self.tx
    return self.commit_req
Lazy commit request getter.
385,189
def _format_return_timestamps(self, return_timestamps=None):
    if return_timestamps is None:
        return_timestamps_array = np.arange(
            self.components.initial_time(),
            self.components.final_time() + self.components.saveper(),
            self.components.saveper(), dtype=np.float64)
    elif inspect.isclass(range) and isinstance(return_timestamps, range):
        return_timestamps_array = np.array(return_timestamps, ndmin=1)
    elif isinstance(return_timestamps, (list, int, float, np.ndarray)):
        return_timestamps_array = np.array(return_timestamps, ndmin=1)
    elif isinstance(return_timestamps, _pd.Series):
        return_timestamps_array = return_timestamps.as_matrix()
    else:
        # Error message reconstructed; the original string literal was lost.
        raise TypeError('`return_timestamps` expects a list, array, pandas Series, '
                        'range, int, float, or None')
    return return_timestamps_array
Format the passed in return timestamps value as a numpy array. If no value is passed, build up array of timestamps based upon model start and end times, and the 'saveper' value.
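For the default branch, the result is an inclusive arange over the model horizon; for example, with hypothetical values initial_time=0, final_time=10 and saveper=2:

import numpy as np

initial_time, final_time, saveper = 0.0, 10.0, 2.0
timestamps = np.arange(initial_time, final_time + saveper, saveper, dtype=np.float64)
# array([ 0.,  2.,  4.,  6.,  8., 10.])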
385,190
def json_2_team(json_obj):
    LOGGER.debug("Team.json_2_team")
    # JSON field names below were stripped from the source; these are plausible
    # reconstructions of the Ariane payload keys, not verified values.
    return Team(teamid=json_obj['teamID'],
                name=json_obj['teamName'],
                description=json_obj['teamDescription'],
                color_code=json_obj['teamColorCode'],
                app_ids=json_obj['teamApplicationsID'],
                osi_ids=json_obj['teamOSInstancesID'])
transform JSON obj coming from Ariane to ariane_clip3 object :param json_obj: the JSON obj coming from Ariane :return: ariane_clip3 Team object
385,191
def canonic(self, file_name):
    if file_name == "<" + file_name[1:-1] + ">":
        return file_name
    c_file_name = self.file_name_cache.get(file_name)
    if not c_file_name:
        c_file_name = os.path.abspath(file_name)
        c_file_name = os.path.normcase(c_file_name)
        self.file_name_cache[file_name] = c_file_name
    return c_file_name
returns canonical version of a file name. A canonical file name is an absolute, lowercase normalized path to a given file.
385,192
def filter_dict_by_key(d, keys): return {k: v for k, v in d.items() if k in keys}
Filter the dict *d* to remove keys not in *keys*.
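Assuming the function above is in scope, usage is straightforward:

d = {"a": 1, "b": 2, "c": 3}
filter_dict_by_key(d, {"a", "c"})  # {'a': 1, 'c': 3}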
385,193
def list(self, device_path, timeout_ms=None): return self.filesync_service.list( device_path, timeouts.PolledTimeout.from_millis(timeout_ms))
Yield filesync_service.DeviceFileStat objects for directory contents.
385,194
def scale(self, new_volume: float) -> "Lattice":
    versors = self.matrix / self.abc
    geo_factor = abs(dot(np.cross(versors[0], versors[1]), versors[2]))
    ratios = np.array(self.abc) / self.c
    new_c = (new_volume / (geo_factor * np.prod(ratios))) ** (1 / 3.0)
    return Lattice(versors * (new_c * ratios))
Return a new Lattice with volume new_volume by performing a scaling of the lattice vectors so that length proportions and angles are preserved. Args: new_volume: New volume to scale to. Returns: New lattice with desired volume.
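A hedged usage sketch, assuming the pymatgen-style constructors and properties that accompany this class (import path and helper names may differ by version):

from pymatgen.core import Lattice

lat = Lattice.cubic(3.0)          # cubic cell, volume 27.0
scaled = lat.scale(54.0)          # double the volume
print(round(scaled.volume, 6))    # 54.0
print(scaled.abc)                 # each edge 3.0 * 2**(1/3) ~= 3.7798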
385,195
def _validate_apns_certificate(self, certfile):
    try:
        with open(certfile, "r") as f:
            content = f.read()
            check_apns_certificate(content)
    except Exception as e:
        raise ImproperlyConfigured(
            "The APNS certificate file at %r is not readable: %s" % (certfile, e)
        )
Validate the APNS certificate at startup.
385,196
async def inject_request_id(app, handler):
    async def trace_request(request):
        # The request key and header name literals were lost in extraction; the names
        # below are assumptions, not verified against the original project.
        request['x_request_id'] = \
            request.headers.get('X-Request-Id') or str(uuid.uuid4())
        return await handler(request)
    return trace_request
aiohttp middleware: ensures each request has a unique request ID. See: ``inject_request_id``.
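A registration sketch, assuming the aiohttp 2.x-style middleware factory signature used above and the reconstructed 'x_request_id' key:

from aiohttp import web

app = web.Application(middlewares=[inject_request_id])

async def handler(request):
    # The middleware guarantees the key is present before the handler runs.
    return web.json_response({'request_id': request['x_request_id']})

app.router.add_get('/', handler)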
385,197
def clear_cached_authc_info(self, identifier):
    msg = "Clearing cached authc_info for [{0}]".format(identifier)
    logger.debug(msg)
    # The cache-domain prefix literal was stripped from the source; 'authentication:'
    # is an assumed reconstruction.
    self.cache_handler.delete('authentication:' + self.name, identifier)
When cached credentials are no longer needed, they can be manually cleared with this method. However, account credentials may be cached with a short expiration time (TTL), making the manual clearing of cached credentials an alternative use case. :param identifier: the identifier of a specific source, extracted from the SimpleIdentifierCollection (identifiers)
385,198
def is_propagating(self, images, augmenter, parents, default):
    if self.propagator is None:
        return default
    else:
        return self.propagator(images, augmenter, parents, default)
Returns whether an augmenter may call its children to augment an image. This is independent of the augmenter itself possibly changing the image without calling its children. (Most (all?) augmenters with children currently don't perform any changes themselves.) Returns ------- bool If True, the augmenter may propagate to its children. If False, it may not.
385,199
def size_changed(self, settings, key, user_data):
    RectCalculator.set_final_window_rect(self.settings, self.guake.window)
If the gconf vars window_height or window_width are changed, this method will be called and will call the resize function in guake.