Dataset columns (viewer summary): Unnamed: 0 — int64 row index, 0 to 389k; code — string, lengths 26 to 79.6k; docstring — string, lengths 1 to 46.9k.
def put(self, endpoint, data, **kwargs): return self.__request("PUT", endpoint, data, **kwargs)
Send a PUT request to the given endpoint.
10,501
def get_sigla(self, work):
    # the glob-pattern literal was lost in extraction; "*" (all files in the
    # work directory) is assumed here
    return [os.path.splitext(os.path.basename(path))[0]
            for path in glob.glob(os.path.join(self._path, work, "*"))]
Returns a list of all of the sigla for `work`. :param work: name of work :type work: `str` :rtype: `list` of `str`
10,502
def run(self):
    logger.info('Running job %s', self)  # log-message literal lost in extraction; wording assumed
    ret = self.job_func()
    self.last_run = datetime.datetime.now()
    self._schedule_next_run()
    return ret
Run the job and immediately reschedule it. :return: The return value returned by the `job_func`
10,503
def multipart_complete(self, multipart):
    multipart.complete()
    db.session.commit()
    version_id = str(uuid.uuid4())
    return self.make_response(
        data=multipart,
        context={
            # dict-key literals were lost in extraction; these names are assumed
            'class': MultipartObject,
            'bucket': multipart.bucket,
            'version_id': version_id,
        },
        task_result=merge_multipartobject.delay(
            str(multipart.upload_id),
            version_id=version_id,
        ),
    )
Complete a multipart upload. :param multipart: A :class:`invenio_files_rest.models.MultipartObject` instance. :returns: A Flask response.
10,504
def set(self, handler, attr, name, path, cfg): full_name = ("%s.%s" % (path, name)).strip(".") if attr.default is None: default = None else: try: comp = vodka.component.Component(cfg) default = handler.default(name, inst=comp) if self.skip_defaults: self.echo("%s: %s [default]" % (full_name, default)) return default except Exception: raise self.echo("") self.echo(attr.help_text) if attr.choices: self.echo("choices: %s" % ", ".join([str(c) for c in attr.choices])) b = False while not b: try: if type(attr.expected_type) == type: r = self.prompt(full_name, default=default, type=attr.expected_type) r = attr.expected_type(r) else: r = self.prompt(full_name, default=default, type=str) except ValueError: self.echo("Value expected to be of type %s"% attr.expected_type) try: b = handler.check({name:r}, name, path) except Exception as inst: if hasattr(inst, "explanation"): self.echo(inst.explanation) else: raise return r
Obtain a value for a config variable by prompting the user for input, substituting a default value if needed. Also validates the user's input.
10,505
def _copy(self, filename, dir1, dir2):
    # single-quoted string literals were lost in extraction; the separator and
    # log-message strings below are reconstructed and should be treated as assumed
    if self._copyfiles:
        rel_path = filename.replace('\\', '/').split('/')
        rel_dir = '/'.join(rel_path[:-1])
        filename = rel_path[-1]
        dir2_root = dir2
        dir1 = os.path.join(dir1, rel_dir)
        dir2 = os.path.join(dir2, rel_dir)
        if self._verbose:
            self.log('Copying file %s from %s to %s' % (filename, dir1, dir2))
        try:
            if self._copydirection == 0 or self._copydirection == 2:
                if not os.path.exists(dir2):
                    if self._forcecopy:
                        os.chmod(os.path.dirname(dir2_root), 1911)
                    try:
                        os.makedirs(dir2)
                        self._numnewdirs += 1
                    except OSError as e:
                        self.log(str(e))
                        self._numdirsfld += 1
                if self._forcecopy:
                    os.chmod(dir2, 1911)
                sourcefile = os.path.join(dir1, filename)
                try:
                    if os.path.islink(sourcefile):
                        os.symlink(os.readlink(sourcefile), os.path.join(dir2, filename))
                    else:
                        shutil.copy2(sourcefile, dir2)
                    self._numfiles += 1
                except (IOError, OSError) as e:
                    self.log(str(e))
                    self._numcopyfld += 1
            if self._copydirection == 1 or self._copydirection == 2:
                if not os.path.exists(dir1):
                    if self._forcecopy:
                        os.chmod(os.path.dirname(self.dir1_root), 1911)
                    try:
                        os.makedirs(dir1)
                        self._numnewdirs += 1
                    except OSError as e:
                        self.log(str(e))
                        self._numdirsfld += 1
                targetfile = os.path.abspath(os.path.join(dir1, filename))
                if self._forcecopy:
                    os.chmod(dir1, 1911)
                sourcefile = os.path.join(dir2, filename)
                try:
                    if os.path.islink(sourcefile):
                        os.symlink(os.readlink(sourcefile), os.path.join(dir1, filename))
                    else:
                        shutil.copy2(sourcefile, targetfile)
                    self._numfiles += 1
                except (IOError, OSError) as e:
                    self.log(str(e))
                    self._numcopyfld += 1
        except Exception as e:
            self.log('Error copying file %s' % filename)
            self.log(str(e))
Private function for copying a file
10,506
async def process_request(self, path, headers): path_portion, _, query_string = path.partition("?") websockets.handshake.check_request(headers) subprotocols = [] for header in headers.get_all("Sec-WebSocket-Protocol"): subprotocols.extend([token.strip() for token in header.split(",")]) asgi_headers = [ (name.encode("ascii"), value.encode("ascii")) for name, value in headers.raw_items() ] self.scope = { "type": "websocket", "scheme": self.scheme, "server": self.server, "client": self.client, "root_path": self.root_path, "path": unquote(path_portion), "query_string": query_string.encode("ascii"), "headers": asgi_headers, "subprotocols": subprotocols, } task = self.loop.create_task(self.run_asgi()) task.add_done_callback(self.on_task_complete) self.tasks.add(task) await self.handshake_started_event.wait() return self.initial_response
This hook is called to determine if the websocket should return an HTTP response and close. Our behavior here is to start the ASGI application, and then wait for either `accept` or `close` in order to determine if we should close the connection.
10,507
def backtrace_on_usr1 ():
    import signal
    try:
        signal.signal (signal.SIGUSR1, _print_backtrace_signal_handler)
    except Exception as e:
        warn ('failed to install SIGUSR1 handler: %s', e)  # message literal lost in extraction; wording assumed
Install a signal handler such that this program prints a Python traceback upon receipt of SIGUSR1. This could be useful for checking that long-running programs are behaving properly, or for discovering where an infinite loop is occurring. Note, however, that the Python interpreter does not invoke Python signal handlers exactly when the process is signaled. For instance, a signal delivered in the midst of a time.sleep() call will only be seen by Python code after that call completes. This means that this feature may not be as helpful as one might like for debugging certain kinds of problems.
10,508
def _freq_parser(self, freq): freq = freq.lower().strip() try: if "day" in freq: freq = freq.replace("day", "") return timedelta(days=int(freq)) elif "hour" in freq: freq = freq.replace("hour", "") return timedelta(hours=int(freq)) elif "min" in freq: freq = freq.replace("min", "") return timedelta(minutes=int(freq)) elif "sec" in freq: freq = freq.replace("sec", "") return timedelta(seconds=int(freq)) else: raise Exception("%s is invalid format. use day, hour, min, sec." % freq) except: raise Exception("%s is invalid format. use day, hour, min, sec." % freq)
Parse a frequency string into a timedelta. Accepted units: day, hour, min, sec (e.g. "1day", "2hour", "30min", "10sec").
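A quick standalone sketch of the same parsing logic (a reimplementation mirroring _freq_parser above, not the original class method):

from datetime import timedelta

def parse_freq(freq: str) -> timedelta:
    # strip the unit suffix, convert the remaining digits to an int
    freq = freq.lower().strip()
    for unit, kwarg in (("day", "days"), ("hour", "hours"),
                        ("min", "minutes"), ("sec", "seconds")):
        if unit in freq:
            return timedelta(**{kwarg: int(freq.replace(unit, ""))})
    raise ValueError("%s is invalid format. use day, hour, min, sec." % freq)

assert parse_freq("2hour") == timedelta(hours=2)
assert parse_freq("30 min") == timedelta(minutes=30)  # int() tolerates the leftover space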
10,509
def scrap(self, url=None, scheme=None, timeout=None, html_parser=None, cache_ext=None ): if not url: url = self.url if not scheme: scheme = self.scheme if not timeout: timeout = self.timeout if not html_parser: html_parser = self.html_parser if not scheme: raise WEBParameterException("Missing scheme definition") if not url: raise WEBParameterException("Missing url definition") resp = self.get(url, timeout, cache_ext=cache_ext) soup = BeautifulSoup(resp.html, html_parser) resp.scraped = self._parse_scheme(soup, scheme) return resp
Scrape a url and parse the content according to scheme :param url: Url to parse (default: self._url) :type url: str :param scheme: Scheme to apply to html (default: self._scheme) :type scheme: dict :param timeout: Timeout for http operation (default: self._timeout) :type timeout: float :param html_parser: What html parser to use (default: self._html_parser) :type html_parser: str | unicode :param cache_ext: External cache info :type cache_ext: floscraper.models.CacheInfo :return: Response data from url and parsed info :rtype: floscraper.models.Response :raises WEBConnectException: HTTP get failed :raises WEBParameterException: Missing scheme or url
10,510
def untokenized_tfds_dataset(dataset_name=gin.REQUIRED, text2self=gin.REQUIRED, tfds_data_dir=gin.REQUIRED, dataset_split=gin.REQUIRED, batch_size=gin.REQUIRED, sequence_length=gin.REQUIRED, vocabulary=gin.REQUIRED, pack=gin.REQUIRED): dataset = tfds.load( dataset_name, split=dataset_split, as_supervised=True, data_dir=tfds_data_dir) if dataset_split == "train": dataset = dataset.repeat() dataset = dataset.shuffle(1000) dataset = supervised_to_dict(dataset, text2self) dataset = encode_all_features(dataset, vocabulary) return pack_and_batch(dataset, batch_size, sequence_length, pack)
Reads a tensorflow_datasets dataset. Returns a tf.data.Dataset containing single tokenized examples where each feature ends in EOS=1. Args: dataset_name: a string text2self: a boolean tfds_data_dir: a string dataset_split: a string batch_size: an integer sequence_length: an integer vocabulary: a vocabulary.Vocabulary pack: if True, multiple examples emitted by load_internal() are concatenated to form one combined example. Returns: a tf.data.Dataset of batches
10,511
def heterogzygote_counts(paired): work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(paired.tumor_data), "structural", "counts")) key = "germline_het_pon" het_bed = tz.get_in(["genome_resources", "variation", key], paired.tumor_data) vr = bedutils.population_variant_regions([x for x in [paired.tumor_data, paired.normal_data] if x]) cur_het_bed = bedutils.intersect_two(het_bed, vr, work_dir, paired.tumor_data) tumor_counts = _run_collect_allelic_counts(cur_het_bed, key, work_dir, paired.tumor_data) normal_counts = (_run_collect_allelic_counts(cur_het_bed, key, work_dir, paired.normal_data) if paired.normal_data else None) if normal_counts: tumor_counts, normal_counts = _filter_by_normal(tumor_counts, normal_counts, paired.tumor_data) return tumor_counts, normal_counts
Provide tumor/normal counts at population heterozygote sites with CollectAllelicCounts.
10,512
def Ra(L: float, Ts: float, Tf: float, alpha: float, beta: float, nu: float) -> float:
    # `g` (standard gravity) is assumed to be a module-level constant; the
    # original body referenced an undefined `Tinf`, which given the signature
    # can only be the bulk fluid temperature parameter `Tf`
    return g * beta * (Ts - Tf) * L**3.0 / (nu * alpha)
Calculate the Rayleigh number. :param L: [m] heat transfer surface characteristic length. :param Ts: [K] heat transfer surface temperature. :param Tf: [K] bulk fluid temperature. :param alpha: [m2/s] fluid thermal diffusivity. :param beta: [1/K] fluid coefficient of thermal expansion. :param nu: [m2/s] fluid kinematic viscosity. :returns: float Ra = Gr*Pr Characteristic dimensions: * vertical plate: vertical length * pipe: diameter * bluff body: diameter
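A worked example of the formula above; the fluid property values are rough numbers for air at ~300 K, assumed here purely for illustration:

# g = 9.81 m/s2, beta = 1/300 1/K, nu = 1.6e-5 m2/s, alpha = 2.2e-5 m2/s,
# vertical plate with L = 0.5 m and Ts - Tf = 20 K
g = 9.81
Ra = g * (1 / 300) * 20 * 0.5**3 / (1.6e-5 * 2.2e-5)
print("Ra = %.3g" % Ra)  # ~2.3e8 for these assumed values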
10,513
def create_secgroups(self): utils.banner("Creating Security Group") sgobj = securitygroup.SpinnakerSecurityGroup( app=self.app, env=self.env, region=self.region, prop_path=self.json_path) sgobj.create_security_group()
Create security groups as defined in the configs.
10,514
def list_targets_by_rule(client=None, **kwargs): result = client.list_targets_by_rule(**kwargs) if not result.get("Targets"): result.update({"Targets": []}) return result
List targets for a CloudWatch Events rule; kwargs are passed through to the boto3 call, e.g. Rule='string'.
10,515
def alwaysCalledWith(self, *args, **kwargs): self.__get_func = SinonSpy.__get_directly return self.alwaysCalledWithMatch(*args, **kwargs)
Determines whether the given args/kwargs are the ONLY args/kwargs used in previous calls. Eg. f(1, 2, 3) f(1, 2, 3) spy.alwaysCalledWith(1, 2) will return True, because they are the ONLY called args f(1, 3) spy.alwaysCalledWith(1) will return True, because 1 is the ONLY called args spy.alwaysCalledWith(1, 2) will return False, because 2 is not the ONLY called args Return: Boolean
10,516
def get_metric_index(self, metric_cls): ds = self.class2ds[metric_cls.ds] if self.index_dict[ds]: index_name = self.index_dict[ds] else: index_name = self.ds2index[metric_cls.ds] return index_name
Get the index name with the data for a metric class :param metric_cls: a metric class :return: the name of the index with the data for the metric
10,517
def find_recent(self, nrecent=4):
    try:
        rows = self.cur.execute("SELECT noteId FROM note WHERE book > 0 ORDER BY date DESC LIMIT %d;" % nrecent).fetchall()
    except:
        self.error("nota.find_recent() cannot look up note list")
    noteIds = []
    for r in rows:
        noteIds.append(r[0])
    self.fyi("noteIds: %s" % noteIds)
    rval = []
    for n in noteIds:
        note = None
        try:
            note = self.cur.execute("SELECT noteId, date, title, content, hash, book FROM note WHERE noteId = ?;", [n]).fetchone()
        except:
            self.warning("Problem extracting note %s from database for recent-list" % n)
            continue  # the original bare `next` here was a no-op; `continue` is what was meant
        if note:
            keywordIds = []
            keywordIds.extend(self.con.execute("SELECT keywordid FROM notekeyword WHERE notekeyword.noteid=?;", [n]))
            keywords = []
            for k in keywordIds:
                keywords.append(self.cur.execute("SELECT keyword FROM keyword WHERE keywordId=?;", k).fetchone()[0])
            rval.append({"noteId": note[0], "date": note[1], "title": note[2], "keywords": keywords, "content": note[3], "hash": note[4], "book": note[5]})
    return rval
Find recent non-trashed notes
10,518
def as_spectrum(self, binned=True): if binned: wave, flux = self.binwave, self.binflux else: wave, flux = self.wave, self.flux result = ArraySourceSpectrum(wave, flux, self.waveunits, self.fluxunits, name = self.name, keepneg = True) return result
Reduce the observation to a simple spectrum object. An observation is a complex object with some restrictions on its capabilities. At times, it would be useful to work with the simulated observation as a simple object that is easier to manipulate and takes up less memory. Parameters ---------- binned : bool If `True` (default), export binned dataset. Otherwise, native. Returns ------- result : `~pysynphot.spectrum.ArraySourceSpectrum` Observation dataset as a simple spectrum object.
10,519
def get_relations(self, cursor, table_name):
    # single-quoted literals were lost in extraction; the dict keys below are
    # reconstructed from the Salesforce describe API field names and should be
    # treated as assumptions
    def table2model(table_name):
        return SfProtectName(table_name).title().replace(' ', '').replace('-', '')
    global last_introspected_model, last_read_only, last_refs
    global last_with_important_related_name
    result = {}
    reverse = {}
    last_with_important_related_name = []
    last_read_only = {}
    last_refs = {}
    for _, field in enumerate(self.table_description_cache(table_name)['fields']):
        if field['type'] == 'reference' and field['referenceTo']:
            reference_to_name = SfProtectName(field['referenceTo'][0])
            relationship_order = field['relationshipOrder']
            if relationship_order is None:
                relationship_tmp = set()
                for rel in field['referenceTo']:
                    for chld in self.table_description_cache(rel)['childRelationships']:
                        if chld['childSObject'] == table_name and chld['field'] == field['name']:
                            relationship_tmp.add(chld['cascadeDelete'])
                assert len(relationship_tmp) <= 1
                if True in relationship_tmp:
                    relationship_order = '*'
            last_refs[field['name']] = (field['referenceTo'], relationship_order)
            result[field['name']] = ('Id', reference_to_name)
            reverse.setdefault(reference_to_name, []).append(field['name'])
        if not field['updateable'] or not field['createable']:
            sf_read_only = (0 if field['updateable'] else 1) | (0 if field['createable'] else 2)
            last_read_only[field['name']] = reverse_models_names[sf_read_only]
    for ref, ilist in reverse.items():
        back_name_collisions = [
            x['name'] for x in self.table_description_cache(ref)['fields']
            if re.sub('Id$' if x['type'] == 'reference' else '', '',
                      re.sub('__c$', '', x['name'])
                      ).replace('_', '').lower() == table2model(table_name).lower()]
        if len(ilist) > 1 or back_name_collisions:
            last_with_important_related_name.extend(ilist)
    last_introspected_model = table2model(table_name)
    return result
Returns a dictionary of {field_index: (field_index_other_table, other_table)} representing all relationships to the given table. Indexes are 0-based.
10,520
def allclose_up_to_global_phase( a: np.ndarray, b: np.ndarray, *, rtol: float = 1.e-5, atol: float = 1.e-8, equal_nan: bool = False ) -> bool: a, b = transformations.match_global_phase(a, b) return np.allclose(a=a, b=b, rtol=rtol, atol=atol, equal_nan=equal_nan)
Determines if a ~= b * exp(i t) for some t. Args: a: A numpy array. b: Another numpy array. rtol: Relative error tolerance. atol: Absolute error tolerance. equal_nan: Whether or not NaN entries should be considered equal to other NaN entries.
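A runnable sketch of the idea; match_global_phase below is a simplified stand-in (aligning on the largest-magnitude entry), not cirq's transformations.match_global_phase:

import numpy as np

def match_global_phase(a: np.ndarray, b: np.ndarray):
    # rotate b by the phase that makes its dominant entry agree with a's
    k = np.unravel_index(np.argmax(np.abs(b)), b.shape)
    if b[k] == 0 or a[k] == 0:
        return a, b
    ratio = a[k] / b[k]
    return a, b * (ratio / abs(ratio))

a = np.array([[1, 1j], [-1j, 1]]) / np.sqrt(2)
b = a * np.exp(1j * 0.7)  # same matrix up to a global phase
a2, b2 = match_global_phase(a, b)
print(np.allclose(a2, b2))  # True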
10,521
def exists_type(self, using=None, **kwargs): return self._get_connection(using).indices.exists_type(index=self._name, **kwargs)
Check if a type/types exists in the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.exists_type`` unchanged.
10,522
def count(args): counts = defaultdict(int) for arg in args: for item in arg: counts[item] = counts[item] + 1 return counts
count occurrences in a list of lists >>> count([['a','b'],['a']]) defaultdict(<class 'int'>, {'a': 2, 'b': 1})
10,523
def register_rpc(self, address, rpc_id, func): if rpc_id < 0 or rpc_id > 0xFFFF: raise RPCInvalidIDError("Invalid RPC ID: {}".format(rpc_id)) if address not in self._rpc_overlays: self._rpc_overlays[address] = RPCDispatcher() self._rpc_overlays[address].add_rpc(rpc_id, func)
Register a single RPC handler with the given info. This function can be used to directly register individual RPCs, rather than delegating all RPCs at a given address to a virtual Tile. If calls to this function are mixed with calls to add_tile for the same address, these RPCs will take precedence over what is defined in the tiles. Args: address (int): The address of the mock tile this RPC is for rpc_id (int): The number of the RPC func (callable): The function that should be called to handle the RPC. func is called as func(payload) and must return a single string object of up to 20 bytes with its response
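The overlay structure above is just a per-address dispatch table. A minimal, self-contained sketch of the same pattern (the class below is hypothetical, for illustration only):

class MiniDispatcher:
    # stand-in for RPCDispatcher: maps rpc_id -> handler
    def __init__(self):
        self._handlers = {}
    def add_rpc(self, rpc_id, func):
        self._handlers[rpc_id] = func
    def call(self, rpc_id, payload):
        return self._handlers[rpc_id](payload)

overlays = {}
address, rpc_id = 8, 0x0004
overlays.setdefault(address, MiniDispatcher()).add_rpc(rpc_id, lambda payload: b"ok")
print(overlays[address].call(rpc_id, b""))  # b'ok'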
10,524
@contextlib.contextmanager  # decorator restored by assumption: the function yields and is documented as a context manager (requires `import contextlib`)
def goto(directory, create=False):
    current = os.getcwd()
    directory = os.path.abspath(directory)
    if os.path.isdir(directory) or (create and mkdir(directory)):
        logger.info("goto -> %s", directory)
        os.chdir(directory)
        try:
            yield True
        finally:
            logger.info("goto <- %s", directory)
            os.chdir(current)
    else:
        logger.info(
            "goto(%s) - directory does not exist, or cannot be "
            "created.", directory,
        )
        yield False
Context object for changing directory. Args: directory (str): Directory to go to. create (bool): Create directory if it doesn't exist. Usage:: >>> with goto(directory) as ok: ... if not ok: ... print('Error') ... else: ... print('All OK')
10,525
def do_execute(self): result = None cont = self.input.payload serialization.write_all( str(self.resolve_option("output")), [cont.get("Model").jobject, cont.get("Header").jobject]) return result
The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str
10,526
def localize_sql(self, sql: str) -> str: if self.db_pythonlib in [PYTHONLIB_PYMYSQL, PYTHONLIB_MYSQLDB]: sql = _PERCENT_REGEX.sub("%%", sql) sql = _QUERY_VALUE_REGEX.sub("%s", sql) return sql
Translates ?-placeholder SQL to appropriate dialect. For example, MySQLdb uses %s rather than ?.
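A sketch of the translation step; the module-level regexes referenced above aren't shown, so the patterns below are assumed shapes, not the library's actual definitions:

import re

# assumed stand-ins: escape literal % signs, then swap ?-placeholders for %s
# (note: a bare \? pattern would also match ? inside string literals)
_PERCENT_REGEX = re.compile(r"%")
_QUERY_VALUE_REGEX = re.compile(r"\?")

sql = "SELECT * FROM t WHERE name LIKE '%smith%' AND id = ?"
sql = _PERCENT_REGEX.sub("%%", sql)
sql = _QUERY_VALUE_REGEX.sub("%s", sql)
print(sql)  # SELECT * FROM t WHERE name LIKE '%%smith%%' AND id = %s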
10,527
async def register(self, check, *, token=None): token_id = extract_attr(token, keys=["ID"]) params = {"token": token_id} response = await self._api.put("/v1/agent/check/register", params=params, data=check) return response.status == 200
Registers a new local check Parameters: check (Object): Check definition token (ObjectID): Token ID Returns: bool: ``True`` on success The register endpoint is used to add a new check to the local agent. Checks may be of script, HTTP, TCP, or TTL type. The agent is responsible for managing the status of the check and keeping the Catalog in sync. The request body must look like:: { "ID": "mem", "Name": "Memory utilization", "Notes": "Ensure we don't oversubscribe memory", "DeregisterCriticalServiceAfter": "90m", "Script": "/usr/local/bin/check_mem.py", "DockerContainerID": "f972c95ebf0e", "Shell": "/bin/bash", "HTTP": "http://example.com", "TCP": "example.com:22", "Interval": timedelta(seconds=10), "TTL": timedelta(seconds=15) } The **Name** field is mandatory, as is one of **Script**, **HTTP**, **TCP** or **TTL**. **Script**, **TCP** and **HTTP** also require that **Interval** be set. If an **ID** is not provided, it is set to **Name**. You cannot have duplicate **ID** entries per agent, so it may be necessary to provide an **ID**. The **Notes** field is not used internally by Consul and is meant to be human-readable. Checks that are associated with a service may also contain an optional **DeregisterCriticalServiceAfter** field, which is a timeout in the same duration format as **Interval** and **TTL**. If a check is in the critical state for more than this configured value, then its associated service (and all of its associated checks) will automatically be deregistered. The minimum timeout is 1 minute, and the process that reaps critical services runs every 30 seconds, so it may take slightly longer than the configured timeout to trigger the deregistration. This should generally be configured with a timeout that's much, much longer than any expected recoverable outage for the given service. If a **Script** is provided, the check type is a script, and Consul will evaluate the script every **Interval** to update the status. If a **DockerContainerID** is provided, the check is a Docker check, and Consul will evaluate the script every **Interval** in the given container using the specified Shell. Note that Shell is currently only supported for Docker checks. An **HTTP** check will perform an HTTP GET request against the value of **HTTP** (expected to be a URL) every **Interval**. If the response is any 2xx code, the check is passing. If the response is ``429 Too Many Requests``, the check is **warning**. Otherwise, the check is **critical**. A **TCP** check will perform a TCP connection attempt against the value of **TCP** (expected to be an IP/hostname and port combination) every **Interval**. If the connection attempt is successful, the check is **passing**. If the connection attempt is unsuccessful, the check is **critical**. In the case of a hostname that resolves to both IPv4 and IPv6 addresses, an attempt will be made to both addresses, and the first successful connection attempt will result in a successful check. If a **TTL** type is used, then the TTL update endpoint must be used periodically to update the state of the check. The **ServiceID** field can be provided to associate the registered check with an existing service provided by the agent. The **Status** field can be provided to specify the initial state of the health check.
10,528
def preprocess_mnist(sc, options): train_data = get_mnist(sc, "train", options.dataPath)\ .map(lambda rec_tuple: (normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD), rec_tuple[1]))\ .map(lambda t: Sample.from_ndarray(t[0], t[1])) test_data = get_mnist(sc, "test", options.dataPath)\ .map(lambda rec_tuple: (normalizer(rec_tuple[0], mnist.TEST_MEAN, mnist.TEST_STD), rec_tuple[1]))\ .map(lambda t: Sample.from_ndarray(t[0], t[1])) return train_data, test_data
Preprocess the MNIST dataset: normalize and transform into RDDs of Sample.
10,529
def push_group_with_content(self, content): cairo.cairo_push_group_with_content(self._pointer, content) self._check_status()
Temporarily redirects drawing to an intermediate surface known as a group. The redirection lasts until the group is completed by a call to :meth:`pop_group` or :meth:`pop_group_to_source`. These calls provide the result of any drawing to the group as a pattern, (either as an explicit object, or set as the source pattern). The group will have a content type of :obj:`content`. The ability to control this content type is the only distinction between this method and :meth:`push_group` which you should see for a more detailed description of group rendering. :param content: A :ref:`CONTENT` string.
10,530
def is_authoring_node(self, node): for parent_node in foundations.walkers.nodes_walker(node, ascendants=True): if parent_node is self.__default_project_node: return True return False
Returns if given Node is an authoring node. :param node: Node. :type node: ProjectNode or DirectoryNode or FileNode :return: Is authoring node. :rtype: bool
10,531
def on_connect_button__clicked(self, event):
    hub_uri = self.plugin_uri.get_text()
    ui_plugin_name = self.ui_plugin_name.get_text()
    plugin = self.create_plugin(ui_plugin_name, hub_uri)
    self.init_plugin(plugin)
    self.connect_button.set_sensitive(False)
    self.emit('plugin-connected', plugin)  # signal-name literal lost in extraction; restored from the docstring
Connect to Zero MQ plugin hub (`zmq_plugin.hub.Hub`) using the settings from the text entry fields (e.g., hub URI, plugin name). Emit `plugin-connected` signal with the new plugin instance after hub connection has been established.
10,532
def source_channels(self): source_channels = [v.coordinates.keys() for v in self.verts] return set(itertools.chain(*source_channels))
Returns a set describing the source channels on which the gate is defined.
10,533
def read(path, encoding="utf-8"): try: with io.open(path, encoding=encoding) as f: return f.read() except Exception as e: logger.error("read: %s failed. Error: %s", path, e) return ""
Read the content of the file. Args: path (str): Path to the file encoding (str): File encoding. Default: utf-8 Returns: str: File content or empty string if there was an error
10,534
def extract_wavs(utterances: List[Utterance], tgt_dir: Path, lazy: bool) -> None: tgt_dir.mkdir(parents=True, exist_ok=True) for utter in utterances: wav_fn = "{}.{}".format(utter.prefix, "wav") out_wav_path = tgt_dir / wav_fn if lazy and out_wav_path.is_file(): logger.info("File {} already exists and lazy == {}; not " \ "writing.".format(out_wav_path, lazy)) continue logger.info("File {} does not exist and lazy == {}; creating " \ "it.".format(out_wav_path, lazy)) trim_wav_ms(utter.org_media_path, out_wav_path, utter.start_time, utter.end_time)
Extracts WAVs from the media files associated with a list of Utterance objects and stores it in a target directory. Args: utterances: A list of Utterance objects, which include information about the source media file, and the offset of the utterance in the media_file. tgt_dir: The directory in which to write the output WAVs. lazy: If True, then existing WAVs will not be overwritten if they have the same name
10,535
def annotate_with_depth(in_file, items): bam_file = None if len(items) == 1: bam_file = dd.get_align_bam(items[0]) else: paired = vcfutils.get_paired(items) if paired: bam_file = paired.tumor_bam if bam_file: out_file = "%s-duphold.vcf.gz" % utils.splitext_plus(in_file)[0] if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: if not in_file.endswith(".gz"): in_file = vcfutils.bgzip_and_index(in_file, remove_orig=False, out_dir=os.path.dirname(tx_out_file)) ref_file = dd.get_ref_file(items[0]) cores = min([dd.get_num_cores(items[0]), 4]) cmd = ("duphold --threads {cores} --vcf {in_file} --bam {bam_file} --fasta {ref_file} " "-o {tx_out_file}") do.run(cmd.format(**locals()), "Annotate SV depth with duphold") vcfutils.bgzip_and_index(out_file) return out_file else: return in_file
Annotate called VCF file with depth using duphold (https://github.com/brentp/duphold) Currently annotates single sample and tumor samples in somatic analysis.
10,536
def render_template(self, template_file, target_file, template_vars = {}):
    template_dir = str(self.__class__.__name__).lower()
    template = self.jinja_env.get_template(os.path.join(template_dir, template_file))
    file_path = os.path.join(self.work_root, target_file)
    with open(file_path, 'w') as f:  # file-mode literal lost in extraction; 'w' assumed
        f.write(template.render(template_vars))
Render a Jinja2 template for the backend The template file is expected in the directory templates/BACKEND_NAME.
10,537
def _wrap_attr(self, attrs, context=None): for attr in attrs: if isinstance(attr, UnboundMethod): if _is_property(attr): yield from attr.infer_call_result(self, context) else: yield BoundMethod(attr, self) elif hasattr(attr, "name") and attr.name == "<lambda>": if attr.args.args and attr.args.args[0].name == "self": yield BoundMethod(attr, self) continue yield attr else: yield attr
Wrap bound methods of attrs in InstanceMethod proxies.
10,538
def colname_gen(df, col_name='unnamed_col'):  # default literal lost in extraction; restored from the docstring
    if col_name not in df.keys():
        yield col_name
    id_number = 0
    while True:
        # build each candidate from the base name; the original reassigned
        # col_name itself, which made the suffixes accumulate
        new_name = col_name + str(id_number)
        if new_name in df.keys():
            id_number += 1
        else:
            yield new_name
            return
Returns a column name that isn't in the specified DataFrame Parameters: df - DataFrame DataFrame to analyze col_name - string, default 'unnamed_col' Column name to use as the base value for the generated column name
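A quick usage sketch of the generator above (assuming pandas is available):

import pandas as pd

df = pd.DataFrame({"unnamed_col": [1], "unnamed_col0": [2]})
# names already present in the frame are skipped until a free one is found
print(next(colname_gen(df)))  # unnamed_col1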
10,539
def MapFields(function, key=True):
    @use_raw_input
    def _MapFields(bag):
        try:
            factory = type(bag)._make
        except AttributeError:
            factory = type(bag)
        if callable(key):
            try:
                fields = bag._fields
            except AttributeError as e:
                # error-message literal lost in extraction; wording assumed
                raise UnrecoverableAttributeError(
                    'To use a callable key, the bag must expose _fields.'
                ) from e
            return factory(
                function(value) if key(key_) else value
                for key_, value in zip(fields, bag)
            )
        elif key:
            return factory(function(value) for value in bag)
        else:
            return NOT_MODIFIED
    return _MapFields
Transformation factory that maps `function` on the values of a row. It can be applied either to 1. all columns (`key=True`), 2. no column (`key=False`), or 3. a subset of columns by passing a callable, which takes column name and returns `bool` (same as the parameter `function` in `filter`). :param function: callable :param key: bool or callable :return: callable
10,540
def filter_expr(cls_or_alias, **filters):
    if isinstance(cls_or_alias, AliasedClass):
        mapper, cls = cls_or_alias, inspect(cls_or_alias).mapper.class_
    else:
        mapper = cls = cls_or_alias
    expressions = []
    valid_attributes = cls.filterable_attributes
    for attr, value in filters.items():
        if attr in cls.hybrid_methods:
            method = getattr(cls, attr)
            expressions.append(method(value, mapper=mapper))
        else:
            if OPERATOR_SPLITTER in attr:
                attr_name, op_name = attr.rsplit(OPERATOR_SPLITTER, 1)
                if op_name not in cls._operators:
                    # error-message literals were lost in extraction; wording assumed
                    raise KeyError('Expression `{}` has incorrect operator `{}`'.format(attr, op_name))
                op = cls._operators[op_name]
            else:
                attr_name, op = attr, operators.eq
            if attr_name not in valid_attributes:
                raise KeyError('Expression `{}` refers to a non-existent attribute `{}`'.format(attr, attr_name))
            column = getattr(mapper, attr_name)
            expressions.append(op(column, value))
    return expressions
forms expressions like [Product.age_from = 5, Product.subject_ids.in_([1,2])] from filters like {'age_from': 5, 'subject_ids__in': [1,2]} Example 1: db.query(Product).filter( *Product.filter_expr(age_from = 5, subject_ids__in=[1, 2])) Example 2: filters = {'age_from': 5, 'subject_ids__in': [1,2]} db.query(Product).filter(*Product.filter_expr(**filters)) ### About alias ###: If we will use alias: alias = aliased(Product) # table name will be product_1 we can't just write query like db.query(alias).filter(*Product.filter_expr(age_from=5)) because it will be compiled to SELECT * FROM product_1 WHERE product.age_from=5 which is wrong: we select from 'product_1' but filter on 'product' such filter will not work We need to obtain SELECT * FROM product_1 WHERE product_1.age_from=5 For such case, we can call filter_expr ON ALIAS: alias = aliased(Product) db.query(alias).filter(*alias.filter_expr(age_from=5)) Alias realization details: * we allow to call this method either ON ALIAS (say, alias.filter_expr()) or on class (Product.filter_expr()) * when method is called on alias, we need to generate SQL using aliased table (say, product_1), but we also need to have a real class to call methods on (say, Product.relations) * so, we have 'mapper' that holds table name and 'cls' that holds real class when we call this method ON ALIAS, we will have: mapper = <product_1 table> cls = <Product> when we call this method ON CLASS, we will simply have: mapper = <Product> (or we could write <Product>.__mapper__. It doesn't matter because when we call <Product>.getattr, SA will magically call <Product>.__mapper__.getattr()) cls = <Product>
10,541
def max_rigid_id(self): try: return max([particle.rigid_id for particle in self.particles() if particle.rigid_id is not None]) except ValueError: return
Returns the maximum rigid body ID contained in the Compound. This is usually used by compound.root to determine the maximum rigid_id in the containment hierarchy. Returns ------- int or None The maximum rigid body ID contained in the Compound. If no rigid body IDs are found, None is returned
10,542
def unsubscribe_all(self): for channel in self.list_all(): channel.ensure_stopped() self.connect_api.stop_notifications()
Unsubscribes all channels
10,543
def get_translation_args(self, args): translation_args = [] for arg in args: condition = self._get_linguist_condition(arg, transform=True) if condition: translation_args.append(condition) return translation_args
Returns linguist args from model args.
10,544
def light(self): sun = self.chart.getObject(const.SUN) return light(self.obj, sun)
Returns whether the object is augmenting or diminishing its light.
10,545
def install_signal_trap(signums = (signal.SIGTERM, signal.SIGTSTP), retval = 1): signums = set(signums) - set(origactions) def temporary_file_cleanup_on_signal(signum, frame): with temporary_files_lock: temporary_files.clear() if callable(origactions[signum]): return origactions[signum](signum, frame) sys.exit(retval) for signum in signums: origactions[signum] = signal.getsignal(signum) if origactions[signum] != signal.SIG_IGN: signal.signal(signum, temporary_file_cleanup_on_signal)
Installs a signal handler to erase temporary scratch files when a signal is received. This can be used to help ensure scratch files are erased when jobs are evicted by Condor. signums is a sequence of the signals to trap, the default value is a list of the signals used by Condor to kill and/or evict jobs. The logic is as follows. If the current signal handler is signal.SIG_IGN, i.e. the signal is being ignored, then the signal handler is not modified since the reception of that signal would not normally cause a scratch file to be leaked. Otherwise a signal handler is installed that erases the scratch files. If the original signal handler was a Python callable, then after the scratch files are erased the original signal handler will be invoked. If program control returns from that handler, i.e. that handler does not cause the interpreter to exit, then sys.exit() is invoked and retval is returned to the shell as the exit code. Note: by invoking sys.exit(), the signal handler causes the Python interpreter to do a normal shutdown. That means it invokes atexit() handlers, and does other garbage collection tasks that it normally would not do when killed by a signal. Note: this function will not replace a signal handler more than once, that is if it has already been used to set a handler on a signal then it will be a no-op when called again for that signal until uninstall_signal_trap() is used to remove the handler from that signal. Note: this function is called by get_connection_filename() whenever it creates a scratch file.
10,546
def edit(self, image_id, name=None, note=None, tag=None):
    obj = {}
    if name:
        obj['name'] = name  # dict-key literals lost in extraction; restored from the parameter names
    if note:
        obj['note'] = note
    if obj:
        self.vgbdtg.editObject(obj, id=image_id)
    if tag:
        self.vgbdtg.setTags(str(tag), id=image_id)
    return bool(name or note or tag)
Edit image related details. :param int image_id: The ID of the image :param string name: Name of the image. :param string note: Note of the image. :param string tag: Tags to set on the image.
10,547
def _expected_condition_find_element(self, element): from toolium.pageelements.page_element import PageElement web_element = False try: if isinstance(element, PageElement): element._web_element = None element._find_web_element() web_element = element._web_element elif isinstance(element, tuple): web_element = self.driver_wrapper.driver.find_element(*element) except NoSuchElementException: pass return web_element
Tries to find the element, but does not throw an exception if the element is not found :param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found :returns: the web element if it has been found or False :rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
10,548
def delete_map(self, url, map=None, auth_map=None):
    xml = None
    if map is not None:
        xml = dumps_networkapi(map)
    # the third positional argument's literal was lost in extraction; an empty
    # string is assumed here
    response_code, content = self.delete(url, xml, '', auth_map)
    return response_code, content
Generates an XML from the dictionary data and sends it in a DELETE request. :param url: URL to send the HTTP request to. :param map: Dictionary with the data for the HTTP request body. :param auth_map: Dictionary with the authentication information for the networkAPI. :return: Returns a tuple containing: (< HTTP response code >, < response body >). :raise ConnectionError: Failed to connect to the networkAPI. :raise RestError: Failure while accessing the networkAPI.
10,549
def visit_importfrom(self, node): try: logging_name = self._from_imports[node.modname] for module, as_name in node.names: if module == logging_name: self._logging_names.add(as_name or module) except KeyError: pass
Checks to see if a module uses a non-Python logging module.
10,550
def camera_status_encode(self, time_usec, target_system, cam_idx, img_idx, event_id, p1, p2, p3, p4): return MAVLink_camera_status_message(time_usec, target_system, cam_idx, img_idx, event_id, p1, p2, p3, p4)
Camera Event time_usec : Image timestamp (microseconds since UNIX epoch, according to camera clock) (uint64_t) target_system : System ID (uint8_t) cam_idx : Camera ID (uint8_t) img_idx : Image index (uint16_t) event_id : See CAMERA_STATUS_TYPES enum for definition of the bitmask (uint8_t) p1 : Parameter 1 (meaning depends on event, see CAMERA_STATUS_TYPES enum) (float) p2 : Parameter 2 (meaning depends on event, see CAMERA_STATUS_TYPES enum) (float) p3 : Parameter 3 (meaning depends on event, see CAMERA_STATUS_TYPES enum) (float) p4 : Parameter 4 (meaning depends on event, see CAMERA_STATUS_TYPES enum) (float)
10,551
def launch_game( players: List[Player], launch_params: Dict[str, Any], show_all: bool, read_overwrite: bool, wait_callback: Callable ) -> None: if not players: raise GameException("at least one player must be specified") game_dir = launch_params["game_dir"] game_name = launch_params["game_name"] if os.path.exists(f"{game_dir}/{game_name}"): logger.info(f"removing existing game results of {game_name}") shutil.rmtree(f"{game_dir}/{game_name}") for nth_player, player in enumerate(players): launch_image(player, nth_player=nth_player, num_players=len(players), **launch_params) logger.debug("checking if game has launched properly...") time.sleep(1) start_containers = running_containers(game_name + "_") if len(start_containers) != len(players): raise DockerException("some containers exited prematurely, please check logs") if not launch_params["headless"]: for index, player in enumerate(players if show_all else players[:1]): port = launch_params["vnc_base_port"] + index host = launch_params["vnc_host"] logger.info(f"launching vnc viewer for {player} on address {host}:{port}") launch_vnc_viewer(host, port) logger.info("\n" "In headful mode, you must specify and start the game manually.\n" "Select the map, wait for bots to join the game " "and then start the game.") logger.info(f"waiting until game {game_name} is finished...") running_time = time.time() while True: containers = running_containers(game_name) if len(containers) == 0: break if len(containers) >= 2: running_time = time.time() if len(containers) == 1 and time.time() - running_time > MAX_TIME_RUNNING_SINGLE_CONTAINER: raise ContainerException( f"One lingering container has been found after single container " f"timeout ({MAX_TIME_RUNNING_SINGLE_CONTAINER} sec), the game probably crashed.") logger.debug(f"waiting. {containers}") wait_callback() exit_codes = [container_exit_code(container) for container in containers] logger.debug("removing game containers") remove_game_containers(game_name) if any(exit_code == EXIT_CODE_REALTIME_OUTED for exit_code in exit_codes): raise RealtimeOutedException(f"some of the game containers has realtime outed.") if any(exit_code == 1 for exit_code in exit_codes): raise ContainerException(f"some of the game containers has finished with error exit code.") if read_overwrite: logger.info("overwriting bot files") for nth_player, player in enumerate(players): if isinstance(player, BotPlayer): logger.debug(f"overwriting files for {player}") distutils.dir_util.copy_tree( f"{game_dir}/{game_name}/write_{nth_player}", player.read_dir )
:raises DockerException, ContainerException, RealtimeOutedException
10,552
def append_qs(url, query_string):
    parsed_url = urlsplit(url)
    parsed_qs = parse_qsl(parsed_url.query, True)
    if isstr(query_string):
        parsed_qs += parse_qsl(query_string)
    elif isdict(query_string):
        for item in list(query_string.items()):
            if islist(item[1]):
                for val in item[1]:
                    parsed_qs.append((item[0], val))
            else:
                parsed_qs.append(item)
    elif islist(query_string):
        parsed_qs += query_string
    else:
        raise TypeError('query_string must be a string, dict, or list')  # message literal lost in extraction; wording assumed
    return urlunsplit((
        parsed_url.scheme,
        parsed_url.netloc,
        parsed_url.path,
        urlencode_unicode(parsed_qs),
        parsed_url.fragment,
    ))
Append query_string values to an existing URL and return it as a string. query_string can be: * an encoded string: 'test3=val1&test3=val2' * a dict of strings: {'test3': 'val'} * a dict of lists of strings: {'test3': ['val1', 'val2']} * a list of tuples: [('test3', 'val1'), ('test3', 'val2')]
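The same result can be sketched with only the standard library (a simplified variant, without the helper predicates used above):

from urllib.parse import urlsplit, urlunsplit, parse_qsl, urlencode

def append_qs_simple(url: str, extra: dict) -> str:
    # parse the existing query, extend it, and re-encode
    parts = urlsplit(url)
    qs = parse_qsl(parts.query, keep_blank_values=True)
    for key, val in extra.items():
        for v in (val if isinstance(val, list) else [val]):
            qs.append((key, v))
    return urlunsplit(parts._replace(query=urlencode(qs)))

print(append_qs_simple("http://x.test/?a=1", {"test3": ["val1", "val2"]}))
# http://x.test/?a=1&test3=val1&test3=val2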
10,553
def setProperty(self, name, value): self._push(self._driver.setProperty, (name, value))
Called by the engine to set a driver property value. @param name: Name of the property @type name: str @param value: Property value @type value: object
10,554
def sinter(self, keys, *args): func = lambda left, right: left.intersection(right) return self._apply_to_sets(func, "SINTER", keys, *args)
Emulate sinter.
10,555
def taskfileinfo_task_data(tfi, role): task = tfi.task if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole: return task.name
Return the data for task :param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data :type tfi: :class:`jukeboxcore.filesys.TaskFileInfo` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the task :rtype: depending on role :raises: None
10,556
def get_variable_set(self, variable_set, data):
    # all request-parameter and operator literals below were lost in
    # extraction; the key and operator names used here are assumed placeholders
    if data.get('clear'):
        variable_set = []
    elif data.get('layers'):
        op, layer_ids = data['layers'].split(':', 1)
        op = op.lower()
        layer_ids = [int(x) for x in layer_ids.split(',')]
        if op in ('include', 'show'):
            variable_set = [x for x in variable_set if x.index in layer_ids]
        elif op in ('exclude', 'hide'):
            variable_set = [x for x in variable_set if x.index not in layer_ids]
    elif self.service.render_top_layer_only:
        variable_set = [variable_set[0]]
    return variable_set
Filters the given variable set based on request parameters
10,557
def _construct_replset(self, basedir, portstart, name, num_nodes, arbiter, extra=''):
    # string literals below were lost in extraction and are reconstructed from
    # mlaunch-style conventions; path templates and args keys are assumptions
    self.config_docs[name] = {'_id': name, 'members': []}
    for i in num_nodes:
        datapath = self._create_paths(basedir, '%s/rs%i' % (name, i + 1))
        self._construct_mongod(os.path.join(datapath, 'db'),
                               os.path.join(datapath, 'mongod.log'),
                               portstart + i, replset=name, extra=extra)
        host = '%s:%i' % (self.args['hostname'], portstart + i)
        member_config = {
            '_id': len(self.config_docs[name]['members']),
            'host': host,
        }
        if i == 0 and self.args['priority']:
            member_config['priority'] = 10
        if i >= 7:
            member_config['votes'] = 0
            member_config['priority'] = 0
        self.config_docs[name]['members'].append(member_config)
    if arbiter:
        datapath = self._create_paths(basedir, '%s/arb' % (name))
        self._construct_mongod(os.path.join(datapath, 'db'),
                               os.path.join(datapath, 'mongod.log'),
                               portstart + self.args['nodes'], replset=name)
        host = '%s:%i' % (self.args['hostname'], portstart + self.args['nodes'])
        (self.config_docs[name]['members']
         .append({'_id': len(self.config_docs[name]['members']),
                  'host': host,
                  'arbiterOnly': True}))
    return(name + '/' + ','.join([c['host'] for c in self.config_docs[name]['members']]))
Construct command line strings for a replicaset. Handles single set or sharded cluster.
10,558
def macro(name):
    def wrapper(view, context, model, column):
        if '.' in name:  # dotted names refer to macros imported from another file (see the docstring)
            macro_import_name, macro_name = name.split('.')
            m = getattr(context.get(macro_import_name), macro_name, None)
        else:
            m = context.resolve(name)
        if not m:
            return m
        return m(model=model, column=column)
    return wrapper
Replaces :func:`~flask_admin.model.template.macro`, adding support for using macros imported from another file. For example: .. code:: html+jinja {# templates/admin/column_formatters.html #} {% macro email(model, column) %} {% set address = model[column] %} <a href="mailto:{{ address }}">{{ address }}</a> {% endmacro %} .. code:: python class FooAdmin(ModelAdmin): column_formatters = { 'col_name': macro('column_formatters.email') } Also required for this to work, is to add the following to the top of your master admin template: .. code:: html+jinja {# templates/admin/master.html #} {% import 'admin/column_formatters.html' as column_formatters with context %}
10,559
def deep_update_dict(origin_dict, override_dict): if not override_dict: return origin_dict for key, val in override_dict.items(): if isinstance(val, dict): tmp = deep_update_dict(origin_dict.get(key, {}), val) origin_dict[key] = tmp elif val is None: continue else: origin_dict[key] = override_dict[key] return origin_dict
update origin dict with override dict recursively e.g. origin_dict = {'a': 1, 'b': {'c': 2, 'd': 4}} override_dict = {'b': {'c': 3}} return: {'a': 1, 'b': {'c': 3, 'd': 4}}
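A runnable check of the docstring's example using the function above, which also shows that None values in the override are skipped:

origin = {'a': 1, 'b': {'c': 2, 'd': 4}}
override = {'b': {'c': 3}, 'e': None}
print(deep_update_dict(origin, override))
# {'a': 1, 'b': {'c': 3, 'd': 4}}  -- nested dicts merged, None ignored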
10,560
def show(movie):
    # the printer format-string literals were lost in extraction; simple
    # "key: value" templates are assumed here
    for key, value in sorted(movie.iteritems(), cmp=metadata_sorter, key=lambda x: x[0]):
        if isinstance(value, list):
            if not value:
                continue
            other = value[1:]
            value = value[0]
        else:
            other = []
        printer.p('{key}: {value}', key=key, value=value)
        for value in other:
            printer.p('{pad}{value}', value=value, pad=' ' * (len(key) + 2))
Show the movie metadata.
10,561
def replace(self, src: str) -> str:
    if not self.readied:
        self.ready()
    src = self._dict_replace(self.simple_pre, src)
    for regex, replace in self.regex_pre:
        src = regex.sub(replace, src)
    src = self._operators_replace(src)
    src_prev = src
    for i in range(self.max_iter):
        for regex, replace in self.loop_regexps:
            src = regex.sub(replace, src)
        if src_prev == src:
            break
        else:
            src_prev = src
    for regex, replace in self.regex_post:
        src = regex.sub(replace, src)
    src = self._dict_replace(self.simple_post, src)
    src = self.escapes_regex.sub(r'\1', src)  # replacement literal lost in extraction; unescaping to group 1 assumed
    return src
Extends LaTeX syntax via regex preprocess :param src: str LaTeX string :return: str New LaTeX string
10,562
def to_internal_value(self, data):
    if data is None:
        return
    if not isinstance(data, dict):
        self.fail('invalid')  # error-key literals lost in extraction; DRF-style keys assumed
    if not self.allow_empty and len(data) == 0:
        self.fail('empty')
    result, errors = {}, {}
    for lang_code, model_fields in data.items():
        serializer = self.serializer_class(data=model_fields)
        if serializer.is_valid():
            result[lang_code] = serializer.validated_data
        else:
            errors[lang_code] = serializer.errors
    if errors:
        raise serializers.ValidationError(errors)
    return result
Deserialize data from translations fields. For each received language, delegate validation logic to the translation model serializer.
10,563
def run(self, args): Mframe.adjust_relative(self.proc, self.name, args, self.signum) return False
**down** [*count*] Move the current frame down in the stack trace (to a newer frame). 0 is the most recent frame. If no count is given, move down 1. See also: --------- `up` and `frame`.
10,564
def get_repo(path=None, alias=None, create=False): if create: if not (path or alias): raise TypeError("If create is specified, we need path and scm type") return get_backend(alias)(path, create=True) if path is None: path = abspath(os.path.curdir) try: scm, path = get_scm(path, search_up=True) path = abspath(path) alias = scm except VCSError: raise VCSError("No scm found at %s" % path) if alias is None: alias = get_scm(path)[0] backend = get_backend(alias) repo = backend(path, create=create) return repo
Returns ``Repository`` object of type linked with given ``alias`` at the specified ``path``. If ``alias`` is not given it will try to guess it using get_scm method
10,565
def sendcontrol(self, char):
    char = char.lower()
    a = ord(char)
    if 97 <= a <= 122:
        a = a - ord('a') + 1
        byte = _byte(a)
        return self._writeb(byte), byte
    # control-character map reconstructed from pexpect's sendcontrol; the key
    # literals were lost in extraction but the values (0..31, 127) survived
    d = {'@': 0, '`': 0,
         '[': 27, '{': 27,
         '\\': 28, '|': 28,
         ']': 29, '}': 29,
         '^': 30, '~': 30,
         '_': 31,
         '?': 127}
    if char not in d:
        return 0, b''
    byte = _byte(d[char])
    return self._writeb(byte), byte
Helper method that wraps send() with mnemonic access for sending control character to the child (such as Ctrl-C or Ctrl-D). For example, to send Ctrl-G (ASCII 7, bell, '\a'):: child.sendcontrol('g') See also, sendintr() and sendeof().
10,566
def crab_factory(**kwargs):
    if 'wsdl' in kwargs:
        wsdl = kwargs['wsdl']
        del kwargs['wsdl']
    else:
        wsdl = "http://crab.agiv.be/wscrab/wscrab.svc?wsdl"
    log.info('Creating CRAB client with wsdl: %s', wsdl)  # log-message literal lost in extraction; wording assumed
    c = Client(
        wsdl,
        **kwargs
    )
    return c
Factory that generates a CRAB client. A few parameters will be handled by the factory, other parameters will be passed on to the client. :param wsdl: `Optional.` Allows overriding the default CRAB wsdl url. :param proxy: `Optional.` A dictionary of proxy information that is passed to the underlying :class:`suds.client.Client` :rtype: :class:`suds.client.Client`
10,567
async def prepare_decrypter(client, cdn_client, cdn_redirect): cdn_aes = AESModeCTR( key=cdn_redirect.encryption_key, iv=cdn_redirect.encryption_iv[:12] + bytes(4) ) decrypter = CdnDecrypter( cdn_client, cdn_redirect.file_token, cdn_aes, cdn_redirect.cdn_file_hashes ) cdn_file = await cdn_client(GetCdnFileRequest( file_token=cdn_redirect.file_token, offset=cdn_redirect.cdn_file_hashes[0].offset, limit=cdn_redirect.cdn_file_hashes[0].limit )) if isinstance(cdn_file, CdnFileReuploadNeeded): await client(ReuploadCdnFileRequest( file_token=cdn_redirect.file_token, request_token=cdn_file.request_token )) cdn_file = decrypter.get_file() else: cdn_file.bytes = decrypter.cdn_aes.encrypt(cdn_file.bytes) cdn_hash = decrypter.cdn_file_hashes.pop(0) decrypter.check(cdn_file.bytes, cdn_hash) return decrypter, cdn_file
Prepares a new CDN decrypter. :param client: a TelegramClient connected to the main servers. :param cdn_client: a new client connected to the CDN. :param cdn_redirect: the redirect file object that caused this call. :return: (CdnDecrypter, first chunk file data)
10,568
def executable_path(conn, executable): executable_path = conn.remote_module.which(executable) if not executable_path: raise ExecutableNotFound(executable, conn.hostname) return executable_path
Remote validator that accepts a connection object to ensure that a certain executable is available returning its full path if so. Otherwise an exception with thorough details will be raised, informing the user that the executable was not found.
10,569
def load_json_file(file, decoder=None): if decoder is None: decoder = DateTimeDecoder if not hasattr(file, "read"): with io.open(file, "r", encoding="utf-8") as f: return json.load(f, object_hook=decoder.decode) return json.load(file, object_hook=decoder.decode)
Load data from json file :param file: Readable object or path to file :type file: FileIO | str :param decoder: Use custom json decoder :type decoder: T <= DateTimeDecoder :return: Json data :rtype: None | int | float | str | list | dict
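The decoder works through json's object_hook mechanism. A minimal stand-in for the DateTimeDecoder referenced above (its actual tag format isn't shown, so the "__type__" convention here is assumed):

import json
from datetime import datetime

class MiniDateTimeDecoder:
    # hypothetical decoder: revive tagged dicts into datetime objects
    @staticmethod
    def decode(obj):
        if obj.get("__type__") == "datetime":
            return datetime.fromisoformat(obj["isoformat"])
        return obj

raw = '{"created": {"__type__": "datetime", "isoformat": "2020-01-02T03:04:05"}}'
data = json.loads(raw, object_hook=MiniDateTimeDecoder.decode)
print(data["created"].year)  # 2020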
10,570
def did_you_mean(unknown_command, entry_points): from difflib import SequenceMatcher similarity = lambda x: SequenceMatcher(None, x, unknown_command).ratio() did_you_mean = sorted(entry_points, key=similarity, reverse=True) return did_you_mean[0]
Return the command with the name most similar to what the user typed. This is used to suggest a correct command when the user types an illegal command.
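A runnable demo of the ranking above, standalone apart from the stdlib:

from difflib import SequenceMatcher

def did_you_mean_demo(unknown_command, entry_points):
    # rank known commands by string similarity to the unknown one
    similarity = lambda x: SequenceMatcher(None, x, unknown_command).ratio()
    return sorted(entry_points, key=similarity, reverse=True)[0]

print(did_you_mean_demo("instal", ["install", "uninstall", "list"]))  # install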
10,571
def init_db(self, db_path):
    self.database = 'sqlite:///%s' % db_path  # URL template lost in extraction; sqlite scheme restored from the docstring
    self.storage = SREGISTRY_STORAGE
    bot.debug("Database located at %s" % self.database)
    self.engine = create_engine(self.database, convert_unicode=True)
    self.session = scoped_session(sessionmaker(autocommit=False,
                                               autoflush=False,
                                               bind=self.engine))
    Base.query = self.session.query_property()
    Base.metadata.create_all(bind=self.engine)
    self.Base = Base
Initialize the database, at the default database path or a custom one in the format sqlite:////scif/data/expfactory.db. The custom path can be set with the environment variable SREGISTRY_DATABASE when a user creates the client; we must initialize this db then. The database should use the .singularity cache folder to cache layers and images, and .singularity/sregistry.db as a database.
10,572
def do_i_raise_dependency(self, status, inherit_parents, hosts, services, timeperiods): for stat in status: if self.is_state(stat): return True if not inherit_parents: return False now = time.time() for (dep_id, dep_status, _, timeperiod_id, inh_parent) in self.chk_depend_of: if dep_id in hosts: dep = hosts[dep_id] else: dep = services[dep_id] timeperiod = timeperiods[timeperiod_id] if dep.do_i_raise_dependency(dep_status, inh_parent, hosts, services, timeperiods): if timeperiod is None or timeperiod.is_time_valid(now): return True return False
Check if this object or one of its dependency state (chk dependencies) match the status :param status: state list where dependency matters (notification failure criteria) :type status: list :param inherit_parents: recurse over parents :type inherit_parents: bool :param hosts: hosts objects, used to raise dependency check :type hosts: alignak.objects.host.Hosts :param services: services objects, used to raise dependency check :type services: alignak.objects.service.Services :param timeperiods: Timeperiods objects, used for all kind of timeperiod (notif, check) :type timeperiods: alignak.objects.timeperiod.Timeperiods :return: True if one state matched the status list, otherwise False :rtype: bool
10,573
def load_header_chain( cls, chain_path ):
    header_parser = BlockHeaderSerializer()
    chain = []
    height = 0
    with open(chain_path, "rb") as f:
        h = SPVClient.read_header_at( f )
        h['block_height'] = height  # key literal lost in extraction; 'block_height' assumed
        height += 1
        chain.append(h)
    return chain
Load the header chain from disk. Each chain element will be a dictionary with: *
10,574
def from_string(url, default_protocol='telnet'):  # default literal lost in extraction; 'telnet' assumed
    if url is None:
        raise TypeError('Expected string but got ' + str(type(url)))  # message literal lost; wording assumed
    result = Url()
    match = re.match(r'(\w+)://', url)
    if match:
        result.protocol = match.group(1)
    else:
        result.protocol = default_protocol
    query = ''
    if '?' in url:
        url, query = url.split('?', 1)
    result.vars = _urlparse_qs('http://dummy/?' + query)
    # NOTE: the hostname/port-parsing block was lost in extraction, so `port`
    # is undefined in the code as recovered
    result.port = int(port)
    return result
Parses the given URL and returns a URL object. There are some differences to Python's built-in URL parser: - It is less strict, many more inputs are accepted. This is necessary to allow for passing a simple hostname as a URL. - You may specify a default protocol that is used when the http:// portion is missing. - The port number defaults to the well-known port of the given protocol. - The query variables are parsed into a dictionary (Url.vars). :type url: str :param url: A URL. :type default_protocol: string :param default_protocol: A protocol name. :rtype: Url :return: The Url object constructed from the given URL.
10,575
def normalize_path_out(self, path):
    if path.startswith(self._CWD):
        normalized_path = path[len(self._CWD):]
    else:
        normalized_path = path
    if self._CLIENT_CWD:
        normalized_path = os.path.join(self._CLIENT_CWD, normalized_path)
    _logger.p_debug("normalize_path_out(%s) => %s", path, normalized_path)  # original format string had one placeholder for two args
    return normalized_path
Normalizes path sent to client :param path: path to normalize :return: normalized path
10,576
def _reset(self):
    # the AT command and parameter literals were lost in extraction; the
    # values below are placeholders, not the device's actual reset command
    self.hw.remote_at(
        dest_addr=self.remote_addr,
        command='D0',
        parameter='\x04')
    self.deactivate()
    self._set_send_samples(False)
reset: None -> None Resets the remote XBee device to a standard configuration
10,577
def write(self, frame): if not isinstance(frame, FrameBase): raise PyVLXException("Frame not of type FrameBase", frame_type=type(frame)) PYVLXLOG.debug("SEND: %s", frame) self.transport.write(slip_pack(bytes(frame)))
Write frame to Bus.
10,578
def p_line_label_asm(p):
    p[0] = p[2]
    # format-string literal lost in extraction; reconstructed to match the four arguments
    __DEBUG__("Declaring '%s%s' (value %04Xh) in line %i" %
              (NAMESPACE, p[1], MEMORY.org, p.lineno(1)))
    MEMORY.declare_label(p[1], p.lineno(1))
line : LABEL asms NEWLINE
10,579
def generate_security_data(self):
    timestamp = int(time.time())
    security_dict = {
        'content_type': str(self.target_object._meta),
        'object_pk': str(self.target_object._get_pk_val()),
        'timestamp': str(timestamp),
        'security_hash': self.initial_security_hash(timestamp),
    }
    return security_dict
Generate a dict of security data for "initial" data.
10,580
def Vmg(self):
    return self.VolumeGasMixture(T=self.T, P=self.P, zs=self.zs, ws=self.ws)
Gas-phase molar volume of the mixture at its current temperature, pressure, and composition in units of [m^3/mol]. For calculation of this property at other temperatures or pressures or compositions, or specifying manually the method used to calculate it, and more - see the object oriented interface :obj:`thermo.volume.VolumeGasMixture`; each Mixture instance creates one to actually perform the calculations. Examples -------- >>> Mixture(['hexane'], ws=[1], T=300, P=2E5).Vmg 0.010888694235142216
10,581
def bleu_score(logits, labels): predictions = tf.to_int32(tf.argmax(logits, axis=-1)) bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32) return bleu, tf.constant(1.0)
Approximate BLEU score computation between labels and predictions. An approximate BLEU scoring method since we do not glue word pieces or decode the ids and tokenize the output. By default, we use ngram order of 4 and use brevity penalty. Also, this does not have beam search. Args: logits: Tensor of size [batch_size, length_logits, vocab_size] labels: Tensor of size [batch-size, length_labels] Returns: bleu: int, approx bleu score
10,582
def plot_dives_pitch(depths, dive_mask, des, asc, pitch, pitch_lf):
    # all plot-text literals (titles, labels, legend locations) were lost in
    # extraction; the strings below are assumed placeholders
    import copy
    import numpy
    from . import plotutils
    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
    des_ind = numpy.where(dive_mask & des)[0]
    asc_ind = numpy.where(dive_mask & asc)[0]
    ax1.title.set_text('Dive phases')
    ax1 = plotutils.plot_noncontiguous(ax1, depths, des_ind, _colors[0], 'descents')
    ax1 = plotutils.plot_noncontiguous(ax1, depths, asc_ind, _colors[1], 'ascents')
    ax1.legend(loc='upper right')
    ax1.invert_yaxis()
    ax1.yaxis.label.set_text('Depth (m)')
    ax1.xaxis.label.set_text('Samples')
    ax2.title.set_text('Pitch angle')
    ax2.plot(range(len(pitch)), pitch, color=_colors[2],
             linewidth=_linewidth, label='pitch')
    ax2.plot(range(len(pitch_lf)), pitch_lf, color=_colors[3],
             linewidth=_linewidth, label='pitch (low-pass)')
    ax2.legend(loc='upper right')
    ax2.yaxis.label.set_text('Angle (radians)')
    plt.show()
    return None
Plot dives with phase and associated pitch angle with HF signal Args ---- depths: ndarray Depth values at each sensor sampling dive_mask: ndarray Boolean mask slicing dives from the tag data des: ndarray Boolean mask for slicing descent phases of dives from tag data asc: ndarray Boolean mask for slicing ascent phases of dives from tag data pitch: ndarray Pitch angle derived from accelerometer data pitch_lf: ndarray Low-pass filtered derived pitch angle data
10,583
def optional_else(self, node, last):
    if node.orelse:
        min_first_max_last(node, node.orelse[-1])
        if 'else' in self.operators:
            position = (node.orelse[0].first_line, node.orelse[0].first_col)
            _, efirst = self.operators['else'].find_previous(position)
            if efirst and efirst > last:
                elast, _ = self.operators['else'].find_previous(position)
                node.op_pos.append(NodeWithPosition(elast, efirst))
Create op_pos for optional else
10,584
def predict(inputs_list, problem, request_fn): assert isinstance(inputs_list, list) fname = "inputs" if problem.has_inputs else "targets" input_encoder = problem.feature_info[fname].encoder input_ids_list = [ _encode(inputs, input_encoder, add_eos=problem.has_inputs) for inputs in inputs_list ] examples = [_make_example(input_ids, problem, fname) for input_ids in input_ids_list] predictions = request_fn(examples) output_decoder = problem.feature_info["targets"].encoder outputs = [ (_decode(prediction["outputs"], output_decoder), prediction["scores"]) for prediction in predictions ] return outputs
Encodes inputs, makes request to deployed TF model, and decodes outputs.
10,585
def many_init(cls, *args, **kwargs):
    list_kwargs = {'child': cls(*args, **kwargs)}
    for key in kwargs.keys():
        if key in MANY_RELATION_KWARGS:
            list_kwargs[key] = kwargs[key]
    return ManyRelatedField(**list_kwargs)
This method handles creating a parent `ManyRelatedField` instance when the `many=True` keyword argument is passed. Typically you won't need to override this method. Note that we're over-cautious in passing most arguments to both parent and child classes in order to try to cover the general case. If you're overriding this method you'll probably want something much simpler, eg: @classmethod def many_init(cls, *args, **kwargs): kwargs['child'] = cls() return CustomManyRelatedField(*args, **kwargs)
10,586
def _create_pure_shape(self, primitive_type, options, sizes, mass, precision): lua_code = "simCreatePureShape({}, {}, {{{}, {}, {}}}, {}, {{{}, {}}})".format( primitive_type, options, sizes[0], sizes[1], sizes[2], mass, precision[0], precision[1]) self._inject_lua_code(lua_code)
Create Pure Shape
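For illustration, the snippet below reproduces the exact Lua command the formatter emits; the argument values are made up, and the meaning of the numeric primitive/option codes follows the V-REP/CoppeliaSim simCreatePureShape API:

primitive_type, options, sizes, mass, precision = 0, 8, [0.1, 0.2, 0.3], 0.5, [10, 10]
lua_code = "simCreatePureShape({}, {}, {{{}, {}, {}}}, {}, {{{}, {}}})".format(
    primitive_type, options, sizes[0], sizes[1], sizes[2], mass,
    precision[0], precision[1])
print(lua_code)
# simCreatePureShape(0, 8, {0.1, 0.2, 0.3}, 0.5, {10, 10})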
10,587
def batch(batch_size, items):
    "Batch items into groups of batch_size"
    items = list(items)
    if batch_size is None:
        return [items]
    MISSING = object()
    padded_items = items + [MISSING] * (batch_size - 1)
    groups = zip(*[padded_items[i::batch_size] for i in range(batch_size)])
    # Compare with `is not`: the sentinel marks padding, and `!=` could be
    # hijacked by items that define custom equality.
    return [[item for item in group if item is not MISSING] for group in groups]
Batch items into groups of batch_size
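A quick check of the grouping behavior, using the corrected implementation above:

print(batch(3, range(8)))
# [[0, 1, 2], [3, 4, 5], [6, 7]] -- the final group may be shorter
print(batch(None, "abc"))
# [['a', 'b', 'c']] -- batch_size of None keeps everything in one group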
10,588
def get_icohp_dict_by_bondlengths(self, minbondlength=0.0, maxbondlength=8.0): newicohp_dict = {} for value in self._icohplist.values(): if value._length >= minbondlength and value._length <= maxbondlength: newicohp_dict[value._label] = value return newicohp_dict
Get a dict of IcohpValues corresponding to certain bond lengths. Args: minbondlength: defines the minimum of the bond lengths of the bonds maxbondlength: defines the maximum of the bond lengths of the bonds Returns: dict of IcohpValues; the keys correspond to the values from the initial list_labels
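A hedged usage sketch; `icohpcollection` stands in for whatever object owns this method and its _icohplist (the owning class is not part of this excerpt):

# Keep only the interactions whose bond length falls between 0 and 3 Angstroms.
short_bonds = icohpcollection.get_icohp_dict_by_bondlengths(minbondlength=0.0,
                                                            maxbondlength=3.0)
for label in sorted(short_bonds):
    print(label, short_bonds[label])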
10,589
def wite_to_json(self, dir_path="", file_name=""):
    data = {
        "plot_data": self.record_thread.profile_data,
        "method_exec_info": self.method_exec_info,
        "search_file": self.search_file,
        "source_file": self.source_file}
    file_path = os.path.join(dir_path, file_name)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    # Use a context manager so the file handle is closed deterministically.
    with open(file_path, "w") as f:
        json.dump(data, f, indent=4)
Write the performance data to a JSON file.
10,590
def modify_document(self, doc): if self._lifecycle_handler.failed: return if self._theme is not None: doc.theme = self._theme if self._template is not None: doc.template = self._template self._main_handler.modify_document(doc)
Execute the configured ``main.py`` or ``main.ipynb`` to modify the document. This method will also search the app directory for any theme or template files, and automatically configure the document with them if they are found.
10,591
def getboolean_optional(self, section, option, default=False): try: return self.getboolean(section, option) except (configparser.NoSectionError, configparser.NoOptionError): return default
Get an option's boolean value for a given section If the section or the option is not found, the default value is returned :param section: config section :param option: config option :param default: default value :returns: boolean config value
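A self-contained check of the fallback behavior; the subclass name here is illustrative:

import configparser

class OptionalConfigParser(configparser.ConfigParser):
    def getboolean_optional(self, section, option, default=False):
        try:
            return self.getboolean(section, option)
        except (configparser.NoSectionError, configparser.NoOptionError):
            return default

cfg = OptionalConfigParser()
cfg.read_string("[server]\ndebug = yes\n")
print(cfg.getboolean_optional("server", "debug"))           # True
print(cfg.getboolean_optional("server", "verbose", True))   # True (default used)
print(cfg.getboolean_optional("client", "debug"))           # False (no section)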
10,592
def generate_fault_source_model(self): source_model = [] model_weight = [] for iloc in range(0, self.get_number_mfd_models()): model_mfd = EvenlyDiscretizedMFD( self.mfd[0][iloc].min_mag, self.mfd[0][iloc].bin_width, self.mfd[0][iloc].occur_rates.tolist()) if isinstance(self.geometry, ComplexFaultGeometry): source = mtkComplexFaultSource( self.id, self.name, self.trt, self.geometry.surface, self.mfd[2][iloc], self.rupt_aspect_ratio, model_mfd, self.rake) source.fault_edges = self.geometry.trace else: source = mtkSimpleFaultSource( self.id, self.name, self.trt, self.geometry.surface, self.geometry.dip, self.geometry.upper_depth, self.geometry.lower_depth, self.mfd[2][iloc], self.rupt_aspect_ratio, model_mfd, self.rake) source.fault_trace = self.geometry.trace source_model.append(source) model_weight.append(self.mfd[1][iloc]) return source_model, model_weight
Creates a resulting `openquake.hmtk` fault source set. :returns: source_model - list of instances of either the :class: `openquake.hmtk.sources.simple_fault_source.mtkSimpleFaultSource` or :class: `openquake.hmtk.sources.complex_fault_source.mtkComplexFaultSource` model_weight - Corresponding weights for each source model
10,593
def to_str(data): if isinstance(data, bytes): return codecs.decode(data, aws_encryption_sdk.internal.defaults.ENCODING) return data
Takes an input str or bytes object and returns an equivalent str object. :param data: Input data :type data: str or bytes :returns: Data normalized to str :rtype: str
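A quick check of the normalization; this assumes the library's ENCODING constant is UTF-8, which the byte string below relies on:

assert to_str("already text") == "already text"   # str passes through unchanged
assert to_str(b"caf\xc3\xa9") == "café"           # bytes are decoded (UTF-8 assumed)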
10,594
def _deserialize(cls, key, value, fields): converter = cls._get_converter_for_field(key, None, fields) return converter.deserialize(value)
Marshal incoming data into Python objects.
10,595
def create(self, comment, mentions=()):
    data = {
        "app": self.app_id,
        "record": self.record_id,
        "comment": {
            "text": comment,
        }
    }
    if mentions:
        _mentions = []
        for m in mentions:
            if isinstance(m, (list, tuple)):
                if len(m) == 2:
                    _mentions.append({
                        "code": m[0],
                        "type": m[1]
                    })
                else:
                    raise Exception("each mention has to be a (code, type) pair, e.g. [('user1', 'USER')]")
            elif isinstance(m, Mention):
                _mentions.append(m.serialize())
        data["comment"]["mentions"] = _mentions
    resp = self._request("POST", self._url, data)
    r = cr.CreateCommentResult(resp)
    return r
Create a comment on the record. :param comment: comment text :param mentions: list of (code, type) pairs, where type is "USER", "GROUP", and so on, or Mention objects :return: CreateCommentResult
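Hypothetical usage; `record.comment` stands in for whatever object exposes this create() method (the owning class is not part of this excerpt), and the codes are made up:

result = record.comment.create(
    "Please review this record.",
    mentions=[("user1", "USER"), ("qa-team", "GROUP")])
print(result)  # a cr.CreateCommentResult wrapping the API response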
10,596
def get_vulnerability_chains(
    current_node,
    sink,
    def_use,
    chain=None
):
    # A mutable default argument ([]) would be shared across calls;
    # create a fresh list per top-level call instead.
    if chain is None:
        chain = []
    for use in def_use[current_node]:
        if use == sink:
            yield chain
        else:
            vuln_chain = list(chain)
            vuln_chain.append(use)
            yield from get_vulnerability_chains(
                use,
                sink,
                def_use,
                vuln_chain
            )
Traverses the def-use graph to find all paths from source to sink that cause a vulnerability. Args: current_node(Node): the node the traversal currently starts from. sink(Node): the sink node that terminates a vulnerable path. def_use(dict): maps each node to the nodes that use its definition. chain(list(Node)): a path of nodes between source and sink.
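A toy def-use graph showing what gets yielded; node objects are replaced by strings for readability:

def_use = {
    'source': ['a', 'sink'],
    'a': ['sink'],
}
print(list(get_vulnerability_chains('source', 'sink', def_use)))
# [['a'], []] -- one chain through 'a', plus the direct source->sink path
# (a yielded chain holds the intermediate nodes, excluding source and sink)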
10,597
def get_edge_values(self, feature='idx'):
    # 'idx' is assumed as the default feature; the original literal was
    # missing from this excerpt.
    elist = []
    for cidx in self._coords.edges[:, 1]:
        node = self.treenode.search_nodes(idx=cidx)[0]
        elist.append(getattr(node, feature, ""))
    return elist
Returns edge values in the order they are plotted (see .get_edges())
10,598
def _log_A_0(params, freq, recency, age): r, alpha, s, beta = params if alpha < beta: min_of_alpha_beta, max_of_alpha_beta, t = (alpha, beta, r + freq) else: min_of_alpha_beta, max_of_alpha_beta, t = (beta, alpha, s + 1) abs_alpha_beta = max_of_alpha_beta - min_of_alpha_beta rsf = r + s + freq p_1 = hyp2f1(rsf, t, rsf + 1.0, abs_alpha_beta / (max_of_alpha_beta + recency)) q_1 = max_of_alpha_beta + recency p_2 = hyp2f1(rsf, t, rsf + 1.0, abs_alpha_beta / (max_of_alpha_beta + age)) q_2 = max_of_alpha_beta + age try: size = len(freq) sign = np.ones(size) except TypeError: sign = 1 return logsumexp([log(p_1) + rsf * log(q_2), log(p_2) + rsf * log(q_1)], axis=0, b=[sign, -sign]) - rsf * log( q_1 * q_2 )
log_A_0.
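The signed logsumexp at the end computes log(p_1 * q_2**rsf - p_2 * q_1**rsf) - rsf*log(q_1*q_2) without ever forming q**rsf directly, which would overflow for realistic rsf. A small standalone check of that identity (the values are arbitrary and chosen so the reference form still fits in a float64):

import numpy as np
from numpy import log
from scipy.special import logsumexp

p_1, p_2, q_1, q_2, rsf = 0.9, 0.4, 50.0, 80.0, 300.0

# Stable form, mirroring the return statement above.
stable = logsumexp([log(p_1) + rsf * log(q_2), log(p_2) + rsf * log(q_1)],
                   b=[1, -1]) - rsf * log(q_1 * q_2)

# Algebraically identical reference: log(p_1/q_1**rsf - p_2/q_2**rsf),
# rearranged so only the moderate ratio (q_2/q_1)**rsf is exponentiated.
reference = log(p_1 * (q_2 / q_1) ** rsf - p_2) - rsf * log(q_2)
print(np.isclose(stable, reference))  # True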
10,599
def tangle(*args, **kwargs):
    class_attrs = {
        "_links": [],
        "_dlinks": [],
        "_derived": {}
    }
    # NOTE: parts of this function were missing from the source. The loop
    # below is a plausible reconstruction: each keyword default is mapped to
    # a traitlet per the rules in the docstring; _traitlet_for is a
    # hypothetical helper standing in for the lost dispatch logic.
    for key, value in kwargs.items():
        if isinstance(value, FunctionType):  # needs `from types import FunctionType`
            # Derived values: remember the function so AutoTangle can
            # subscribe to its argument names for updates.
            class_attrs["_derived"][key] = value
            continue
        traitlet_cls, traitlet_args, traitlet_kwargs = _traitlet_for(key, value)
        class_attrs[key] = traitlet_cls(*traitlet_args, **traitlet_kwargs)
    new_class = type(
        'DynamicTangle{}'.format(id(class_attrs)),  # class name format assumed
        (AutoTangle,),
        class_attrs
    )
    inst = new_class()
    return inst._refresh()
Shortcut to create a new, custom Tangle model. Use instead of directly subclassing `Tangle`. A new, custom Widget class is created, with each of `kwargs` as a traitlet. Returns an instance of the new class with default values. `kwargs` options - primitive types (int, bool, float) will be created as casting versions (`CInt`, `CBool`, `CFloat`) - a `list` will be created as an `Enum` - a `Widget` instance will create a link to that widget's `value` - a `tuple` `(widget_instance, "traitlet")` will create a `link` - functions will be `inspect`ed to find their argument names subscribed for update... this uses `inspect`, won't work with `*` magic - a `tuple` `(function, default)` will be created as the type (as above)
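A hedged usage sketch following the docstring's rules; exactly how and when the derived function is re-evaluated depends on AutoTangle, which is not shown in this excerpt:

t = tangle(
    cookies=3,                               # int -> casting CInt traitlet
    calories=lambda cookies: cookies * 50,   # function -> derived value
)
t.cookies = 4
# t.calories should now track 4 * 50 = 200 once the derived wiring fires.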