Unnamed: 0 (int64, 0 – 389k) | code (string, lengths 26 – 79.6k) | docstring (string, lengths 1 – 46.9k) |
---|---|---|
25,200 |
def loadFiles(self, fileNames, rtiClass=None):
for fileName in fileNames:
self.repo.loadFile(fileName, rtiClass=rtiClass)
|
Loads files into the repository as repo tree items of class rtiClass.
Auto-detects the type from the file extension when rtiClass is None.
|
25,201 |
def create_target_group(name,
protocol,
port,
vpc_id,
region=None,
key=None,
keyid=None,
profile=None,
health_check_protocol='HTTP',
health_check_port='traffic-port',
health_check_path='/',
health_check_interval_seconds=30,
health_check_timeout_seconds=5,
healthy_threshold_count=5,
unhealthy_threshold_count=2):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if target_group_exists(name, region, key, keyid, profile):
return True
try:
alb = conn.create_target_group(Name=name, Protocol=protocol, Port=port,
VpcId=vpc_id, HealthCheckProtocol=health_check_protocol,
HealthCheckPort=health_check_port,
HealthCheckPath=health_check_path,
HealthCheckIntervalSeconds=health_check_interval_seconds,
HealthCheckTimeoutSeconds=health_check_timeout_seconds,
HealthyThresholdCount=healthy_threshold_count,
UnhealthyThresholdCount=unhealthy_threshold_count)
if alb:
log.info('Created target group %s: %s', name, alb['TargetGroups'][0]['TargetGroupArn'])
return True
else:
log.error('Failed to create target group %s', name)
return False
except ClientError as error:
log.error(
'Failed to create target group %s: %s: %s',
name, error.response['Error']['Code'],
error.response['Error']['Message'],
exc_info_on_loglevel=logging.DEBUG
)
|
Create target group if not present.
name
(string) - The name of the target group.
protocol
(string) - The protocol to use for routing traffic to the targets
port
(int) - The port on which the targets receive traffic. This port is used unless
you specify a port override when registering the target.
vpc_id
(string) - The identifier of the virtual private cloud (VPC).
health_check_protocol
(string) - The protocol the load balancer uses when performing health check on
targets. The default is the HTTP protocol.
health_check_port
(string) - The port the load balancer uses when performing health checks on
targets. The default is 'traffic-port', which indicates the port on which each
target receives traffic from the load balancer.
health_check_path
(string) - The ping path that is the destination on the targets for health
checks. The default is /.
health_check_interval_seconds
(integer) - The approximate amount of time, in seconds, between health checks
of an individual target. The default is 30 seconds.
health_check_timeout_seconds
(integer) - The amount of time, in seconds, during which no response from a
target means a failed health check. The default is 5 seconds.
healthy_threshold_count
(integer) - The number of consecutive health check successes required before
considering an unhealthy target healthy. The default is 5.
unhealthy_threshold_count
(integer) - The number of consecutive health check failures required before
considering a target unhealthy. The default is 2.
returns
(bool) - True on success, False on failure.
CLI example:
.. code-block:: bash
salt myminion boto_elbv2.create_target_group learn1give1 protocol=HTTP port=54006 vpc_id=vpc-deadbeef
|
25,202 |
def isclose(a, b, align=False, rtol=1.e-5, atol=1.e-8):
coords = ['x', 'y', 'z']
if not (set(a.index) == set(b.index)
and np.alltrue(a.loc[:, 'atom'] == b.loc[a.index, 'atom'])):
message = 'Can only compare molecules with the same atoms and labels.'
raise ValueError(message)
if align:
a = a.get_inertia()['transformed_Cartesian']
b = b.get_inertia()['transformed_Cartesian']
A, B = a.loc[:, coords], b.loc[a.index, coords]
out = a._frame.copy()
out['atom'] = True
out.loc[:, coords] = np.isclose(A, B, rtol=rtol, atol=atol)
return out
|
Compare two molecules for numerical equality.
Args:
a (Cartesian):
b (Cartesian):
align (bool): If True, a and b are
prealigned along their principal axes of inertia and moved to their
barycenters before comparing.
rtol (float): Relative tolerance for the numerical equality comparison;
look into :func:`numpy.isclose` for further explanation.
atol (float): Absolute tolerance for the numerical equality comparison;
look into :func:`numpy.isclose` for further explanation.
Returns:
:class:`numpy.ndarray`: Boolean array.
|
25,203 |
def resource_from_etree(self, etree, resource_class):
loc_elements = etree.findall('{' + SITEMAP_NS + "}loc")
if (len(loc_elements) > 1):
raise SitemapParseError(
"Multiple <loc> elements while parsing <url> in sitemap")
elif (len(loc_elements) == 0):
raise SitemapParseError(
"Missing <loc> element while parsing <url> in sitemap")
else:
loc = loc_elements[0].text
if (loc is None or loc == ''):
raise SitemapParseError(
"Bad <loc> element with no content while parsing <url> in sitemap")
resource = resource_class(uri=loc)
lastmod_elements = etree.findall('{' + SITEMAP_NS + "}lastmod")
if (len(lastmod_elements) > 1):
raise SitemapParseError(
"Multiple <lastmod> elements while parsing <url> in sitemap")
elif (len(lastmod_elements) == 1):
resource.lastmod = lastmod_elements[0].text
md_elements = etree.findall('{' + RS_NS + "}md")
if (len(md_elements) > 1):
raise SitemapParseError(
"Found multiple (%d) <rs:md> elements for %s", (len(md_elements), loc))
elif (len(md_elements) == 1):
md = self.md_from_etree(md_elements[0], context=loc)
for att in ('capability', 'change', 'length', 'path', 'mime_type'):
if (att in md):
setattr(resource, att, md[att])
if ('hash' in md):
try:
resource.hash = md['hash']
except ValueError as e:
self.logger.warning("%s in <rs:md> for %s" % (str(e), loc))
ln_elements = etree.findall('{' + RS_NS + "}ln")
if (len(ln_elements) > 0):
resource.ln = []
for ln_element in ln_elements:
resource.ln.append(self.ln_from_etree(ln_element, loc))
return(resource)
|
Construct a Resource from an etree.
Parameters:
etree - the etree to parse
resource_class - class of Resource object to create
The parsing is properly namespace aware but we search just
for the elements wanted and leave everything else alone. Will
raise an error if there are multiple <loc> or multiple <lastmod>
elements. Otherwise, provided there is a <loc> element then will
go ahead and extract as much as possible.
All errors raised are SitemapParseError with messages intended
to help debug problematic sitemap XML.
|
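For orientation, the kind of <url> entry this parser consumes can be reproduced with the standard library; the namespace constants and the sample values below are illustrative assumptions, not taken from the resync source.
.. code-block:: python

    from xml.etree import ElementTree

    SITEMAP_NS = 'http://www.sitemaps.org/schemas/sitemap/0.9'
    RS_NS = 'http://www.openarchives.org/rs/terms/'

    xml = ('<url xmlns="%s" xmlns:rs="%s">'
           '<loc>http://example.com/res1</loc>'
           '<lastmod>2017-01-02T03:04:05Z</lastmod>'
           '<rs:md length="8876"/>'
           '</url>' % (SITEMAP_NS, RS_NS))
    etree = ElementTree.fromstring(xml)
    # Exactly one <loc> child is present, as resource_from_etree expects.
    print(etree.findall('{' + SITEMAP_NS + '}loc')[0].text)  # http://example.com/res1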
25,204 |
def temp_shell_task(cls, inp, mpi_procs=1, workdir=None, manager=None):
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
if manager is None: manager = TaskManager.from_user_config()
task = cls.from_input(inp, workdir=workdir, manager=manager.to_shell_manager(mpi_procs=mpi_procs))
task.set_name()
return task
|
Build a Task with a temporary workdir. The task is executed via the shell with 1 MPI proc.
Mainly used for invoking Abinit to get important parameters needed to prepare the real task.
Args:
mpi_procs: Number of MPI processes to use.
|
25,205 |
def pdf_Gates_Gaudin_Schuhman(d, d_characteristic, m):
r
if d <= d_characteristic:
return m/d*(d/d_characteristic)**m
else:
return 0.0
|
r'''Calculates the probability density of a particle
distribution following the Gates, Gaudin and Schuhman (GGS) model given a
particle diameter `d`, characteristic (maximum) particle
diameter `d_characteristic`, and exponent `m`.
.. math::
q(d) = \frac{m}{d}\left(\frac{d}{d_{characteristic}}\right)^m
\text{ if } d \le d_{characteristic} \text{ else } 0
Parameters
----------
d : float
Specified particle diameter, [m]
d_characteristic : float
Characteristic particle diameter; in this model, it is the largest
particle size diameter in the distribution, [m]
m : float
Particle size distribution exponent, [-]
Returns
-------
pdf : float
GGS probability density function, [-]
Notes
-----
The characteristic diameter can be in terms of number density (denoted
:math:`q_0(d)`), length density (:math:`q_1(d)`), surface area density
(:math:`q_2(d)`), or volume density (:math:`q_3(d)`). Volume density is
most often used. Interconversions among the distributions are possible but
tricky.
Examples
--------
>>> pdf_Gates_Gaudin_Schuhman(d=2E-4, d_characteristic=1E-3, m=2.3)
283.8355768512045
References
----------
.. [1] Schuhmann, R., 1940. Principles of Comminution, I-Size Distribution
and Surface Calculations. American Institute of Mining, Metallurgical
and Petroleum Engineers Technical Publication 1189. Mining Technology,
volume 4, p. 1-11.
.. [2] Bayat, Hossein, Mostafa Rastgo, Moharram Mansouri Zadeh, and Harry
Vereecken. "Particle Size Distribution Models, Their Characteristics and
Fitting Capability." Journal of Hydrology 529 (October 1, 2015): 872-89.
|
25,206 |
def matches_prefix(ip, prefix):
ip_int = ip2int(ip)
network, pfxlen = parse_prefix(prefix)
network_int = ip2int(network)
mask_int = pfxlen2mask_int(pfxlen)
return ip_int&mask_int == network_int&mask_int
|
Returns True if the given IP address is part of the given
network, returns False otherwise.
:type ip: string
:param ip: An IP address.
:type prefix: string
:param prefix: An IP prefix.
:rtype: bool
:return: True if the IP is in the prefix, False otherwise.
|
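The check above boils down to masking both addresses with the prefix length and comparing; a minimal self-contained sketch of that logic using only the standard library (the helpers ip2int, parse_prefix and pfxlen2mask_int belong to the surrounding package and are approximated here):
.. code-block:: python

    import socket
    import struct

    def _ip2int(ip):
        # Dotted-quad IPv4 string -> 32-bit integer.
        return struct.unpack('!I', socket.inet_aton(ip))[0]

    def _matches_prefix(ip, prefix):
        network, pfxlen = prefix.split('/')
        mask = (0xFFFFFFFF << (32 - int(pfxlen))) & 0xFFFFFFFF
        return _ip2int(ip) & mask == _ip2int(network) & mask

    print(_matches_prefix('192.168.1.7', '192.168.1.0/24'))  # True
    print(_matches_prefix('192.168.2.7', '192.168.1.0/24'))  # False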
25,207 |
def _flush(self):
self._buffer.sort()
self._replace_batch()
self._buffer = []
self._compress()
|
Purges the buffer and commits all pending values into the estimator.
|
25,208 |
def is_suicide_or_check_by_dropping_pawn(self, move):
self.push(move)
is_suicide = self.was_suicide()
is_check_by_dropping_pawn = self.was_check_by_dropping_pawn(move)
self.pop()
return is_suicide or is_check_by_dropping_pawn
|
Checks if the given move would leave the king in check or
put it into check.
|
25,209 |
def invert_hash(self, tok_hash):
return [tok_encoded.decode()
for (_, tok_encoded) in
self.client.scan_keys(HASH_KEYWORD_INDEX_TABLE,
((tok_hash,), (tok_hash,)))]
|
Get strings that correspond to some hash.
No string will correspond to :data:`DOCUMENT_HASH_KEY`; use
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param int tok_hash: Murmur hash to query
:return: list of :class:`unicode` strings
|
25,210 |
def get_node_fact_by_name(api_url=None, node_name=None, fact_name=None, verify=False, cert=list()):
return utils._make_api_request(api_url, .format(node_name,
fact_name), verify, cert)
|
Returns specified fact for a Node
:param api_url: Base PuppetDB API url
:param node_name: Name of node
:param fact_name: Name of fact
|
25,211 |
def libvlc_media_get_type(p_md):
f = _Cfunctions.get('libvlc_media_get_type', None) or \
_Cfunction('libvlc_media_get_type', ((1,),), None,
MediaType, Media)
return f(p_md)
|
Get the media type of the media descriptor object.
@param p_md: media descriptor object.
@return: media type.
@version: LibVLC 3.0.0 and later. See libvlc_media_type_t.
|
25,212 |
def get_item(key):
CACHED_KEY_FILE = os.path.join(CURRENT_DIR, key)
try:
return json.loads(open(CACHED_KEY_FILE, "rb").read().decode())["_"]
except (IOError, ValueError):
return None
|
Return the content of the cached file, parsed as JSON.
|
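As the snippet suggests, each cache file is a JSON document whose payload lives under the "_" key; a hedged sketch of the matching writer (set_item is hypothetical, mirroring get_item's layout):
.. code-block:: python

    import json
    import os

    CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))

    def set_item(key, value):
        # Hypothetical counterpart: wrap the value under "_" so get_item can read it back.
        with open(os.path.join(CURRENT_DIR, key), "wb") as fh:
            fh.write(json.dumps({"_": value}).encode())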
25,213 |
def start_tcp_server(self, port):
self.__tcp_server.port = port
if not self.__tcp_server.online:
if self.__tcp_server.start():
self.__engine.notifications_manager.notify(
"{0} | TCP Server has started with address on port!".format(
self.__class__.__name__,
self.__address,
self.__port))
return True
else:
self.__engine.notifications_manager.warnify(
"{0} | TCP Server is already online!".format(self.__class__.__name__))
return False
|
Starts the TCP server using given port.
:param port: Port.
:type port: int
:return: Method success.
:rtype: bool
|
25,214 |
def positions_to_contigs(positions):
if isinstance(positions, np.ndarray):
flattened_positions = positions.flatten()
else:
try:
flattened_positions = np.array(
[pos for contig in positions for pos in contig])
except TypeError:
flattened_positions = np.array(positions)
if (np.diff(positions) == 0).any() and not (0 in set(positions)):
warnings.warn("I detected identical consecutive nonzero values.")
return positions
n = len(flattened_positions)
contigs = np.ones(n)
counter = 0
for i in range(1, n):
if positions[i] == 0:
counter += 1
contigs[i] += counter
else:
contigs[i] = contigs[i - 1]
return contigs
|
Flattens and converts a positions array to a contigs array, if applicable.
|
25,215 |
def write_table(page, headers, data, cl=):
page.table(class_=cl)
if cl==:
for i in range(len(headers)):
page.tr()
page.th()
page.add('%s' % headers[i])
page.th.close()
page.td()
page.add('%s' % data[i])
page.td.close()
page.tr.close()
else:
page.tr()
for n in headers:
page.th()
page.add('%s' % n)
page.th.close()
page.tr.close()
if data and not re.search('list', str(type(data[0]))):
data = [data]
for row in data:
page.tr()
for item in row:
page.td()
page.add('%s' % item)
page.td.close()
page.tr.close()
page.table.close()
return page
|
Write table in html
|
25,216 |
def _validate_minlength(self, min_length, field, value):
if isinstance(value, Iterable) and len(value) < min_length:
self._error(field, errors.MIN_LENGTH, len(value))
|
{'type': 'integer'}
|
25,217 |
def shorten_go_name_ptbl3(self, name, dcnt):
if self._keep_this(name):
return name
name = name.replace("positive regulation of immune system process",
"+ reg. of immune sys. process")
name = name.replace("positive regulation of immune response",
"+ reg. of immune response")
name = name.replace("positive regulation of cytokine production",
"+ reg. of cytokine production")
if dcnt < 40:
name = name.replace("antigen processing and presentation", "a.p.p.")
if dcnt < 10:
name = name.replace("negative", "-")
name = name.replace("positive", "+")
name = name.replace("tumor necrosis factor production", "TNF production")
if dcnt < 4:
name = name.replace("regulation", "reg.")
name = name.replace("exogenous ", "")
name = name.replace(" via ", " w/")
name = name.replace("T cell mediated cytotoxicity", "cytotoxicity via T cell")
name = name.replace(, )
name = name.replace(, )
return name
|
Shorten GO description for Table 3 in manuscript.
|
25,218 |
def make(data, samples):
outfile = open(os.path.join(data.dirs.outfiles, data.name+".vcf"), 'w')
inloci = os.path.join(data.dirs.outfiles, data.name+".loci")
names = [i.name for i in samples]
names.sort()
version = "0.1"
outfile.write( "\n".join(vcflist) )
outfile.close()
|
build a vcf file from the supercatg array and the cat.clust.gz output
|
25,219 |
def in_(self, *objs):
if not objs:
return self.table.c[self.fielda]!=self.table.c[self.fielda]
else:
keys = get_objs_columns(objs, self.reference_fieldname)
sub_query = select([self.table.c[self.fielda]], (self.table.c[self.fieldb] == self.reference_class.c[self.reference_fieldname]) & (self.table.c[self.fieldb].in_(keys)))
condition = self.model_class.c[self.reversed_fieldname].in_(sub_query)
return condition
|
Create a condition
|
25,220 |
def diff(cwd,
item1=None,
item2=None,
opts='',
git_opts='',
user=None,
password=None,
no_index=False,
cached=False,
paths=None,
output_encoding=None):
|
.. versionadded:: 2015.8.12,2016.3.3,2016.11.0
Interface to `git-diff(1)`_
cwd
The path to the git checkout
item1 and item2
Revision(s) to pass to the ``git diff`` command. One or both of these
arguments may be ignored if some of the options below are set to
``True``. When ``cached`` is ``False``, and no revisions are passed
to this function, then the current working tree will be compared
against the index (i.e. unstaged changes). When two revisions are
passed, they will be compared to each other.
opts
Any additional options to add to the command line, in a single string
.. note::
On the Salt CLI, if the opts are preceded with a dash, it is
necessary to precede them with ``opts=`` (as in the CLI examples
below) to avoid causing errors with Salt's own argument parsing.
git_opts
Any additional options to add to git command itself (not the ``diff``
subcommand), in a single string. This is useful for passing ``-c`` to
run git with temporary changes to the git configuration.
.. versionadded:: 2017.7.0
.. note::
This is only supported in git 1.7.2 and newer.
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
no_index : False
When it is necessary to diff two files in the same repo against each
other, and not diff two different revisions, set this option to
``True``. If this is left ``False`` in these instances, then a normal
``git diff`` will be performed against the index (i.e. unstaged
changes), and files in the ``paths`` option will be used to narrow down
the diff output.
.. note::
Requires Git 1.5.1 or newer. Additionally, when set to ``True``,
``item1`` and ``item2`` will be ignored.
cached : False
If ``True``, compare staged changes to ``item1`` (if specified),
otherwise compare them to the most recent commit.
.. note::
``item2`` is ignored if this option is set to ``True``.
paths
File paths to pass to the ``git diff`` command. Can be passed as a
comma-separated list or a Python list.
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
.. _`git-diff(1)`: http://git-scm.com/docs/git-diff
CLI Example:
.. code-block:: bash
# Perform diff against the index (staging area for next commit)
salt myminion git.diff /path/to/repo
# Compare staged changes to the most recent commit
salt myminion git.diff /path/to/repo cached=True
# Compare staged changes to a specific revision
salt myminion git.diff /path/to/repo mybranch cached=True
# Perform diff against the most recent commit (includes staged changes)
salt myminion git.diff /path/to/repo HEAD
# Diff two commits
salt myminion git.diff /path/to/repo abcdef1 aabbccd
# Diff two commits, only showing differences in the specified paths
salt myminion git.diff /path/to/repo abcdef1 aabbccd paths=path/to/file1,path/to/file2
# Diff two files with one being outside the working tree
salt myminion git.diff /path/to/repo no_index=True paths=path/to/file1,/absolute/path/to/file2
|
25,221 |
def prev(self):
seg = Segment(segment_t=idaapi.get_prev_seg(self.ea))
if seg.ea >= self.ea:
raise exceptions.NoMoreSegments("This is the first segment. no segments exist before it.")
return seg
|
Get the previous segment.
|
25,222 |
def incr(self, key, value, default=0, time=1000000):
return self._incr_decr('incr', key, value, default, time)
|
Increment a key. If it exists, return its incremented value; if it doesn't, return the default (0).
:param key: Key's name
:type key: six.string_types
:param value: Number to be incremented
:type value: int
:param default: Default value if key does not exist.
:type default: int
:param time: Time in seconds to expire key.
:type time: int
:return: Actual value of the key on server
:rtype: int
|
25,223 |
def halt(self):
try:
self._halt()
except IOError as exc:
if exc.errno != errno.EBADMSG:
raise
else:
raise ValueError()
self._halted = True
|
Halt current endpoint.
|
25,224 |
def play(events, speed_factor=1.0):
state = stash_state()
last_time = None
for event in events:
if speed_factor > 0 and last_time is not None:
_time.sleep((event.time - last_time) / speed_factor)
last_time = event.time
key = event.scan_code or event.name
press(key) if event.event_type == KEY_DOWN else release(key)
restore_modifiers(state)
|
Plays a sequence of recorded events, maintaining the relative time
intervals. If speed_factor is <= 0 then the actions are replayed as fast
as the OS allows. Pairs well with `record()`.
Note: the current keyboard state is cleared at the beginning and restored at
the end of the function.
|
25,225 |
def function(data, maxt=None):
data = np.atleast_1d(data)
assert len(np.shape(data)) == 1, \
"The autocorrelation function can only by computed " \
+ "on a 1D time series."
if maxt is None:
maxt = len(data)
result = np.zeros(maxt, dtype=float)
_acor.function(np.array(data, dtype=float), result)
return result / result[0]
|
Calculate the autocorrelation function for a 1D time series.
Parameters
----------
data : numpy.ndarray (N,)
The time series.
Returns
-------
rho : numpy.ndarray (N,)
An autocorrelation function.
|
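The row above delegates the computation to a compiled _acor extension; a pure-NumPy sketch of a normalized autocorrelation function with the same interface (an approximation of what _acor.function returns, not the library's exact estimator):
.. code-block:: python

    import numpy as np

    def autocorrelation(data, maxt=None):
        data = np.atleast_1d(np.asarray(data, dtype=float))
        if maxt is None:
            maxt = len(data)
        x = data - data.mean()
        # Autocovariance at non-negative lags, then normalize so rho[0] == 1.
        acov = np.correlate(x, x, mode='full')[len(x) - 1:]
        rho = acov[:maxt]
        return rho / rho[0]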
25,226 |
def hook_focus_events(self):
widget = self.widget
widget.focusInEvent = self.focusInEvent
widget.focusOutEvent = self.focusOutEvent
|
Install the hooks for focus events.
This method may be overridden by subclasses as needed.
|
25,227 |
def _noise_dict_update(noise_dict):
default_dict = {'task_sigma': 0, 'drift_sigma': 0, 'auto_reg_sigma': 1,
'auto_reg_rho': [0.5], 'ma_rho': [0.0],
'physiological_sigma': 0, 'sfnr': 90, 'snr': 50,
'max_activity': 1000, 'voxel_size': [1.0, 1.0, 1.0],
'fwhm': 4, 'matched': 1}
for default_key in default_dict:
if default_key not in noise_dict:
noise_dict[default_key] = default_dict[default_key]
return noise_dict
|
Update the noise dictionary parameters with default values, in case any
were missing
Parameters
----------
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. These values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will fit
the parameters to match the participant as best as possible.
The noise variables are as follows:
snr [float]: Ratio of MR signal to the spatial noise
sfnr [float]: Ratio of the MR signal to the temporal noise. This is the
total variability that the following sigmas 'sum' to:
task_sigma [float]: Size of the variance of task specific noise
drift_sigma [float]: Size of the variance of drift noise
auto_reg_sigma [float]: Size of the variance of autoregressive
noise. This is an ARMA process where the AR and MA components can be
separately specified
physiological_sigma [float]: Size of the variance of physiological
noise
auto_reg_rho [list]: The coefficients of the autoregressive
components you are modeling
ma_rho [list]: The coefficients of the moving average components you
are modeling
max_activity [float]: The max value of the averaged brain in order
to reference the template
voxel_size [list]: The mm size of the voxels
fwhm [float]: The gaussian smoothing kernel size (mm)
matched [bool]: Specify whether you are fitting the noise parameters
The volumes of brain noise that are generated have smoothness
specified by 'fwhm'
Returns
-------
noise_dict : dict
Updated dictionary
|
25,228 |
def remove_nopairs(in_bam, out_dir, config):
runner = broad.runner_from_config(config)
out_bam = os.path.join(out_dir, "{}-safepair{}".format(*os.path.splitext(os.path.basename(in_bam))))
if not utils.file_exists(out_bam):
read_counts = collections.defaultdict(int)
with pysam.Samfile(in_bam, "rb") as in_pysam:
for read in in_pysam:
if read.is_paired:
read_counts[read.qname] += 1
with pysam.Samfile(in_bam, "rb") as in_pysam:
with file_transaction(out_bam) as tx_out_bam:
with pysam.Samfile(tx_out_bam, "wb", template=in_pysam) as out_pysam:
for read in in_pysam:
if read_counts[read.qname] == 2:
out_pysam.write(read)
return runner.run_fn("picard_sort", out_bam, "queryname")
|
Remove any reads without both pairs present in the file.
|
25,229 |
def migrate_doc(self, doc):
orig_ver = doc.get(self.version_attribute_name, 0)
funcs = self._get_migrate_funcs(orig_ver, self.target_version)
for func in funcs:
func(self, doc)
doc[self.version_attribute_name] = func.target
return doc
|
Migrate the doc from its current version to the target version
and return it.
|
25,230 |
def get(self, key):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(multi_map_get_codec, key_data, key=key_data,
thread_id=thread_id())
|
Returns the list of values associated with the key. ``None`` if this map does not contain this key.
**Warning:
This method uses hashCode and equals of the binary form of the key, not the actual implementations of hashCode
and equals defined in the key's class.**
**Warning-2:
The list is NOT backed by the multimap, so changes to the map are not reflected in the collection, and
vice-versa.**
:param key: (object), the specified key.
:return: (Sequence), the list of the values associated with the specified key.
|
25,231 |
def location(args):
from jcvi.formats.bed import BedLine
from jcvi.graphics.histogram import stem_leaf_plot
p = OptionParser(location.__doc__)
p.add_option("--dist", default=100, type="int",
help="Distance cutoff to call 5` and 3` [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
dist = opts.dist
sizes = Sizes(fastafile).mapping
fp = open(bedfile)
fiveprime = threeprime = total = 0
percentages = []
for row in fp:
b = BedLine(row)
pos = b.start
size = sizes[b.seqid]
if pos < dist:
fiveprime += 1
if size - pos < dist:
threeprime += 1
total += 1
percentages.append(100 * pos / size)
m = "Five prime (within {0}bp of start codon): {1}\n".format(dist, fiveprime)
m += "Three prime (within {0}bp of stop codon): {1}\n".format(dist, threeprime)
m += "Total: {0}".format(total)
print(m, file=sys.stderr)
bins = 10
title = "Locations within the gene [0=Five-prime, 100=Three-prime]"
stem_leaf_plot(percentages, 0, 100, bins, title=title)
|
%prog location bedfile fastafile
Given SNP locations, summarize the locations in the sequences. For example,
find out if there are more 3`-SNPs than 5`-SNPs.
|
25,232 |
def get(self, *args, **kwargs):
if 'path' in kwargs:
kwargs = self.get_filter_args_with_path(True, **kwargs)
return super(FileNodeManager, self).get(
*args, **kwargs)
|
Works just like the default Manager's :func:`get` method, but
you can pass an additional keyword argument named ``path`` specifying
the full path of the object you want to retrieve, e.g.
``"path/to/folder/readme.txt"``.
|
25,233 |
def validate(self, institute, case, user, link, variant, validate_type):
if not validate_type in SANGER_OPTIONS:
LOG.warning("Invalid validation string: %s", validate_type)
LOG.info("Validation options: %s", .join(SANGER_OPTIONS))
return
updated_variant = self.variant_collection.find_one_and_update(
{: variant[]},
{: {: validate_type}},
return_document=pymongo.ReturnDocument.AFTER
)
self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='variant',
verb='validate',
variant=variant,
subject=variant['display_name'],
)
return updated_variant
|
Mark validation status for a variant.
Arguments:
institute (dict): An Institute object
case (dict): Case object
user (dict): A User object
link (str): The url to be used in the event
variant (dict): A variant object
validate_type(str): The outcome of validation.
choices=('True positive', 'False positive')
Returns:
updated_variant(dict)
|
25,234 |
def get_subscriptions(self):
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
self.url + "/subscriptions",
None
)
|
:calls: `GET /users/:user/subscriptions <http://developer.github.com/v3/activity/watching>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
|
25,235 |
def is_domain_equal(self, other):
domain = self.get_domain()
other_domain = other.get_domain()
if domain == other_domain:
return True
else:
return False
|
Computes whether two Partial Orderings have the same generalization
structure.
|
25,236 |
def run(self, run_priority: Optional[int] = None):
if isinstance(self._executable, Program):
raise TypeError("It looks like you have provided a Program where an Executable"
" is expected. Please use QuantumComputer.compile() to compile"
" your program.")
super().run()
request = QPURequest(program=self._executable.program,
patch_values=self._build_patch_values(),
id=str(uuid.uuid4()))
job_priority = run_priority if run_priority is not None else self.priority
job_id = self.client.call('execute_qpu_request', request=request, user=self.user,
priority=job_priority)
results = self._get_buffers(job_id)
ro_sources = self._executable.ro_sources
if results:
bitstrings = _extract_bitstrings(ro_sources, results)
elif not ro_sources:
warnings.warn("You are running a QPU program with no MEASURE instructions. "
"The result of this program will always be an empty array. Are "
"you sure you didn't mean to measure some of your qubits?")
bitstrings = np.zeros((0, 0), dtype=np.int64)
else:
bitstrings = None
self._bitstrings = bitstrings
self._last_results = results
return self
|
Run a pyquil program on the QPU.
This formats the classified data from the QPU server by stacking measured bits into
an array of shape (trials, classical_addresses). The mapping of qubit to
classical address is backed out from MEASURE instructions in the program, so
only do measurements where there is a 1-to-1 mapping between qubits and classical
addresses.
:param run_priority: The priority with which to insert jobs into the QPU queue. Lower
integers correspond to higher priority. If not specified, the QPU
object's default priority is used.
:return: The QPU object itself.
|
25,237 |
def add_approximant_arg(parser, default=None, help=None):
if help is None:
help=str("The approximant(s) to use. Multiple approximants to use "
"in different regions may be provided. If multiple "
"approximants are provided, every one but the last must be "
"be followed by a conditional statement defining where that "
"approximant should be used. Conditionals can be any boolean "
"test understood by numpy. For example, would use approximant where total mass "
"is > 4 and chirp mass is <= 5. "
"Conditionals are applied in order, with each successive one "
"only applied to regions not covered by previous arguments. "
"For example, ` ` "
"would result in IMRPhenomD being used where chirp mass is < 3 "
"and total mass is >= 4. The last approximant given may use "
" as the conditional or include no conditional. In either "
"case, this will cause the last approximant to be used in any "
"remaning regions after all the previous conditionals have been "
"applied. For the full list of possible parameters to apply "
"conditionals to, see WaveformArray.default_fields(). Math "
"operations may also be used on parameters; syntax is python, "
"with any operation recognized by numpy.")
parser.add_argument("--approximant", nargs=, type=str, default=default,
metavar=,
help=help)
|
Adds an approximant argument to the given parser.
Parameters
----------
parser : ArgumentParser
The argument parser to add the argument to.
default : {None, str}
Specify a default for the approximant argument. Defaults to None.
help : {None, str}
Provide a custom help message. If None, will use a descriptive message
on how to specify the approximant.
|
25,238 |
def SetTimezone(self, timezone):
if not timezone:
return
try:
self._timezone = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise ValueError('Unsupported timezone: {0:s}.'.format(timezone))
|
Sets the timezone.
Args:
timezone (str): timezone.
Raises:
ValueError: if the timezone is not supported.
|
25,239 |
def to_dms(angle, style='dms'):
sign = 1 if angle >= 0 else -1
angle = abs(angle) * 3600
minutes, seconds = divmod(angle, 60)
degrees, minutes = divmod(minutes, 60)
if style == 'dms':
return tuple(sign * abs(i) for i in (int(degrees), int(minutes),
seconds))
elif style == 'dm':
return tuple(sign * abs(i) for i in (int(degrees),
(minutes + seconds / 60)))
else:
raise ValueError('Unknown style type %r' % style)
|
Convert decimal angle to degrees, minutes and possibly seconds.
Args:
angle (float): Angle to convert
style (str): Return fractional or whole minutes values
Returns:
tuple of int: Angle converted to degrees, minutes and possibly seconds
Raises:
ValueError: Unknown value for ``style``
|
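A quick worked check of the divmod chain above, using 45.5 degrees (whose expansion is exact in floating point):
.. code-block:: python

    # 45.5 degrees -> degrees / minutes / seconds, mirroring to_dms(45.5)
    angle = abs(45.5) * 3600                # 163800.0 arc-seconds
    minutes, seconds = divmod(angle, 60)    # 2730.0, 0.0
    degrees, minutes = divmod(minutes, 60)  # 45.0, 30.0
    print(int(degrees), int(minutes), seconds)  # 45 30 0.0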
25,240 |
def apply_weight_drop(block, local_param_regex, rate, axes=(),
weight_dropout_mode='training'):
if not rate:
return
existing_params = _find_params(block, local_param_regex)
for (local_param_name, param), \
(ref_params_list, ref_reg_params_list) in existing_params.items():
dropped_param = WeightDropParameter(param, rate, weight_dropout_mode, axes)
for ref_params in ref_params_list:
ref_params[param.name] = dropped_param
for ref_reg_params in ref_reg_params_list:
ref_reg_params[local_param_name] = dropped_param
if hasattr(block, local_param_name):
local_attr = getattr(block, local_param_name)
if local_attr == param:
local_attr = dropped_param
elif isinstance(local_attr, (list, tuple)):
if isinstance(local_attr, tuple):
local_attr = list(local_attr)
for i, v in enumerate(local_attr):
if v == param:
local_attr[i] = dropped_param
elif isinstance(local_attr, dict):
for k, v in local_attr.items():
if v == param:
local_attr[k] = dropped_param
else:
continue
if local_attr:
super(Block, block).__setattr__(local_param_name, local_attr)
|
Apply weight drop to the parameter of a block.
Parameters
----------
block : Block or HybridBlock
The block whose parameters are to have weight drop applied.
local_param_regex : str
The regex for parameter names used in the self.params.get(), such as 'weight'.
rate : float
Fraction of the input units to drop. Must be a number between 0 and 1.
axes : tuple of int, default ()
The axes on which dropout mask is shared. If empty, regular dropout is applied.
weight_dropout_mode : {'training', 'always'}, default 'training'
Whether the weight dropout should be applied only at training time, or always be applied.
Examples
--------
>>> net = gluon.rnn.LSTM(10, num_layers=2, bidirectional=True)
>>> gluonnlp.model.apply_weight_drop(net, r'.*h2h_weight', 0.5)
>>> net.collect_params()
lstm0_ (
Parameter lstm0_l0_i2h_weight (shape=(40, 0), dtype=<class 'numpy.float32'>)
WeightDropParameter lstm0_l0_h2h_weight (shape=(40, 10), dtype=<class 'numpy.float32'>, \
rate=0.5, mode=training)
Parameter lstm0_l0_i2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_l0_h2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_r0_i2h_weight (shape=(40, 0), dtype=<class 'numpy.float32'>)
WeightDropParameter lstm0_r0_h2h_weight (shape=(40, 10), dtype=<class 'numpy.float32'>, \
rate=0.5, mode=training)
Parameter lstm0_r0_i2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_r0_h2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_l1_i2h_weight (shape=(40, 20), dtype=<class 'numpy.float32'>)
WeightDropParameter lstm0_l1_h2h_weight (shape=(40, 10), dtype=<class 'numpy.float32'>, \
rate=0.5, mode=training)
Parameter lstm0_l1_i2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_l1_h2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_r1_i2h_weight (shape=(40, 20), dtype=<class 'numpy.float32'>)
WeightDropParameter lstm0_r1_h2h_weight (shape=(40, 10), dtype=<class 'numpy.float32'>, \
rate=0.5, mode=training)
Parameter lstm0_r1_i2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_r1_h2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
)
>>> ones = mx.nd.ones((3, 4, 5))
>>> net.initialize()
>>> with mx.autograd.train_mode():
... net(ones).max().asscalar() != net(ones).max().asscalar()
True
|
25,241 |
def _write_local_schema_file(self, cursor):
schema = []
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
for name, type in zip(cursor.column_names, cursor.column_types):
schema.append(self.generate_schema_dict(name, type))
json_serialized_schema = json.dumps(schema).encode()
tmp_schema_file_handle.write(json_serialized_schema)
return {self.schema_filename: tmp_schema_file_handle}
|
Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
|
25,242 |
def _reorient_3d(image):
new_image = numpy.zeros([image.dimensions[image.sagittal_orientation.normal_component],
image.dimensions[image.coronal_orientation.normal_component],
image.dimensions[image.axial_orientation.normal_component]],
dtype=image.nifti_data.dtype)
if image.coronal_orientation.y_inverted:
for i in range(new_image.shape[2]):
new_image[:, :, i] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
new_image.shape[2] - 1 - i).original_data))
else:
for i in range(new_image.shape[2]):
new_image[:, :, i] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
i).original_data))
return new_image
|
Reorganize the data for a 3d nifti
|
25,243 |
def richardson(vals, k, c=None):
if c is None:
c = richardson_parameter(vals, k)
return vals[k] - (vals[k] - vals[k - 1]) / c
|
Richardson extrapolation with parameter estimation
|
25,244 |
def _parse_block(self, block, node):
assert block[self.get_key()] == 'Block'
if self.is_compact_ast:
statements = block['statements']
else:
statements = block[self.get_children()]
for statement in statements:
node = self._parse_statement(statement, node)
return node
|
Return:
Node
|
25,245 |
def _check_iso9660_filename(fullname, interchange_level):
(name, extension, version) = _split_iso9660_filename(fullname)
if version != b'' and (int(version) < 1 or int(version) > 32767):
raise pycdlibexception.PyCdlibInvalidInput()
if not name and not extension:
raise pycdlibexception.PyCdlibInvalidInput()
if b in name or b in extension:
raise pycdlibexception.PyCdlibInvalidInput()
if interchange_level == 1:
if len(name) > 8 or len(extension) > 3:
raise pycdlibexception.PyCdlibInvalidInput()
else:
pass
if interchange_level < 4:
_check_d1_characters(name)
_check_d1_characters(extension)
|
A function to check that a file identifier conforms to the ISO9660 rules
for a particular interchange level.
Parameters:
fullname - The name to check.
interchange_level - The interchange level to check against.
Returns:
Nothing.
|
25,246 |
def _link_bam_file(in_file, new_dir, data):
new_dir = utils.safe_makedir(new_dir)
out_file = os.path.join(new_dir, os.path.basename(in_file))
if not utils.file_exists(out_file):
out_file = os.path.join(new_dir, "%s-prealign.bam" % dd.get_sample_name(data))
if data.get("cwl_keys"):
if utils.file_exists(in_file + ".bai"):
out_file = in_file
else:
utils.copy_plus(in_file, out_file)
else:
utils.symlink_plus(in_file, out_file)
return out_file
|
Provide symlinks of BAM file and existing indexes if needed.
|
25,247 |
def fig3(args):
from jcvi.formats.bed import Bed
p = OptionParser(fig3.__doc__)
p.add_option("--gauge_step", default=10000000, type="int",
help="Step size for the base scale")
opts, args, iopts = p.set_image_options(args, figsize="12x9")
if len(args) != 4:
sys.exit(not p.print_help())
chrs, sizes, bedfile, datadir = args
gauge_step = opts.gauge_step
diverge = iopts.diverge
rr, gg = diverge
chrs = [[x] for x in chrs.split(",")]
sizes = Sizes(sizes).mapping
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
chr_sizes, chr_sum_sizes, ratio = calc_ratio(chrs, sizes)
seqidsfile = make_seqids(chrs)
klayout = make_layout(chrs, chr_sum_sizes, ratio, template_f3a, shift=.05)
height = .07
r = height / 4
K = Karyotype(fig, root, seqidsfile, klayout, gap=gap,
height=height, lw=2, generank=False, sizes=sizes,
heightpad=r, roundrect=True, plot_label=False)
for kl in K.layout:
if kl.empty:
continue
lx, ly = kl.xstart, kl.y
if lx < .11:
lx += .1
ly += .06
label = kl.label
root.text(lx - .015, ly, label, fontsize=15,
ha="right", va="center")
datafiles = ("chrA02.bzh.forxmgr", "parent.A02.per10kb.forxmgr",
"parent.C2.per10kb.forxmgr", "chrC02.bzh.forxmgr")
datafiles = [op.join(datadir, x) for x in datafiles]
tracks = K.tracks
hlfile = op.join(datadir, "bzh.regions.forhaibao")
xy_axes = []
for t, datafile in zip(tracks, datafiles):
ax = make_affix_axis(fig, t, -r, height=2 * r)
xy_axes.append(ax)
chr = t.seqids[0]
xy = XYtrack(ax, datafile, color="lightslategray")
start, end = 0, t.total
xy.interpolate(end)
xy.cap(ymax=40)
xy.import_hlfile(hlfile, chr, diverge=diverge)
xy.draw()
ax.set_xlim(start, end)
gauge_ax = make_affix_axis(fig, t, -r)
adjust_spines(gauge_ax, ["bottom"])
setup_gauge_ax(gauge_ax, start, end, gauge_step)
ax_Ar = make_affix_axis(fig, tracks[1], r, height=r/2)
ax_Co = make_affix_axis(fig, tracks[2], r, height=r/2)
order = Bed(bedfile).order
for asterisk in (False, True):
conversion_track(order, "data/Genes.Converted.seuil.0.6.AtoC.txt",
0, "A02", ax_Ar, rr, asterisk=asterisk)
conversion_track(order, "data/Genes.Converted.seuil.0.6.AtoC.txt",
1, "C2", ax_Co, gg, asterisk=asterisk)
conversion_track(order, "data/Genes.Converted.seuil.0.6.CtoA.txt",
0, "A02", ax_Ar, gg, ypos=1, asterisk=asterisk)
conversion_track(order, "data/Genes.Converted.seuil.0.6.CtoA.txt",
1, "C2", ax_Co, rr, ypos=1, asterisk=asterisk)
Ar, Co = xy_axes[1:3]
annotations = ((Ar, "Bra028920 Bra028897", "center", "1DAn2+"),
(Ar, "Bra020081 Bra020171", "right", "2DAn2+"),
(Ar, "Bra020218 Bra020286", "left", "3DAn2+"),
(Ar, "Bra008143 Bra008167", "left", "4DAn2-"),
(Ar, "Bra029317 Bra029251", "right", "5DAn2+ (GSL)"),
(Co, "Bo2g001000 Bo2g001300", "left", "1DCn2-"),
(Co, "Bo2g018560 Bo2g023700", "right", "2DCn2-"),
(Co, "Bo2g024450 Bo2g025390", "left", "3DCn2-"),
(Co, "Bo2g081060 Bo2g082340", "left", "4DCn2+"),
(Co, "Bo2g161510 Bo2g164260", "right", "5DCn2-"))
for ax, genes, ha, label in annotations:
g1, g2 = genes.split()
x1, x2 = order[g1][1].start, order[g2][1].start
if ha == "center":
x = (x1 + x2) / 2 * .8
elif ha == "left":
x = x2
else:
x = x1
label = r"\textit{{{0}}}".format(label)
color = rr if "+" in label else gg
ax.text(x, 30, label, color=color, fontsize=9, ha=ha, va="center")
ax_Ar.set_xlim(0, tracks[1].total)
ax_Ar.set_ylim(-1, 1)
ax_Co.set_xlim(0, tracks[2].total)
ax_Co.set_ylim(-1, 1)
gstep = 5000000
order = "swede,kale,h165,yudal,aviso,abu,bristol".split(",")
labels_dict = {"h165": "Resynthesized (H165)", "abu": "Aburamasari"}
hlsuffix = "regions.forhaibao"
chr1, chr2 = "chrA02", "chrC02"
t1, t2 = tracks[0], tracks[-1]
s1, s2 = sizes[chr1], sizes[chr2]
canvas1 = (t1.xstart, .75, t1.xend - t1.xstart, .2)
c = Coverage(fig, root, canvas1, chr1, (0, s1), datadir,
order=order, gauge=None, plot_chr_label=False,
gauge_step=gstep, palette="gray",
cap=40, hlsuffix=hlsuffix, labels_dict=labels_dict,
diverge=diverge)
yys = c.yys
x1, x2 = .37, .72
tip = .02
annotations = ((x1, yys[2] + .3 * tip, tip, tip / 2, "FLC"),
(x1, yys[3] + .6 * tip, tip, tip / 2, "FLC"),
(x1, yys[5] + .6 * tip, tip, tip / 2, "FLC"),
(x2, yys[0] + .9 * tip, -1.2 * tip, 0, "GSL"),
(x2, yys[4] + .9 * tip, -1.2 * tip, 0, "GSL"),
(x2, yys[6] + .9 * tip, -1.2 * tip, 0, "GSL"))
arrowprops=dict(facecolor='black', shrink=.05, frac=.5,
width=1, headwidth=4)
for x, y, dx, dy, label in annotations:
label = r"\textit{{{0}}}".format(label)
root.annotate(label, xy=(x, y), xytext=(x + dx, y + dy),
arrowprops=arrowprops, color=rr, fontsize=9,
ha="center", va="center")
canvas2 = (t2.xstart, .05, t2.xend - t2.xstart, .2)
Coverage(fig, root, canvas2, chr2, (0, s2), datadir,
order=order, gauge=None, plot_chr_label=False,
gauge_step=gstep, palette="gray",
cap=40, hlsuffix=hlsuffix, labels_dict=labels_dict,
diverge=diverge)
pad = .03
labels = ((.1, .67, "A"), (t1.xstart - 3 * pad, .95 + pad, "B"),
(t2.xstart - 3 * pad, .25 + pad, "C"))
panel_labels(root, labels)
normalize_axes(root)
image_name = "napus-fig3." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
|
%prog fig3 chrA02,A02,C2,chrC02 chr.sizes all.bed data
Napus Figure 3 displays alignments between quartet chromosomes, inset
with read histograms.
|
25,248 |
def send_command(self, cmd):
logger.debug('Sending command: {0}'.format(cmd))
self.comm_chan.sendall(cmd + '\n')
|
Send a command to the remote SSH server.
:param cmd: The command to send
|
25,249 |
def rgevolve(self, scale_out, **kwargs):
self._check_initial()
return rge.smeft_evolve(C_in=self.C_in,
scale_high=self.scale_high,
scale_in=self.scale_in,
scale_out=scale_out,
**kwargs)
|
Solve the SMEFT RGEs from the initial scale to `scale_out`.
Returns a dictionary with parameters and Wilson coefficients at
`scale_out`. Additional keyword arguments will be passed to
the ODE solver `scipy.integrate.odeint`.
|
25,250 |
def create(self, rs_params):
repl_id = rs_params.get('id', None)
if repl_id is not None and repl_id in self:
raise ReplicaSetError(
"replica set with id={id} already exists".format(id=repl_id))
repl = ReplicaSet(rs_params)
self[repl.repl_id] = repl
return repl.repl_id
|
Create a new replica set.
Args:
rs_params - replica set configuration
Return repl_id, which can be used to retrieve the replica set.
|
25,251 |
def prod(self, axis=None, dtype=None, out=None, keepdims=False):
return self.elem.__array_ufunc__(
np.multiply, 'reduce', self.elem,
axis=axis, dtype=dtype, out=(out,), keepdims=keepdims)
|
Return the product of ``self``.
See Also
--------
numpy.prod
sum
|
25,252 |
async def update_firmware(port: str,
firmware_file_path: str,
loop: Optional[asyncio.AbstractEventLoop])\
-> Tuple[str, Tuple[bool, str]]:
ports_before_update = await _discover_ports()
config_file_path = os.path.join(package_root,
, , )
kwargs: Dict[str, Any] = {
'stdout': asyncio.subprocess.PIPE,
'stderr': asyncio.subprocess.PIPE
}
if loop:
kwargs['loop'] = loop
# Standard avrdude invocation: config file, verbose output, part number,
# programmer id, serial port, baud rate, no auto-erase, flash write.
proc = await asyncio.create_subprocess_exec(
'avrdude', '-C{}'.format(config_file_path), '-v',
'-p{}'.format(PART_NO),
'-c{}'.format(PROGRAMMER_ID),
'-P{}'.format(port),
'-b{}'.format(BAUDRATE), '-D',
'-Uflash:w:{}:i'.format(firmware_file_path),
**kwargs)
await proc.wait()
_result = await proc.communicate()
result = _result[1].decode()
avrdude_res = _format_avrdude_response(result)
if avrdude_res[0]:
log.debug(result)
else:
log.error("Failed to update module firmware for {}: {}"
.format(port, avrdude_res[1]))
new_port = await _port_on_mode_switch(ports_before_update)
log.info("New port: {}".format(new_port))
return new_port, avrdude_res
|
Run avrdude firmware upload command. Switch back to normal module port
Note: For modules with old bootloader, the kernel could assign the module
a new port after the update (since the board is automatically reset).
Scan for such a port change and use the appropriate port.
Returns a tuple of the new port to communicate on (or None if it was not
found) and a tuple of success and message from avrdude.
|
25,253 |
def _ParseShVariables(self, lines):
paths = {}
for line in lines:
for entry in line:
if "=" in entry:
target, vals = (entry.split("=", 1) + [""])[:2]
if vals:
path_vals = vals.split(":")
else:
path_vals = []
self._ExpandPath(target, path_vals, paths)
elif entry not in self._SH_CONTINUATION:
break
return paths
|
Extract env_var and path values from sh derivative shells.
Iterates over each line, word by word searching for statements that set the
path. These are either variables, or conditions that would allow a variable
to be set later in the line (e.g. export).
Args:
lines: A list of lines, each of which is a list of space separated words.
Returns:
a dictionary of path names and values.
|
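For example, one tokenized line setting PATH should end up in the returned dictionary split on ':'; a standalone sketch of that single step (assuming _ExpandPath simply stores the split values under the variable name):
.. code-block:: python

    line = ['export', 'PATH=/usr/bin:/bin']
    paths = {}
    for entry in line:
        if '=' in entry:
            target, vals = (entry.split('=', 1) + [''])[:2]
            paths[target] = vals.split(':') if vals else []
    print(paths)  # {'PATH': ['/usr/bin', '/bin']}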
25,254 |
def button_number(self):
if self.type != EventType.TABLET_PAD_BUTTON:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_pad_get_button_number(
self._handle)
|
The button number that triggered this event, starting at 0.
For events that are not of type
:attr:`~libinput.constant.Event.TABLET_PAD_BUTTON`,
this property raises :exc:`AttributeError`.
Note that the number returned is a generic sequential button number
and not a semantic button code as defined in ``linux/input.h``.
See `Tablet pad button numbers`_ for more details.
Returns:
int: The button triggering this event.
Raises:
AttributeError
|
25,255 |
def surfacemass(self,R,log=False):
if log:
return -R/self._params[0]
else:
return sc.exp(-R/self._params[0])
|
NAME:
surfacemass
PURPOSE:
return the surface density profile at this R
INPUT:
R - Galactocentric radius (/ro)
log - if True, return the log (default: False)
OUTPUT:
Sigma(R)
HISTORY:
2010-03-26 - Written - Bovy (NYU)
|
25,256 |
def __runTaskMainLoop(self, numIters, learningOffAt=None):
self._model.resetSequenceStates()
self._currentRecordIndex = -1
while True:
if self._isKilled:
break
if self._isCanceled:
break
if self._isInterrupted.isSet():
self.__setAsOrphaned()
break
if self._isMature:
if not self._isBestModel:
self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED
break
else:
self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
if learningOffAt is not None \
and self._currentRecordIndex == learningOffAt:
self._model.disableLearning()
try:
inputRecord = self._inputSource.getNextRecordDict()
if self._currentRecordIndex < 0:
self._inputSource.setTimeout(10)
except Exception, e:
raise utils.JobFailException(ErrorCodes.streamReading, str(e.args),
traceback.format_exc())
if inputRecord is None:
self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
break
if inputRecord:
self._currentRecordIndex += 1
result = self._model.run(inputRecord=inputRecord)
result.metrics = self.__metricMgr.update(result)
if not result.metrics:
result.metrics = self.__metricMgr.getMetrics()
if InferenceElement.encodings in result.inferences:
result.inferences.pop(InferenceElement.encodings)
result.sensorInput.dataEncodings = None
self._writePrediction(result)
self._periodic.tick()
if numIters >= 0 and self._currentRecordIndex >= numIters-1:
break
else:
raise ValueError("Got an empty record from FileSource: %r" %
inputRecord)
|
Main loop of the OPF Model Runner.
Parameters:
-----------------------------------------------------------------------
recordIterator: Iterator for counting number of records (see _runTask)
learningOffAt: If not None, learning is turned off when we reach this
iteration number
|
25,257 |
def create_task(self, list_id, title, assignee_id=None, completed=None, recurrence_type=None, recurrence_count=None, due_date=None, starred=None):
return tasks_endpoint.create_task(self, list_id, title, assignee_id=assignee_id, completed=completed, recurrence_type=recurrence_type, recurrence_count=recurrence_count, due_date=due_date, starred=starred)
|
Creates a new task with the given information in the list with the given ID
|
25,258 |
def delete(self, cluster):
if cluster.name not in self.clusters:
raise ClusterNotFound(
"Unable to delete non-existent cluster %s" % cluster.name)
del self.clusters[cluster.name]
|
Deletes the cluster from memory.
:param cluster: cluster to delete
:type cluster: :py:class:`elasticluster.cluster.Cluster`
|
25,259 |
def get_size(self, boundary):
if self.filesize is not None:
valuesize = self.filesize
else:
valuesize = len(self.value)
return len(self.encode_hdr(boundary)) + 2 + valuesize
|
Returns the size in bytes that this param will be when encoded
with the given boundary.
|
25,260 |
def masses_of_galaxies_within_ellipses_in_units(self, major_axis : dim.Length, unit_mass=,
critical_surface_density=None):
return list(map(lambda galaxy: galaxy.mass_within_ellipse_in_units(
major_axis=major_axis, unit_mass=unit_mass, kpc_per_arcsec=self.kpc_per_arcsec,
critical_surface_density=critical_surface_density),
self.galaxies))
|
Compute the total mass of all galaxies in this plane within a ellipse of specified major-axis.
See *galaxy.angular_mass_within_ellipse* and *mass_profiles.angular_mass_within_ellipse* for details \
of how this is performed.
Parameters
----------
major_axis : float
The major-axis radius of the ellipse.
unit_mass : str
The units the mass is returned in (e.g. angular units or solar masses).
critical_surface_density : float or None
The critical surface mass density of the lens configuration, used to convert the mass from angular to physical units.
|
25,261 |
def _normalize_http_methods(http_method):
if http_method.upper() == 'ANY':
for method in SamApiProvider._ANY_HTTP_METHODS:
yield method.upper()
else:
yield http_method.upper()
|
Normalizes HTTP methods. API Gateway allows an HTTP method of ANY, a special verb to denote all
HTTP methods supported by API Gateway.
:param str http_method: Http method
:yield str: Either the input http_method or one of the _ANY_HTTP_METHODS (normalized Http Methods)
|
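A self-contained sketch of the same normalization, with a plain list standing in for SamApiProvider._ANY_HTTP_METHODS (its exact contents are an assumption here):
.. code-block:: python

    _ANY_HTTP_METHODS = ['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS', 'PATCH']

    def normalize_http_methods(http_method):
        # Expand the special ANY verb into every supported method.
        if http_method.upper() == 'ANY':
            for method in _ANY_HTTP_METHODS:
                yield method.upper()
        else:
            yield http_method.upper()

    print(list(normalize_http_methods('any')))  # all verbs above
    print(list(normalize_http_methods('get')))  # ['GET']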
25,262 |
def filename_items_for_filetype(filenames, filetype_info):
matched_files = []
for pattern in filetype_info['file_patterns']:
for filename in match_filenames(filenames, pattern):
if filename in matched_files:
continue
try:
filename_info = parse(
pattern, get_filebase(filename, pattern))
except ValueError:
logger.debug("Can't parse %s with %s.", filename, pattern)
continue
matched_files.append(filename)
yield filename, filename_info
|
Iterator over the filenames matching *filetype_info*.
|
25,263 |
def create(self, deviceType):
r = self._apiClient.post("api/v0002/device/types", deviceType)
if r.status_code == 201:
return DeviceType(apiClient=self._apiClient, **r.json())
else:
raise ApiException(r)
|
Register one or more new device types, each request can contain a maximum of 512KB.
|
25,264 |
def allocate(self, size, max_time_to_block_ms):
with self._lock:
if self._free:
return self._free.popleft()
elif self._poolable_size == 0:
return io.BytesIO()
else:
buf = None
more_memory = threading.Condition(self._lock)
self._waiters.append(more_memory)
while buf is None:
start_wait = time.time()
more_memory.wait(max_time_to_block_ms / 1000.0)
end_wait = time.time()
if self.wait_time:
self.wait_time.record(end_wait - start_wait)
if self._free:
buf = self._free.popleft()
else:
self._waiters.remove(more_memory)
raise Errors.KafkaTimeoutError(
"Failed to allocate memory within the configured"
" max blocking time")
removed = self._waiters.popleft()
assert removed is more_memory
if self._free and self._waiters:
self._waiters[0].notify()
return buf
|
Allocate a buffer of the given size. This method blocks if there is not
enough memory and the buffer pool is configured with blocking mode.
Arguments:
size (int): The buffer size to allocate in bytes [ignored]
max_time_to_block_ms (int): The maximum time in milliseconds to
block for buffer memory to be available
Returns:
io.BytesIO
|
25,265 |
def prod(self):
return LazyOpResult(
grizzly_impl.aggr(
self.expr,
"*",
1,
self.weld_type
),
self.weld_type,
0
)
|
Summary
Returns:
TYPE: Description
|
25,266 |
def update_ipsecpolicy(self, ipsecpolicy, body=None):
return self.put(self.ipsecpolicy_path % (ipsecpolicy), body=body)
|
Updates an IPsecPolicy.
|
25,267 |
def capabilities(self):
caps = []
for cap in DeviceCapability:
if self._libinput.libinput_device_has_capability(self._handle, cap):
caps.append(cap)
return tuple(caps)
|
A tuple of capabilities this device supports.
Returns:
(~libinput.constant.DeviceCapability): Device capabilities.
|
25,268 |
def plos_doi_to_xmlurl(doi_string):
doi_url = .format(doi_string)
log.debug(.format(doi_url))
try:
resolved_page = urllib.request.urlopen(doi_url)
except urllib.error.URLError as err:
print()
raise err
else:
resolved_address = resolved_page.geturl()
log.debug(.format(resolved_address))
parsed = urllib.parse.urlparse(resolved_address)
xml_url = .format(parsed.scheme, parsed.netloc)
xml_url +=
xml_path = parsed.path.replace(, ).replace(, )
xml_path = xml_path.split()[1]
xml_url += .format(xml_path, )
log.debug(.format(xml_url))
return xml_url
|
Attempts to resolve a PLoS DOI into a URL path to the XML file.
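Illustrative usage (the DOI below is a placeholder, not a real article)::
    xml_url = plos_doi_to_xmlurl('10.1371/journal.pone.0012345')
    print(xml_url)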
|
25,269 |
def resize_hess(self, func):
if func is None:
return None
@wraps(func)
def resized(*args, **kwargs):
out = func(*args, **kwargs)
out = np.atleast_2d(np.squeeze(out))
mask = [p not in self._fixed_params for p in self.parameters]
return np.atleast_2d(out[mask, mask])
return resized
|
Removes values with identical indices to fixed parameters from the
output of func. func has to return the Hessian of a scalar function.
:param func: Hessian function to be wrapped. Is assumed to be the
Hessian of a scalar function.
:return: Hessian corresponding to free parameters only.
|
25,270 |
def xlink_href_target(self, node, group=None):
    xlink_href = node.attrib.get('{http://www.w3.org/1999/xlink}href')
if not xlink_href:
return None
match = re.match(r"^data:image/(jpeg|png);base64", xlink_href)
if match:
img_format = match.groups()[0]
image_data = base64.decodestring(xlink_href[(match.span(0)[1] + 1):].encode())
        file_indicator, path = tempfile.mkstemp(suffix='.%s' % img_format)
        with open(path, 'wb') as fh:
fh.write(image_data)
os.close(file_indicator)
return path
    if '#' in xlink_href:
        iri, fragment = xlink_href.split('#', 1)
else:
iri, fragment = xlink_href, None
if iri:
if not isinstance(self.source_path, str):
            logger.error(
                "Unable to resolve image path %r as the SVG source is not a file system path." % iri
            )
return None
path = os.path.normpath(os.path.join(os.path.dirname(self.source_path), iri))
if not os.access(path, os.R_OK):
return None
if path == self.source_path:
iri = None
if iri:
        if path.endswith('.svg'):
if path in self._parent_chain:
logger.error("Circular reference detected in file.")
raise CircularRefError()
if path not in self._external_svgs:
self._external_svgs[path] = ExternalSVG(path, self)
ext_svg = self._external_svgs[path]
if ext_svg.root_node is not None:
if fragment:
ext_frag = ext_svg.get_fragment(fragment)
if ext_frag is not None:
return ext_svg.renderer, ext_frag
else:
return ext_svg.renderer, ext_svg.root_node
else:
try:
PDFImage(path, 0, 0)
except IOError:
logger.error("Unable to read the image %s. Skipping..." % path)
return None
return path
elif fragment:
if fragment in self.definitions:
return self, self.definitions[fragment]
else:
self.waiting_use_nodes[fragment].append((node, group))
return DELAYED
|
Return either:
- a tuple (renderer, node) when the xlink:href attribute targets
a vector file or node
- the path to an image file for any raster image targets
- None if any problem occurs
|
25,271 |
def build(self, builder):
params = dict(StudyOID=self.study_oid,
MetaDataVersionOID=self.metadata_version_oid,
EffectiveDate=dt_to_iso8601(self.effective_date))
builder.start("MetaDataVersionRef", params)
builder.end("MetaDataVersionRef")
|
Build XML by appending to builder
|
25,272 |
def exists(table_name, region=None, key=None, keyid=None, profile=None):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.describe_table(table_name)
except JSONResponseError as e:
        if e.error_code == 'ResourceNotFoundException':
return False
raise
return True
|
Check to see if a table exists.
CLI Example:
.. code-block:: bash
salt myminion boto_dynamodb.exists table_name region=us-east-1
|
25,273 |
def format_axis(ax, label_padding=2, tick_padding=0, yticks_position='left'):
    ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position(yticks_position)
    # 'which' and 'direction' values below are assumed; the original literals were lost
    ax.yaxis.set_tick_params(which='both', direction='out', labelsize=fontsize,
                             pad=tick_padding, length=2, width=0.5)
    ax.xaxis.set_tick_params(which='both', direction='out', labelsize=fontsize,
                             pad=tick_padding, length=2, width=0.5)
ax.xaxis.labelpad = label_padding
ax.yaxis.labelpad = label_padding
ax.xaxis.label.set_size(fontsize)
ax.yaxis.label.set_size(fontsize)
|
Set standardized axis formatting for figure.
|
25,274 |
def detect(self, tokens):
results = []
for abbr_span, long_span in self.detect_spans(tokens):
results.append((tokens[abbr_span[0]:abbr_span[1]], tokens[long_span[0]:long_span[1]]))
return results
|
Return a (abbr, long) pair for each abbreviation definition.
|
25,275 |
def _from_dict(cls, mapping):
if mapping.get("all"):
return cls(all_=True)
r_mappings = mapping.get("ranges", ())
ranges = [KeyRange(**r_mapping) for r_mapping in r_mappings]
return cls(keys=mapping.get("keys", ()), ranges=ranges)
|
Create an instance from the corresponding state mapping.
:type mapping: dict
:param mapping: the instance state.
|
25,276 |
def _make_futures(futmap_keys, class_check, make_result_fn):
futmap = {}
for key in futmap_keys:
if class_check is not None and not isinstance(key, class_check):
raise ValueError("Expected list of {}".format(type(class_check)))
futmap[key] = concurrent.futures.Future()
if not futmap[key].set_running_or_notify_cancel():
raise RuntimeError("Future was cancelled prematurely")
f = concurrent.futures.Future()
f.add_done_callback(lambda f: make_result_fn(f, futmap))
if not f.set_running_or_notify_cancel():
raise RuntimeError("Future was cancelled prematurely")
return f, futmap
|
Create futures and a futuremap for the keys in futmap_keys,
and create a request-level future to be passed to the C API.
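A hedged sketch of how the returned pair is typically wired up; the callback, keys, and results
below are illustrative rather than confluent-kafka's actual admin flow::
    def make_result(top_future, futmap):
        # Fan the overall outcome out to the per-key futures.
        try:
            results = top_future.result()
            for key, fut in futmap.items():
                fut.set_result(results.get(key))
        except Exception as exc:
            for fut in futmap.values():
                fut.set_exception(exc)
    top, futmap = _make_futures(["topic-a", "topic-b"], str, make_result)
    top.set_result({"topic-a": "created", "topic-b": "created"})
    print(futmap["topic-a"].result())  # "created"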
|
25,277 |
def _on_return(self, text):
    # The conditional that opened an image file for upload is missing from the
    # extracted source; default to sending the message as text only.
    image_file = None
text = replace_emoticons(text)
segments = hangups.ChatMessageSegment.from_str(text)
self._coroutine_queue.put(
self._handle_send_message(
self._conversation.send_message(
segments, image_file=image_file
)
)
)
|
Called when the user presses return on the send message widget.
|
25,278 |
def platform_mapped(func):
def inner(*args, **kwargs):
from rez.config import config
result = func(*args, **kwargs)
entry = config.platform_map.get(func.__name__)
if entry:
for key, value in entry.iteritems():
result, changes = re.subn(key, value, result)
if changes > 0:
break
return result
return inner
|
Decorates functions for lookups within a config.platform_map dictionary.
The first level key is mapped to the func.__name__ of the decorated function.
Regular expressions are used on the second level key, values.
Note that there is no guaranteed order within the dictionary evaluation. Only the first matching
regular expression is being used.
For example:
config.platform_map = {
"os": {
r"Scientific Linux-(.*)": r"Scientific-\1", # Scientific Linux-x.x -> Scientific-x.x
r"Ubuntu-14.\d": r"Ubuntu-14", # Any Ubuntu-14.x -> Ubuntu-14
},
"arch": {
"x86_64": "64bit", # Maps both x86_64 and amd64 -> 64bit (don't)
"amd64": "64bit",
},
}
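A standalone sketch of the same idea, with the map inlined instead of read from the rez config
(names and mappings are illustrative)::
    import re
    PLATFORM_MAP = {"os": {r"Ubuntu-14.\d": r"Ubuntu-14"}}
    def platform_mapped(func):
        def inner(*args, **kwargs):
            result = func(*args, **kwargs)
            for pattern, repl in PLATFORM_MAP.get(func.__name__, {}).items():
                result, changes = re.subn(pattern, repl, result)
                if changes:
                    break
            return result
        return inner
    @platform_mapped
    def os():
        return "Ubuntu-14.4"
    print(os())  # Ubuntu-14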
|
25,279 |
def _lons(self):
lons = _np.linspace(0.0, 360.0 - 360.0 / self.nlon, num=self.nlon)
return lons
|
Return the longitudes (in degrees) of the gridded data.
|
25,280 |
def _patch_distribution_metadata():
    for attr in ('write_pkg_file', 'read_pkg_file', 'get_metadata_version'):
new_val = getattr(setuptools.dist, attr)
setattr(distutils.dist.DistributionMetadata, attr, new_val)
|
Patch write_pkg_file and read_pkg_file for higher metadata standards
|
25,281 |
def dump(obj, fp, **kwargs):
return json.dump(obj, fp, cls=BioCJSONEncoder, **kwargs)
|
Serialize obj as a JSON formatted stream to fp (a .write()-supporting file-like object)
|
25,282 |
def spd_eig(W, epsilon=1e-10, method=, canonical_signs=False):
    assert _np.allclose(W.T, W), 'W is not a symmetric matrix'
    if method.lower() == 'qr':
from .eig_qr.eig_qr import eig_qr
s, V = eig_qr(W)
    elif method.lower() == 'schur':
from scipy.linalg import schur
S, V = schur(W)
s = _np.diag(S)
else:
        raise ValueError('Invalid method: ' + method)
s, V = sort_by_norm(s, V)
evmin = _np.min(s)
if evmin < 0:
epsilon = max(epsilon, -evmin + 1e-16)
evnorms = _np.abs(s)
n = _np.shape(evnorms)[0]
m = n - _np.searchsorted(evnorms[::-1], epsilon)
if m == 0:
        raise _ZeroRankError('All eigenvalues are smaller than %g; rank reduction would discard all dimensions.' % epsilon)
Vm = V[:, 0:m]
sm = s[0:m]
if canonical_signs:
for j in range(m):
jj = _np.argmax(_np.abs(Vm[:, j]))
Vm[:, j] *= _np.sign(Vm[jj, j])
return sm, Vm
|
Rank-reduced eigenvalue decomposition of symmetric positive definite matrix.
Removes all negligible eigenvalues
Parameters
----------
W : ndarray((n, n), dtype=float)
Symmetric positive-definite (spd) matrix.
epsilon : float
Truncation parameter. Eigenvalues with norms smaller than this cutoff will
be removed.
method : str
Method to perform the decomposition of :math:`W` before inverting. Options are:
* 'QR': QR-based robust eigenvalue decomposition of W
* 'schur': Schur decomposition of W
canonical_signs : boolean, default = False
Fix signs in V, s. t. the largest element of in every row of V is positive.
Returns
-------
s : ndarray(k)
k non-negligible eigenvalues, sorted by descending norms
V : ndarray(n, k)
k leading eigenvectors
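Example
-------
Illustrative call on a small random SPD matrix, assuming SciPy is available for the Schur backend
(shapes in the comments are approximate)::
    import numpy as np
    A = np.random.rand(5, 3)
    W = A @ A.T                    # symmetric positive semi-definite, rank <= 3
    s, V = spd_eig(W, epsilon=1e-10, method='schur')
    print(s.shape, V.shape)        # roughly (3,) and (5, 3)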
|
25,283 |
def forwards(self, orm):
"Write your forwards methods here."
for instance in orm.LocationSource.objects.all():
try:
instance.user = instance.points.all()[0].user
instance.save()
except IndexError:
instance.delete()
|
Write your forwards methods here.
|
25,284 |
def finalize(self, **kwargs):
self.set_title(
"{} Ranking of {} Features".format(
self.ranking_.title(), len(self.features_)
)
)
|
Finalize executes any subclass-specific axes finalization steps.
The user calls poof and poof calls finalize.
Parameters
----------
kwargs: dict
generic keyword arguments
|
25,285 |
def is_compression_coordinate(ds, variable):
if not is_coordinate_variable(ds, variable):
return False
    compress = getattr(ds.variables[variable], 'compress', None)
if not isinstance(compress, basestring):
return False
if not compress:
return False
if variable in compress:
return False
for dim in compress.split():
if dim not in ds.dimensions:
return False
return True
|
Returns True if the variable is a coordinate variable that defines a
compression scheme.
:param netCDF4.Dataset ds: An open netCDF dataset
:param str variable: Variable name
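A hypothetical CF "compression by gathering" setup this check would accept, assuming the module's
is_coordinate_variable helper behaves as a standard coordinate-variable test::
    import netCDF4
    ds = netCDF4.Dataset('example.nc', 'w', diskless=True)
    ds.createDimension('lat', 4)
    ds.createDimension('lon', 5)
    ds.createDimension('landpoint', 7)
    lp = ds.createVariable('landpoint', 'i4', ('landpoint',))
    lp.compress = 'lat lon'   # names the dimensions being gathered
    print(is_compression_coordinate(ds, 'landpoint'))  # expected: True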
|
25,286 |
def _init(self):
self._ready = self._file.read(1)
self._hall_left = self._file.read(2)
self._hall_right = self._file.read(2)
self._carriage_type = self._file.read(1)[0]
self._carriage_position = self._file.read(1)[0]
|
Read the initial state bytes: the ready flag, left and right hall sensor values, and the carriage type and position.
|
25,287 |
def AddDescriptor(self, desc):
if not isinstance(desc, descriptor.Descriptor):
        raise TypeError('Expected instance of descriptor.Descriptor.')
self._descriptors[desc.full_name] = desc
self._AddFileDescriptor(desc.file)
|
Adds a Descriptor to the pool, non-recursively.
If the Descriptor contains nested messages or enums, the caller must
explicitly register them. This method also registers the FileDescriptor
associated with the message.
Args:
desc: A Descriptor.
|
25,288 |
def analyze(self, handle, filename):
files = {"file": (filename, handle)}
handle.seek(0)
    response = self._request("tasks/create/file", method='POST', files=files)
try:
return str(json.loads(response.content.decode())["task_id"])
except KeyError:
return str(json.loads(response.content.decode())["task_ids"][0])
|
Submit a file for analysis.
:type handle: File handle
:param handle: Handle to file to upload for analysis.
:type filename: str
:param filename: File name.
:rtype: str
:return: Task ID as a string
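Hedged usage sketch; ``sandbox`` stands in for an instance of this class and the file path is a
placeholder::
    with open('sample.bin', 'rb') as handle:
        task_id = sandbox.analyze(handle, 'sample.bin')
    print(task_id)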
|
25,289 |
def get_build_config_by_labels_filtered(self, label_selectors, filter_key, filter_value):
items = self.get_all_build_configs_by_labels(label_selectors)
if filter_value is not None:
build_configs = []
for build_config in items:
match_value = graceful_chain_get(build_config, *filter_key.split())
if filter_value == match_value:
build_configs.append(build_config)
items = build_configs
if not items:
raise OsbsException(
"Build config not found for labels: %r" %
(label_selectors, ))
if len(items) > 1:
raise OsbsException(
"More than one build config found for labels: %r" %
(label_selectors, ))
return items[0]
|
Returns a build config matching the given label selectors, filtering against
another predetermined value. This method will raise OsbsException
if not exactly one build config is found after filtering.
|
25,290 |
def download(self, filename, format='sdf', overwrite=False, resolvers=None, **kwargs):  # default format value assumed
download(self.input, filename, format, overwrite, resolvers, **kwargs)
|
Download the resolved structure as a file
|
25,291 |
def last_ser(self):
last_xChart = self.xCharts[-1]
sers = last_xChart.sers
if not sers:
return None
return sers[-1]
|
Return the last `<c:ser>` element in the last xChart element, based
on series order (not necessarily the same element as document order).
|
25,292 |
def stars_list(self, **kwargs) -> SlackResponse:
self._validate_xoxp_token()
return self.api_call("stars.list", http_verb="GET", params=kwargs)
|
Lists stars for a user.
|
25,293 |
def read_pickle(fn):
    with misc.open_plain_or_gzip(fn, 'rb') as fh:
parser = pickle.load(fh)
return parser
|
Load a GOParser object from a pickle file.
The function automatically detects whether the file is compressed
with gzip.
Parameters
----------
fn: str
Path of the pickle file.
Returns
-------
`GOParser`
The GOParser object stored in the pickle file.
|
25,294 |
def extracting(self, *names, **kwargs):
if not isinstance(self.val, Iterable):
        raise TypeError('val is not iterable')
    if isinstance(self.val, str_types):
        raise TypeError('val must not be a string')
    if len(names) == 0:
        raise ValueError('one or more name args must be given')
def _extract(x, name):
if self._check_dict_like(x, check_values=False, return_as_bool=True):
if name in x:
return x[name]
else:
                raise ValueError('item keys %s did not contain key <%s>' % (list(x.keys()), name))
        elif isinstance(x, Iterable):
            self._check_iterable(x, name='item')
return x[name]
elif hasattr(x, name):
attr = getattr(x, name)
if callable(attr):
try:
return attr()
except TypeError:
                    raise ValueError('val method <%s()> exists, but is not a zero-arg method' % name)
else:
return attr
else:
            raise ValueError('val does not have property or zero-arg method <%s>' % name)
def _filter(x):
        if 'filter' in kwargs:
            if isinstance(kwargs['filter'], str_types):
                return bool(_extract(x, kwargs['filter']))
            elif self._check_dict_like(kwargs['filter'], check_values=False, return_as_bool=True):
                for k in kwargs['filter']:
                    if isinstance(k, str_types):
                        if _extract(x, k) != kwargs['filter'][k]:
                            return False
                return True
            elif callable(kwargs['filter']):
                return kwargs['filter'](x)
return False
return True
def _sort(x):
        if 'sort' in kwargs:
            if isinstance(kwargs['sort'], str_types):
                return _extract(x, kwargs['sort'])
            elif isinstance(kwargs['sort'], Iterable):
                items = []
                for k in kwargs['sort']:
                    if isinstance(k, str_types):
                        items.append(_extract(x, k))
                return tuple(items)
            elif callable(kwargs['sort']):
                return kwargs['sort'](x)
return 0
extracted = []
for i in sorted(self.val, key=lambda x: _sort(x)):
if _filter(i):
items = [_extract(i, name) for name in names]
extracted.append(tuple(items) if len(items) > 1 else items[0])
return AssertionBuilder(extracted, self.description, self.kind)
|
Asserts that val is collection, then extracts the named properties or named zero-arg methods into a list (or list of tuples if multiple names are given).
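A hedged usage sketch in the assertpy style (the assert_that entry point and the data are illustrative)::
    from assertpy import assert_that
    people = [{'name': 'Fred', 'age': 36}, {'name': 'Bob', 'age': 40}]
    assert_that(people).extracting('name').contains('Fred', 'Bob')
    assert_that(people).extracting('name', filter={'age': 40}).is_equal_to(['Bob'])
    assert_that(people).extracting('name', 'age', sort='age').is_equal_to([('Fred', 36), ('Bob', 40)])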
|
25,295 |
def std(self, n, array=False):
result = talib.STDDEV(self.close, n)
if array:
return result
return result[-1]
|
Standard deviation.
|
25,296 |
def cluster_health(index=None, level='cluster', local=False, hosts=None, profile=None):
es = _get_instance(hosts, profile)
try:
return es.cluster.health(index=index, level=level, local=local)
except elasticsearch.TransportError as e:
raise CommandExecutionError("Cannot retrieve health information, server returned code {0} with message {1}".format(e.status_code, e.error))
|
.. versionadded:: 2017.7.0
Return Elasticsearch cluster health.
index
Limit the information returned to a specific index
level
Specify the level of detail for returned information, default 'cluster', valid choices are: 'cluster', 'indices', 'shards'
local
Return local information, do not retrieve the state from master node
CLI example::
salt myminion elasticsearch.cluster_health
|
25,297 |
def read_cstring(self) -> bool:
self._stream.save_context()
idx = self._stream.index
if self.read_char("\"") and self.read_until("\"", "\\"):
txt = self._stream[idx:self._stream.index]
return self._stream.validate_context()
return self._stream.restore_context()
|
read a double quoted string
Read following BNF rule else return False::
'"' -> ['\\' #char | ~'\\'] '"'
|
25,298 |
def format_progress(self, width):
chunk_widths = self._get_chunk_sizes(width)
progress_chunks = [chunk.format_chunk(chunk_width)
for (chunk, chunk_width)
in zip(self._progress_chunks, chunk_widths)]
return "{sep_start}{progress}{sep_end}".format(
sep_start=self.sep_start,
progress="".join(progress_chunks),
sep_end=self.sep_end
)
|
Create the formatted string that displays the progress.
|
25,299 |
def H13(self):
"Information measure of correlation 2."
return np.sqrt(1 - np.exp(-2 * (self.hxy2 - self.H9())))
|
Information measure of correlation 2.
|