Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k) |
---|---|---|
379,300 | def to_input_req(self):
return {
WARM_START_TYPE: self.type.value,
PARENT_HYPERPARAMETER_TUNING_JOBS: [{HYPERPARAMETER_TUNING_JOB_NAME: parent} for parent in self.parents]
} | Converts the ``self`` instance to the desired input request format.
Returns:
dict: Containing the "WarmStartType" and "ParentHyperParameterTuningJobs" as the first class fields.
Examples:
>>> warm_start_config = WarmStartConfig(warm_start_type=WarmStartTypes.TransferLearning, parents=["p1", "p2"])
>>> warm_start_config.to_input_req()
{
"WarmStartType":"TransferLearning",
"ParentHyperParameterTuningJobs": [
{'HyperParameterTuningJobName': "p1"},
{'HyperParameterTuningJobName': "p2"},
]
} |
379,301 | def remove_leading_zeros(num: str) -> str:
if not num:
return num
if num.startswith('-'):
ret = '-' + num[1:].lstrip('0')
elif num.startswith('M'):
ret = 'M' + num[1:].lstrip('0')
else:
ret = num.lstrip('0')
return '0' if ret in ('', '-', 'M') else ret | Strips zeros while handling -, M, and empty strings |
379,302 | def dSbr_dV(self, Yf, Yt, V, buses=None, branches=None):
buses = self.buses if buses is None else buses
branches = self.branches if branches is None else branches
nl = len(branches)
nb = len(V)
il = range(nl)
ib = range(nb)
f = [l.from_bus._i for l in branches]
t = [l.to_bus._i for l in branches]
If = Yf * V
It = Yt * V
Vnorm = V / abs(V)
diagVf = csr_matrix((V[f], (il, il)))
diagIf = csr_matrix((If, (il, il)))
diagVt = csr_matrix((V[t], (il, il)))
diagIt = csr_matrix((It, (il, il)))
diagV = csr_matrix((V, (ib, ib)))
diagVnorm = csr_matrix((Vnorm, (ib, ib)))
shape = (nl, nb)
dSf_dVa = 1j * (conj(diagIf) *
csr_matrix((V[f], (il, f)), shape) - diagVf * conj(Yf * diagV))
dSt_dVa = 1j * (conj(diagIt) *
csr_matrix((V[t], (il, t)), shape) - diagVt * conj(Yt * diagV))
dSf_dVm = diagVf * conj(Yf * diagVnorm) + conj(diagIf) * \
csr_matrix((Vnorm[f], (il, f)), shape)
dSt_dVm = diagVt * conj(Yt * diagVnorm) + conj(diagIt) * \
csr_matrix((Vnorm[t], (il, t)), shape)
Sf = V[f] * conj(If)
St = V[t] * conj(It)
return dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, Sf, St | Based on dSbr_dV.m from MATPOWER by Ray Zimmerman, developed at
PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more
information.
@return: The branch power flow vectors and the partial derivatives of
branch power flow w.r.t voltage magnitude and voltage angle.
@rtype: tuple |
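The matrices assembled above follow the standard MATPOWER branch-flow derivative identities. As a sketch of the underlying algebra (writing C_f for the from-bus selection implied by the V[f] indexing and V_norm = V/|V|, notation assumed here rather than taken from the snippet):

\[ S_f = \operatorname{diag}(V_f)\, I_f^{*}, \qquad I_f = Y_f V, \qquad V_f = C_f V \]

\[ \frac{\partial S_f}{\partial \theta} = j\left(\operatorname{diag}(I_f^{*})\, C_f \operatorname{diag}(V) - \operatorname{diag}(V_f)\, \big(Y_f \operatorname{diag}(V)\big)^{*}\right) \]

\[ \frac{\partial S_f}{\partial V_m} = \operatorname{diag}(V_f)\, \big(Y_f \operatorname{diag}(V_{\mathrm{norm}})\big)^{*} + \operatorname{diag}(I_f^{*})\, C_f \operatorname{diag}(V_{\mathrm{norm}}) \]

The "to"-end derivatives are obtained by replacing C_f, Y_f, V_f, I_f with their to-bus counterparts, which is exactly what the dSt_dVa and dSt_dVm lines compute.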
379,303 | def get(self):
return ExecutionContextContext(
self._version,
flow_sid=self._solution['flow_sid'],
execution_sid=self._solution['execution_sid'],
) | Constructs a ExecutionContextContext
:returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
:rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext |
379,304 | def JoinPath(self, path_segments):
first_path_segment = None
if path_segments and platform.system() == :
first_path_segment = path_segments[0]
first_path_segment_length = len(first_path_segment)
first_path_segment_prefix = None
if (first_path_segment_length >= 7 and
first_path_segment.startswith() and
first_path_segment[5:7] == ):
first_path_segment_prefix = first_path_segment[4:6]
first_path_segment = first_path_segment[7:]
elif (first_path_segment_length >= 4 and
first_path_segment[:4] in [, ]):
first_path_segment_prefix = first_path_segment[:4]
first_path_segment = first_path_segment[4:]
elif first_path_segment_length >= 2 and first_path_segment[1] == :
first_path_segment_prefix = first_path_segment[:2]
first_path_segment = first_path_segment[2:]
elif first_path_segment.startswith():
prefix, _, remainder = first_path_segment[2:].partition(
self.PATH_SEPARATOR)
first_path_segment_prefix = .format(prefix)
first_path_segment = .format(remainder)
if first_path_segment_prefix:
first_path_segment, _, remainder = first_path_segment.partition(
self.PATH_SEPARATOR)
if not remainder:
_ = path_segments.pop(0)
else:
path_segments[0] = remainder
first_path_segment = .join([
first_path_segment_prefix, first_path_segment])
else:
first_path_segment = None
path_segments = [
segment.split(self.PATH_SEPARATOR) for segment in path_segments]
path_segments = [
element for sublist in path_segments for element in sublist]
path_segments = list(filter(None, path_segments))
if first_path_segment is None:
path = .format(
self.PATH_SEPARATOR, self.PATH_SEPARATOR.join(path_segments))
else:
path = first_path_segment
if path_segments:
path = .format(
path, self.PATH_SEPARATOR, self.PATH_SEPARATOR.join(path_segments))
return path | Joins the path segments into a path.
Args:
path_segments (list[str]): path segments.
Returns:
str: joined path segments prefixed with the path separator. |
379,305 | def within_line(self, viewer, points, p_start, p_stop, canvas_radius):
scale_x, scale_y = viewer.get_scale_xy()
new_radius = canvas_radius * 1.0 / min(scale_x, scale_y)
return self.point_within_line(points, p_start, p_stop, new_radius) | Points `points` and line endpoints `p_start`, `p_stop` are in
data coordinates.
Return True for points within the line from
p_start to p_stop and within `canvas_radius`.
The distance between points is scaled by the viewer's canvas scale. |
379,306 | def variant_stats_from_variant(variant,
metadata,
merge_fn=(lambda all_stats: \
max(all_stats, key=(lambda stats: stats.tumor_stats.depth)))):
all_stats = []
for (variant_file, variant_metadata) in metadata.items():
if _vcf_is_maf(variant_file=variant_file):
stats = maf_somatic_variant_stats(variant, variant_metadata)
elif _vcf_is_strelka(variant_file=variant_file,
variant_metadata=variant_metadata):
stats = strelka_somatic_variant_stats(variant, variant_metadata)
elif _vcf_is_mutect(variant_file=variant_file,
variant_metadata=variant_metadata):
stats = mutect_somatic_variant_stats(variant, variant_metadata)
else:
raise ValueError("Cannot parse sample fields, variant file {} is from an unsupported caller.".format(variant_file))
all_stats.append(stats)
return merge_fn(all_stats) | Parse the variant calling stats from a variant called from multiple variant files. The stats are merged
based on `merge_fn`
Parameters
----------
variant : varcode.Variant
metadata : dict
Dictionary of variant file to variant calling metadata from that file
merge_fn : function
Function from list of SomaticVariantStats to single SomaticVariantStats.
This is used if a variant is called by multiple callers or appears in multiple VCFs.
By default, this uses the data from the caller that had a higher tumor depth.
Returns
-------
SomaticVariantStats |
379,307 | def parse_name(self):
name = []
while True:
part = self.match_string_pattern(spat.alphau, spat.alphal)
if part == '':
break
self.eat_string(part)
name.append(part)
if self.get_char() == ' ':
self.eat_length(1)
if not len(name):
raise PartpyError(self, 'expected a name')
return ' '.join(name) | This function uses string patterns to match a title cased name.
This is done in a loop until there are no more names to match so as
to be able to include surnames etc. in the output. |
379,308 | def write_uint16(self, value, little_endian=True):
if little_endian:
endian = "<"
else:
endian = ">"
return self.pack('%sH' % endian, value) | Pack the value as an unsigned integer and write 2 bytes to the stream.
Args:
value:
little_endian (bool): specify the endianness. (Default) Little endian.
Returns:
int: the number of bytes written. |
379,309 | def hard_wrap(self):
self.linebreak = re.compile(r)
self.text = re.compile(
r
) | Grammar for hard wrap linebreak. You don't need to add two
spaces at the end of a line. |
379,310 | def create_tx(self, txins=None, txouts=None, lock_time=0):
txins = [] if txins is None else txins
txouts = [] if txouts is None else txouts
lock_time = deserialize.positive_integer(lock_time)
txins = deserialize.txins(txins)
txouts = deserialize.txouts(self.testnet, txouts)
tx = control.create_tx(self.service, self.testnet, txins, txouts,
lock_time=lock_time)
return serialize.tx(tx) | Create unsigned rawtx with given txins/txouts as json data.
<txins>: '[{"txid" : hexdata, "index" : integer}, ...]'
<txouts>: '[{"address" : hexdata, "value" : satoshis}, ...]' |
379,311 | def do_handle_log(self, workunit, level, *msg_elements):
if not self.is_under_main_root(workunit):
return
if self.use_color_for_workunit(workunit, self.settings.color):
msg = self._COLOR_BY_LEVEL.get(level, lambda x: x)(msg)
self.emit(self._prefix(workunit, msg))
self.flush() | Implementation of Reporter callback. |
379,312 | def _fetch(self, request):
client = self.client
call = Call(__id__=client.newCall(request.request))
call.enqueue(request.handler)
request.call = call | Fetch using the OkHttpClient |
379,313 | def _make_path(self, items):
s url pattern and replaces instances of <var_name>
with the appropriate value from the items dict.
Value "%s" for key "%s" must be an instance of basestring{%s}' % key, val)
return path | Returns a relative path for the given dictionary of items.
Uses this url rule's url pattern and replaces instances of <var_name>
with the appropriate value from the items dict. |
379,314 | def evaluate_policy(self, sigma):
if self.beta == 1:
raise NotImplementedError(self._error_msg_no_discounting)
R_sigma, Q_sigma = self.RQ_sigma(sigma)
b = R_sigma
A = self._I - self.beta * Q_sigma
v_sigma = self._lineq_solve(A, b)
return v_sigma | Compute the value of a policy.
Parameters
----------
sigma : array_like(int, ndim=1)
Policy vector, of length n.
Returns
-------
v_sigma : ndarray(float, ndim=1)
Value vector of `sigma`, of length n. |
379,315 | def _maybe_club(self, list_of_dicts):
column_headers = JsonConverter._list_of_dicts_to_column_headers(list_of_dicts)
if column_headers is None:
html_output = self._markup(list_of_dicts)
else:
html_output = self._table_opening_tag
html_output += self._markup_header_row(column_headers)
for list_entry in list_of_dicts:
html_output += "<tr><td>"
html_output += "</td><td>".join(self._markup(list_entry[column_header]) for column_header in column_headers)
html_output += "</td></tr>"
html_output += "</table>"
return self._markup_table_cell(html_output) | If all keys in a list of dicts are identical, values from each ``dict``
are clubbed, i.e. inserted under a common column heading. If the keys
are not identical ``None`` is returned, and the list should be converted
to HTML per the normal ``convert`` function.
Parameters
----------
list_of_dicts : list
List to attempt to club.
Returns
-------
str or None
String of HTML if list was successfully clubbed. Returns ``None`` otherwise.
Example
-------
Given the following json object::
{
"sampleData": [
{"a":1, "b":2, "c":3},
{"a":5, "b":6, "c":7}]
}
Calling ``_maybe_club`` would result in the following HTML table:
_____________________________
| | | | |
| | a | c | b |
| sampleData |---|---|---|
| | 1 | 3 | 2 |
| | 5 | 7 | 6 |
-----------------------------
Adapted from a contribution from @muellermichel to ``json2html``. |
379,316 | def __remove_index(self, ids):
if not ids:
return
ids = ",".join((str(id) for id in ids))
self.execute("DELETE FROM fact_index where id in (%s)" % ids) | remove affected ids from the index |
379,317 | def _collective_with_groups(self, x, mesh_axes, collective):
if not mesh_axes:
return x
x = x.to_laid_out_tensor()
if len(mesh_axes) == self.ndims:
return self.LaidOutTensor(collective(x.tensor_list, self._devices))
else:
groups = mtf.processor_groups(self.shape, mesh_axes)
ret = [None] * self.size
for g in groups:
inputs = [x.tensor_list[pnum] for pnum in g]
devices = [self._devices[pnum] for pnum in g]
reduced = collective(inputs, devices)
for pnum, y in zip(g, reduced):
ret[pnum] = y
return self.LaidOutTensor(ret) | Grouped collective, (across the given dimensions).
Args:
x: a LaidOutTensor
mesh_axes: a list of integers - the mesh dimensions to be reduced
collective: fn from list(tf.Tensor), list(device) -> list(tf.Tensor)
Returns:
a LaidOutTensor |
379,318 | def book(symbol=None, token='', version=''):
_raiseIfNotStr(symbol)
if symbol:
return _getJson('deep/book?symbols=' + symbol, token, version)
return _getJson('deep/book', token, version) | Book shows IEX’s bids and asks for given symbols.
https://iexcloud.io/docs/api/#deep-book
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result |
379,319 | def html_entity_decode_codepoint(self, m,
defs=htmlentities.codepoint2name):
try:
char = defs[m.group(1)]
return "&{char};".format(char=char)
except ValueError:
return m.group(0)
except KeyError:
return m.group(0) | decode html entity into one of the codepoint2name |
379,320 | def besj(self, x, n):
if n < 0:
return 0.0
d = 1e-6
b = 0
if math.fabs(x) <= d:
if n != 0:
return 0
return 1
b1 = 0
m1 = int(math.fabs(x)) + 6
if math.fabs(x) > 5:
m1 = int(math.fabs(1.4 * x + 60 / x))
m2 = int(n + 2 + math.fabs(x) / 4)
if m1 > m2:
m2 = m1
while True:
c3 = 0
c2 = 1e-30
c4 = 0
m8 = 1
if m2 / 2 * 2 == m2:
m8 = -1
imax = m2 - 2
for i in range(1, imax+1):
c6 = 2 * (m2 - i) * c2 / x - c3
c3 = c2
c2 = c6
if m2 - i - 1 == n:
b = c6
m8 = -1 * m8
if m8 > 0:
c4 = c4 + 2 * c6
c6 = 2 * c2 / x - c3
if n == 0:
b = c6
c4 += c6
b /= c4
if math.fabs(b - b1) < d:
return b
b1 = b
m2 += 3 | Function BESJ calculates Bessel function of first kind of order n
Arguments:
n - an integer (>=0), the order
x - value at which the Bessel function is required
--------------------
C++ Mathematical Library
Converted from equivalent FORTRAN library
Converted by Gareth Walker for use by course 392 computational project
All functions tested and yield the same results as the corresponding
FORTRAN versions.
If you have any problems using these functions please report them to
[email protected]
Documentation available on the web
http://www.ma.umist.ac.uk/mrm/Teaching/392/libs/392.html
Version 1.0 8/98
29 October, 1999
--------------------
Adapted for use in AGG library by
Andy Wilk ([email protected])
Adapted for use in vispy library by
Nicolas P. Rougier ([email protected])
----------------------------------------------------------------------- |
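The loop above reads as Miller's downward-recurrence scheme for Bessel functions of the first kind (an interpretation of the code, not stated in it): the three-term recurrence is run backwards from a high trial order m2 and the result is then normalised. The two identities it relies on are

\[ J_{n-1}(x) = \frac{2n}{x} J_n(x) - J_{n+1}(x) \]

\[ J_0(x) + 2\sum_{k=1}^{\infty} J_{2k}(x) = 1 \]

The accumulator c4 builds the left-hand side of the normalisation identity (2*c6 for each even-order term plus the final c6 for order 0), b /= c4 rescales the unnormalised value, and m2 += 3 raises the starting order until two successive estimates agree to within d.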
379,321 | def d3logpdf_dlink3(self, inv_link_f, y, Y_metadata=None):
e = y - inv_link_f
d3lik_dlink3 = ( -(2*(self.v + 1)*(-e)*(e**2 - 3*self.v*self.sigma2)) /
((e**2 + self.sigma2*self.v)**3)
)
return d3lik_dlink3 | Third order derivative log-likelihood function at y given link(f) w.r.t link(f)
.. math::
\\frac{d^{3} \\ln p(y_{i}|\\lambda(f_{i}))}{d^{3}\\lambda(f)} = \\frac{2(v+1)\\left((y_{i} - \\lambda(f_{i}))^{3} - 3(y_{i} - \\lambda(f_{i}))\\sigma^{2} v\\right)}{\\left((y_{i} - \\lambda(f_{i}))^{2} + \\sigma^{2} v\\right)^{3}}
:param inv_link_f: latent variables link(f)
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution
:returns: third derivative of likelihood evaluated at points f
:rtype: Nx1 array |
379,322 | def parse_environment_data(block):
warnings.warn(
"Process.parse_environment_data() is deprecated" \
" since WinAppDbg 1.5.",
DeprecationWarning)
environment = dict()
if not block:
return environment
gst = win32.GuessStringType
if type(block[0]) == gst.t_ansi:
equals = '='
terminator = '\0'
else:
equals = u'='
terminator = u'\0'
for chunk in block:
sep = chunk.find(equals, 1)
if sep < 0:
continue
key, value = chunk[:sep], chunk[sep+1:]
if key not in environment:
environment[key] = value
else:
environment[key] += terminator + value
return environment | Parse the environment block into a Python dictionary.
@warn: Deprecated since WinAppDbg 1.5.
@note: Values of duplicated keys are joined using null characters.
@type block: list of str
@param block: List of strings as returned by L{get_environment_data}.
@rtype: dict(str S{->} str)
@return: Dictionary of environment keys and values. |
379,323 | def update_batch(self, **kwargs):
items = kwargs.pop("items")
def transform(item):
item[self.identifier_key] = item.pop("id")
return item
return dict(
items=[
self.replace(**transform(item))
for item in items
],
) | Simplistic batch update operation implemented in terms of `replace()`.
Assumes that:
- Request and response schemas contains lists of items.
- Request items define a primary key identifier
- The entire batch succeeds or fails together. |
379,324 | def getAllClasses(self, hide_base_schemas=True, hide_implicit_types=True):
query =
BIT_BASE_SCHEMAS =
BIT_IMPLICIT_TYPES =
if hide_base_schemas == False:
BIT_BASE_SCHEMAS = ""
if hide_implicit_types == True:
BIT_IMPLICIT_TYPES = ""
query = query % (BIT_IMPLICIT_TYPES, BIT_BASE_SCHEMAS)
qres = self.rdflib_graph.query(query)
return list(qres) | * hide_base_schemas: by default, obscure all RDF/RDFS/OWL/XML stuff
* hide_implicit_types: don't make any inference based on rdf:type declarations |
379,325 | def resetPassword(self, attempt, newPassword):
self.accountByAddress(attempt.username).password = newPassword
self.store.query(
PersistentSession,
PersistentSession.authenticatedAs == str(attempt.username)
).deleteFromStore()
attempt.deleteFromStore() | @param attempt: L{_PasswordResetAttempt}
reset the password of the user who initiated C{attempt} to C{newPassword},
and afterward, delete the attempt and any persistent sessions that belong
to the user |
379,326 | def fit_naa(self, reject_outliers=3.0, fit_lb=1.8, fit_ub=2.4,
phase_correct=True):
model, signal, params = ana.fit_lorentzian(self.diff_spectra,
self.f_ppm,
lb=fit_lb,
ub=fit_ub)
self.naa_model = model
self.naa_signal = signal
self.naa_params = params
self.naa_idx = ut.make_idx(self.f_ppm, fit_lb, fit_ub)
mean_params = stats.nanmean(params, 0)
self.naa_auc = self._calc_auc(ut.lorentzian, params, self.naa_idx) | Fit a Lorentzian function to the NAA peak at ~ 2 ppm. Example of
fitting inverted peak: Foerster et al. 2013, An imbalance between
excitatory and inhibitory neurotransmitters in amyothrophic lateral
sclerosis revealed by use of 3T proton MRS |
379,327 | def midi_outputs(self):
return self.client.get_ports(is_midi=True, is_physical=True, is_output=True) | :return: A list of MIDI output :class:`Ports`. |
379,328 | def get_diff_amounts(self):
diffs = []
last_commit = None
for commit in self.repo.iter_commits():
if last_commit is not None:
diff = self.get_diff(commit.hexsha, last_commit.hexsha)
total_changed = diff[Diff.ADD] + diff[Diff.DEL]
diffs.append(total_changed)
last_commit = commit
return diffs | Gets list of total diff
:return: List of total diff between 2 consecutive commits since start |
379,329 | def delete(self, force=False):
if not force and self._state not in (
Bundle.INSTALLED,
Bundle.RESOLVED,
Bundle.STOPPING,
):
_logger.warning("Trying to delete an active framework")
return False
return FrameworkFactory.delete_framework(self) | Deletes the current framework
:param force: If True, stops the framework before deleting it
:return: True if the framework has been deleted, False if it couldn't |
379,330 | def get_instance(self, payload):
return EngagementContextInstance(
self._version,
payload,
flow_sid=self._solution['flow_sid'],
engagement_sid=self._solution['engagement_sid'],
) | Build an instance of EngagementContextInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextInstance
:rtype: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextInstance |
379,331 | def parse_mapping(mapping_file: Optional[str]) -> configparser.ConfigParser:
LOGGER.debug(, mapping_file)
def parse(mapping_file):
config = configparser.ConfigParser()
config.read_file(mapping_file)
return config
if mapping_file is not None:
LOGGER.debug()
return parse(mapping_file)
xdg_config_dir = xdg.BaseDirectory.load_first_config()
if xdg_config_dir is None:
raise RuntimeError(
.format(
config_file=DEFAULT_CONFIG_FILE))
mapping_file = os.path.join(xdg_config_dir, CONFIG_FILE_NAME)
LOGGER.debug(, mapping_file)
with open(mapping_file, ) as file_handle:
return parse(file_handle) | Parse the file containing the mappings from hosts to pass entries.
Args:
mapping_file:
Name of the file to parse. If ``None``, the default file from the
XDG location is used. |
379,332 | def import_from_netcdf(network, path, skip_time=False):
assert has_xarray, "xarray must be installed for netCDF support."
basename = os.path.basename(path) if isinstance(path, string_types) else None
with ImporterNetCDF(path=path) as importer:
_import_from_importer(network, importer, basename=basename,
skip_time=skip_time) | Import network data from netCDF file or xarray Dataset at `path`.
Parameters
----------
path : string|xr.Dataset
Path to netCDF dataset or instance of xarray Dataset
skip_time : bool, default False
Skip reading in time dependent attributes |
379,333 | def get_node_affiliations(self, jid, node):
iq = aioxmpp.stanza.IQ(
type_=aioxmpp.structs.IQType.GET,
to=jid,
payload=pubsub_xso.OwnerRequest(
pubsub_xso.OwnerAffiliations(node),
)
)
return (yield from self.client.send(iq)) | Return the affiliations of other jids at a node.
:param jid: Address of the PubSub service.
:type jid: :class:`aioxmpp.JID`
:param node: Name of the node to query
:type node: :class:`str`
:raises aioxmpp.errors.XMPPError: as returned by the service
:return: The response from the service.
:rtype: :class:`.xso.OwnerRequest`
The affiliations are returned as :class:`.xso.OwnerRequest` instance
whose :attr:`~.xso.OwnerRequest.payload` is a
:class:`.xso.OwnerAffiliations` instance. |
379,334 | async def _get_descriptions(self):
self.fields = []
self.converters = []
use_unicode = self.connection.use_unicode
conn_encoding = self.connection.encoding
description = []
for i in range(self.field_count):
field = await self.connection._read_packet(
FieldDescriptorPacket)
self.fields.append(field)
description.append(field.description())
field_type = field.type_code
if use_unicode:
if field_type == FIELD_TYPE.JSON:
encoding = conn_encoding
elif field_type in TEXT_TYPES:
if field.charsetnr == 63:
encoding = None
else:
encoding = conn_encoding
else:
encoding = 'ascii'
else:
encoding = None
converter = self.connection.decoders.get(field_type)
if converter is through:
converter = None
self.converters.append((encoding, converter))
eof_packet = await self.connection._read_packet()
assert eof_packet.is_eof_packet(), 'Protocol error, expecting EOF'
self.description = tuple(description) | Read a column descriptor packet for each column in the result. |
379,335 | def resolve_for(self, node, exact=None):
from capybara.driver.node import Node
from capybara.node.element import Element
from capybara.node.simple import Simple
@node.synchronize
def resolve():
if self.selector.format == "css":
children = node._find_css(self.css())
else:
children = node._find_xpath(self.xpath(exact))
def wrap(child):
if isinstance(child, Node):
return Element(node.session, child, node, self)
else:
return Simple(child)
children = [wrap(child) for child in children]
return Result(children, self)
return resolve() | Resolves this query relative to the given node.
Args:
node (node.Base): The node relative to which this query should be resolved.
exact (bool, optional): Whether to exactly match text.
Returns:
list[Element]: A list of elements matched by this query. |
379,336 | def get_connection(self, name):
return self._api_get(.format(
urllib.parse.quote_plus(name)
)) | An individual connection.
:param name: The connection name
:type name: str |
379,337 | def _maybe_start_instance(instance):
if not instance:
return
if instance.state['Name'] == 'stopped':
instance.start()
while True:
print(f"Waiting for {instance} to start.")
instance.reload()
if instance.state['Name'] == 'running':
break
time.sleep(10) | Starts instance if it's stopped, no-op otherwise. |
379,338 | def to_native(key):
item = find(whatever=key)
if not item:
raise NonExistentLanguageError()
return item[u'native'] | Find the native name for the language specified by key.
>>> to_native('br')
u'brezhoneg'
>>> to_native('sw')
u'Kiswahili' |
379,339 | def save_raw_data_from_data_queue(data_queue, filename, mode='a', title='', scan_parameters=None):
if not scan_parameters:
scan_parameters = {}
with open_raw_data_file(filename, mode=mode, title=title, scan_parameters=list(dict.iterkeys(scan_parameters))) as raw_data_file:
raw_data_file.append(data_queue, scan_parameters=scan_parameters) | Writing raw data file from data queue
If you need to write raw data once in a while this function may make it easy for you. |
379,340 | def filter_macro(func, *args, **kwargs):
filter_partial = partial(func, *args, **kwargs)
class FilterMacroMeta(FilterMeta):
@staticmethod
def __new__(mcs, name, bases, attrs):
for attr in WRAPPER_ASSIGNMENTS:
if hasattr(func, attr):
attrs[attr] = getattr(func, attr)
return super(FilterMacroMeta, mcs)\
.__new__(mcs, func.__name__, bases, attrs)
def __call__(cls, *runtime_args, **runtime_kwargs):
return filter_partial(*runtime_args, **runtime_kwargs)
class FilterMacro(with_metaclass(FilterMacroMeta, FilterMacroType)):
def _apply(self, value):
return self.__class__()._apply(value)
return FilterMacro | Promotes a function that returns a filter into its own filter type.
Example::
@filter_macro
def String():
return Unicode | Strip | NotEmpty
# You can now use `String` anywhere you would use a regular Filter:
(String | Split(':')).apply('...')
You can also use ``filter_macro`` to create partials, allowing you to
preset one or more initialization arguments::
Minor = filter_macro(Max, max_value=18, inclusive=False)
Minor(inclusive=True).apply(18) |
379,341 | def activate(self, resource=None, timeout=3, wait_for_finish=False):
return Task.execute(self, 'activate', json={'resource': resource},
timeout=timeout, wait_for_finish=wait_for_finish) | Activate this package on the SMC
:param list resource: node href's to activate on. Resource is only
required for software upgrades
:param int timeout: timeout between queries
:raises TaskRunFailed: failure during activation (downloading, etc)
:rtype: TaskOperationPoller |
379,342 | def path_to_songname(path: str)->str:
return os.path.splitext(os.path.basename(path))[0] | Extracts song name from a filepath. Used to identify which songs
have already been fingerprinted on disk. |
379,343 | def bschoc(value, ndim, lenvals, array, order):
value = stypes.stringToCharP(value)
ndim = ctypes.c_int(ndim)
lenvals = ctypes.c_int(lenvals)
array = stypes.listToCharArrayPtr(array, xLen=lenvals, yLen=ndim)
order = stypes.toIntVector(order)
return libspice.bschoc_c(value, ndim, lenvals, array, order) | Do a binary search for a given value within a character string array,
accompanied by an order vector. Return the index of the matching array
entry, or -1 if the key value is not found.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bschoc_c.html
:param value: Key value to be found in array.
:type value: str
:param ndim: Dimension of array.
:type ndim: int
:param lenvals: String length.
:type lenvals: int
:param array: Character string array to search.
:type array: list of strings
:param order: Order vector.
:type order: Array of ints
:return: index
:rtype: int |
379,344 | def update_points(self):
x, y, w, h = self.x, self.y, self.w, self.h
self.points = (x, y, x + w, y, x + w, y + h, x, y + h) | Convert into a polygon made up of multiple points, used for collision handling |
379,345 | def ystep(self):
self.Y = np.asarray(sp.prox_l2(
self.AX + self.U, (self.lmbda/self.rho)*self.Wtvna,
axis=self.saxes), dtype=self.dtype) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`. |
379,346 | def export_txt(obj, file_name, two_dimensional=False, **kwargs):
if obj.ctrlpts is None or len(obj.ctrlpts) == 0:
raise exch.GeomdlException("There are no control points to save!")
if obj.pdimension == 1 and two_dimensional:
two_dimensional = False
col_sep = kwargs.get('col_separator', ";")
sep = kwargs.get('separator', ",")
content = exch.export_text_data(obj, sep, col_sep, two_dimensional)
return exch.write_file(file_name, content) | Exports control points as a text file.
For curves the output is always a list of control points. For surfaces, it is possible to generate a 2-dimensional
control point output file using ``two_dimensional``.
Please see :py:func:`.exchange.import_txt()` for detailed description of the keyword arguments.
:param obj: a spline geometry object
:type obj: abstract.SplineGeometry
:param file_name: file name of the text file to be saved
:type file_name: str
:param two_dimensional: type of the text file (only works for Surface objects)
:type two_dimensional: bool
:raises GeomdlException: an error occurred writing the file |
379,347 | def load_registered_fixtures(context):
runner = context._runner
step_registry = getattr(runner, 'step_registry', None)
if not step_registry:
step_registry = module_step_registry.registry
for step in context.scenario.all_steps:
match = step_registry.find_match(step)
if match and hasattr(match.func, 'registered_fixtures'):
if not context.test.fixtures:
context.test.fixtures = []
context.test.fixtures.extend(match.func.registered_fixtures) | Apply fixtures that are registered with the @fixtures decorator. |
379,348 | def remove_users_from_account_group(self, account_id, group_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.remove_users_from_account_group_with_http_info(account_id, group_id, **kwargs)
else:
(data) = self.remove_users_from_account_group_with_http_info(account_id, group_id, **kwargs)
return data | Remove users from a group. # noqa: E501
An endpoint for removing users from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/policy-groups/{groupID}/users -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.remove_users_from_account_group(account_id, group_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str group_id: (required)
:param SubjectList body:
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread. |
379,349 | def sub_tag(self, path, follow=True):
tags = path.split('/')
rec = self
for tag in tags:
recs = [x for x in (rec.sub_records or []) if x.tag == tag]
if not recs:
return None
rec = recs[0]
if follow and isinstance(rec, Pointer):
rec = rec.ref
return rec | Returns direct sub-record with given tag name or None.
Path can be a simple tag name, in which case the first direct
sub-record of this record with the matching tag is returned. Path
can also consist of several tags separated by slashes, in that case
sub-records are searched recursively.
If `follow` is True then pointer records are resolved and pointed
record is used instead of pointer record, this also works for all
intermediate records in a path.
:param str path: tag names separated by slashes.
:param boolean follow: If True then resolve pointers.
:return: `Record` instance or `None` if sub-record with a given
tag does not exist. |
379,350 | def canonical_peer( self, peer ):
their_host, their_port = url_to_host_port( peer )
if their_host in [, ]:
their_host =
return "%s:%s" % (their_host, their_port) | Get the canonical peer name |
379,351 | def delete_snmp_template(auth, url, template_name= None, template_id= None):
try:
if template_id is None:
snmp_templates = get_snmp_templates(auth, url)
if template_name is None:
template_name = snmp_template[]
template_id = None
for template in snmp_templates:
if template[] == template_name:
template_id = template[]
f_url = url + "/imcrs/plat/res/snmp/%s/delete" % template_id
response = requests.delete(f_url, auth=auth, headers=HEADERS)
return response.status_code
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + " delete_snmp_template: An Error has occured" | Takes template_name as input to issue RESTUL call to HP IMC which will delete the specific
snmp template from the IMC system
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:param template_name: str value of template name
:param template_id: str value template template_id value
:return: int HTTP response code
:rtype int |
379,352 | def postprocess(self, calc, with_module=None, dry_run=None):
for appname, appclass in self.Apps.items():
if with_module and with_module != appname: continue
run_permitted = False
if appclass[]:
for key in appclass[]:
negative = False
if str(appclass[][key]).startswith():
negative = True
scope_prop = appclass[][key][1:]
else:
scope_prop = appclass[][key]
if key in calc.info:
if (str(scope_prop) in str(calc.info[key]) or scope_prop == calc.info[key]) != negative:
run_permitted = True
else:
run_permitted = False
break
else: run_permitted = True
if run_permitted:
calc.apps[appname] = {: None, : None}
if dry_run:
continue
try:
AppInstance = appclass[](calc)
except:
exc_type, exc_value, exc_tb = sys.exc_info()
errmsg = "Fatal error in %s module:\n %s" % ( appname, " ".join(traceback.format_exception( exc_type, exc_value, exc_tb )) )
calc.apps[appname][] = errmsg
calc.warning( errmsg )
else:
try:
calc.apps[appname][] = getattr(AppInstance, appclass[])
except AttributeError:
errmsg = % appname
calc.apps[appname][] = errmsg
calc.warning( errmsg )
return calc | Invokes module(s) API
NB: this is the PUBLIC method
@returns apps_dict |
379,353 | def create_sconstruct(self, project_dir=, sayyes=False):
project_dir = util.check_dir(project_dir)
sconstruct_name = 'SConstruct'
sconstruct_path = util.safe_join(project_dir, sconstruct_name)
local_sconstruct_path = util.safe_join(
util.get_folder(), sconstruct_name)
if isfile(sconstruct_path):
if sayyes:
self._copy_sconstruct_file(sconstruct_name, sconstruct_path,
local_sconstruct_path)
else:
click.secho(
.format(sconstruct_name),
fg=)
if click.confirm():
self._copy_sconstruct_file(sconstruct_name,
sconstruct_path,
local_sconstruct_path)
else:
click.secho(, fg=)
else:
self._copy_sconstruct_file(sconstruct_name, sconstruct_path,
local_sconstruct_path) | Creates a default SConstruct file |
379,354 | def expect(self, pattern, timeout=-1):
if self.blocking:
raise RuntimeError("expect can only be used on non-blocking commands.")
try:
self.subprocess.expect(pattern=pattern, timeout=timeout)
except pexpect.EOF:
pass | Waits on the given pattern to appear in std_out |
379,355 | def diff(cls, a, b, ignore_formatting=False):
def underline(x): return u % (x,)
def blink(x): return u % (x,)
a_rows = []
b_rows = []
max_width = max([len(row) for row in a] + [len(row) for row in b])
a_lengths = []
b_lengths = []
for a_row, b_row in zip(a, b):
a_lengths.append(len(a_row))
b_lengths.append(len(b_row))
extra_a = u * (max_width - len(a_row))
extra_b = u * (max_width - len(b_row))
a_line = u
b_line = u
for a_char, b_char in zip(a_row + extra_a, b_row + extra_b):
if ignore_formatting:
a_char_for_eval = a_char.s if isinstance(a_char, FmtStr) else a_char
b_char_for_eval = b_char.s if isinstance(b_char, FmtStr) else b_char
else:
a_char_for_eval = a_char
b_char_for_eval = b_char
if a_char_for_eval == b_char_for_eval:
a_line += actualize(a_char)
b_line += actualize(b_char)
else:
a_line += underline(blink(actualize(a_char)))
b_line += underline(blink(actualize(b_char)))
a_rows.append(a_line)
b_rows.append(b_line)
hdiff = .join(a_line + u % (a_len, b_len) + b_line for a_line, b_line, a_len, b_len in zip(a_rows, b_rows, a_lengths, b_lengths))
return hdiff | Returns two FSArrays with differences underlined |
379,356 | def _set_cos_traffic_class(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",cos_traffic_class.cos_traffic_class, yang_name="cos-traffic-class", rest_name="cos-traffic-class", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: None, u: None, u: None, u: None, u: None, u: u, u: u}}), is_container=, yang_name="cos-traffic-class", rest_name="cos-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: None, u: None, u: None, u: None, u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "list",
: ,
})
self.__cos_traffic_class = t
if hasattr(self, ):
self._set() | Setter method for cos_traffic_class, mapped from YANG variable /qos/map/cos_traffic_class (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_cos_traffic_class is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cos_traffic_class() directly. |
379,357 | def transform(self, X):
if self.timeseries_container is None:
raise RuntimeError("You have to provide a time series using the set_timeseries_container function before.")
timeseries_container_X = restrict_input_to_index(self.timeseries_container, self.column_id, X.index)
extracted_features = extract_features(timeseries_container_X,
default_fc_parameters=self.default_fc_parameters,
kind_to_fc_parameters=self.kind_to_fc_parameters,
column_id=self.column_id, column_sort=self.column_sort,
column_kind=self.column_kind, column_value=self.column_value,
chunksize=self.chunksize,
n_jobs=self.n_jobs, show_warnings=self.show_warnings,
disable_progressbar=self.disable_progressbar,
impute_function=self.impute_function,
profile=self.profile,
profiling_filename=self.profiling_filename,
profiling_sorting=self.profiling_sorting)
X = pd.merge(X, extracted_features, left_index=True, right_index=True, how="left")
return X | Add the features calculated using the timeseries_container and add them to the corresponding rows in the input
pandas.DataFrame X.
To save some computing time, you should only include those time series in the container that you
need. You can set the timeseries container with the method :func:`set_timeseries_container`.
:param X: the DataFrame to which the calculated timeseries features will be added. This is *not* the
dataframe with the timeseries itself.
:type X: pandas.DataFrame
:return: The input DataFrame, but with added features.
:rtype: pandas.DataFrame |
379,358 | def _sd_handler(self, desc_type, unit, desc, show_on_keypad):
if desc_type not in self._descriptions_in_progress:
LOG.debug("Text description response ignored for " + str(desc_type))
return
(max_units, results, callback) = self._descriptions_in_progress[desc_type]
if unit < 0 or unit >= max_units:
callback(results)
del self._descriptions_in_progress[desc_type]
return
results[unit] = desc
self.send(sd_encode(desc_type=desc_type, unit=unit+1)) | Text description |
379,359 | def request_openbus(self, service, endpoint, **kwargs):
if service == 'bus':
endpoints = ENDPOINTS_BUS
elif service == 'geo':
endpoints = ENDPOINTS_GEO
else:
return None
if endpoint not in endpoints:
return None
url = URL_OPENBUS + endpoints[endpoint]
kwargs['idClient'] = self._emt_id
kwargs['passKey'] = self._emt_pass
return requests.post(url, data=kwargs, verify=True).json() | Make a request to the given endpoint of the ``openbus`` server.
This returns the plain JSON (dict) response which can then be parsed
using one of the implemented types.
Args:
service (str): Service to fetch ('bus' or 'geo').
endpoint (str): Endpoint to send the request to.
This string corresponds to the key in the ``ENDPOINTS`` dict.
**kwargs: Request arguments.
Returns:
Obtained response (dict) or None if the endpoint was not found. |
379,360 | def enbase64(byte_str):
if isinstance(byte_str, str) and not PYTHON2:
byte_str = bytes(byte_str, 'utf-8')
return base64.b64encode(byte_str) | Encode bytes/strings to base64.
Args:
- ``byte_str``: The string or bytes to base64 encode.
Returns:
- byte_str encoded as base64. |
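A minimal round-trip check of the helper above (a sketch assuming Python 3, so the str branch applies, and that enbase64 and PYTHON2 are importable from the module):

import base64

encoded = enbase64("hello")                      # str input is converted to bytes first
assert encoded == base64.b64encode(b"hello")     # b'aGVsbG8='
assert base64.b64decode(encoded) == b"hello"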
379,361 | def load_profiles_definitions(filename):
with open(filename, ) as fp:
profile_definitions = yaml.load(fp)
profiles_dict = NocaseDict()
for profile in profile_definitions:
value = ProfileDef(profile["central_class"],
profile["scoping_class"],
profile["scoping_path"],
profile[],
profile[])
key = "%s:%s" % (profile["registered_org"], profile["registered_name"])
profiles_dict[key] = value
return profiles_dict | Load the registered profiles defined in the file filename. This is a
yml file that defines the basic characteristics of each profile with the
following variables:
It produces a dictionary that can be accessed with the a string that
defines the profile organization and name in the form <org>:<profile name>
}, |
379,362 | def run(argv=argv):
config_file = kwconfig.manage(
file_path=resource_filename(Requirement.parse(), ),
defaults={
: ,
: ,
: 3,
: ,
: ,
: 10})
if len(argv) > 1:
if argv[1] == :
open_new_tab(_doc_link)
exit()
elif argv[1] == :
open_new_tab(_cse_link)
exit()
config_file.command(argv, i=1, doc=__doc__, quit=True, silent=False)
kwargs = kwconfig.parse(argv[2:])
kwargs['q'] = argv[1]
kwargs = config_file.add(kwargs)
buildargs = {}
cseargs = {}
saveargs = {}
optionargs = {}
for k, v in kwargs.items():
if 'build_' == k[0:6]:
buildargs[k[6:]] = v
elif 'save_' == k[0:5]:
saveargs[k[5:]] = v
elif 'option_' == k[0:7]:
optionargs[k[7:]] = v
else:
cseargs[k] = v
results = search_google.api.results(buildargs, cseargs)
if in optionargs:
if optionargs[].lower() != :
results.preview(n=int(optionargs[]))
if in saveargs:
results.save_links(saveargs[])
if in saveargs:
results.save_metadata(saveargs[])
if in saveargs:
results.download_links(saveargs[]) | Runs the search_google command line tool.
This function runs the search_google command line tool
in a terminal. It was intended for use inside a py file
(.py) to be executed using python.
Notes:
* ``[q]`` reflects key ``q`` in the ``cseargs`` parameter for :class:`api.results`
* Optional arguments with ``build_`` are keys in the ``buildargs`` parameter for :class:`api.results`
For distribution, this function must be defined in the following files::
# In 'search_google/search_google/__main__.py'
from .cli import run
run()
# In 'search_google/search_google.py'
from search_google.cli import run
if __name__ == '__main__':
run()
# In 'search_google/__init__.py'
__entry_points__ = {'console_scripts': ['search_google=search_google.cli:run']}
Examples::
# Import google_streetview for the cli module
import search_google.cli
# Create command line arguments
argv = [
'cli.py',
'google',
'--searchType=image',
'--build_developerKey=your_dev_key',
'--cx=your_cx_id'
'--num=1'
]
# Run command line
search_google.cli.run(argv) |
379,363 | def wait_for_import_to_complete(self, import_id, region=):
task_running = True
while task_running:
import_status_cmd = "aws ec2 --profile {} --region {} --output json describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id)
res = subprocess.check_output(shlex.split(import_status_cmd))
print "Current status: {}".format(res)
res_json = json.loads(res)
task_running, image_id = self.check_task_status_and_id(res_json) | Monitors the status of aws import, waiting for it to complete, or error out
:param import_id: id of import task to monitor |
379,364 | def extendManager(mixinClass):
class MixinManager(models.Manager, mixinClass):
class MixinQuerySet(models.query.QuerySet, mixinClass):
pass
def get_queryset(self):
return self.MixinQuerySet(self.model, using = self._db)
return MixinManager() | Use as a class decorator to add extra methods to your model manager.
Example usage:
class Article(django.db.models.Model):
published = models.DateTimeField()
...
@extendManager
class objects(object):
def getPublished(self):
return self.filter(published__lte = django.utils.timezone.now()).order_by('-published')
...
publishedArticles = Article.objects.getPublished() |
379,365 | def pltnp(point, v1, v2, v3):
point = stypes.toDoubleVector(point)
v1 = stypes.toDoubleVector(v1)
v2 = stypes.toDoubleVector(v2)
v3 = stypes.toDoubleVector(v3)
pnear = stypes.emptyDoubleVector(3)
dist = ctypes.c_double()
libspice.pltnp_c(point, v1, v2, v3, pnear, ctypes.byref(dist))
return stypes.cVectorToPython(pnear), dist.value | Find the nearest point on a triangular plate to a given point.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pltnp_c.html
:param point: A point in 3-dimensional space.
:type point: 3-Element Array of floats
:param v1: Vertices of a triangular plate.
:type v1: 3-Element Array of floats
:param v2: Vertices of a triangular plate.
:type v2: 3-Element Array of floats
:param v3: Vertices of a triangular plate.
:type v3: 3-Element Array of floats
:return: the nearest point on a triangular plate to a given point and distance
:rtype: tuple |
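A quick numeric sanity check of the wrapper (hypothetical values; assumes spiceypy is installed, and since the routine is pure geometry no SPICE kernels need to be loaded):

import spiceypy

# plate spanning the plane x + y + z = 1, query point off the plate
pnear, dist = spiceypy.pltnp([2.0, 2.0, 2.0],
                             [1.0, 0.0, 0.0],
                             [0.0, 1.0, 0.0],
                             [0.0, 0.0, 1.0])
# pnear is approximately (1/3, 1/3, 1/3) and dist is approximately 5/sqrt(3) ~ 2.887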
379,366 | def require_exp_directory(f):
error = "The current directory is not a valid Dallinger experiment."
@wraps(f)
def wrapper(**kwargs):
if not verify_directory(kwargs.get("verbose")):
raise click.UsageError(error)
return f(**kwargs)
return wrapper | Decorator to verify that a command is run inside a valid Dallinger
experiment directory. |
379,367 | def get_python_symbol_icons(oedata):
class_icon = ima.icon()
method_icon = ima.icon()
function_icon = ima.icon()
private_icon = ima.icon()
super_private_icon = ima.icon()
symbols = process_python_symbol_data(oedata)
fold_levels = sorted(list(set([s[2] for s in symbols])))
parents = [None]*len(symbols)
icons = [None]*len(symbols)
indexes = []
parent = None
for level in fold_levels:
for index, item in enumerate(symbols):
line, name, fold_level, token = item
if index in indexes:
continue
if fold_level == level:
indexes.append(index)
parent = item
else:
parents[index] = parent
for index, item in enumerate(symbols):
parent = parents[index]
if item[-1] == :
icons[index] = function_icon
elif item[-1] == :
icons[index] = class_icon
else:
icons[index] = QIcon()
if parent is not None:
if parent[-1] == :
if item[-1] == and item[1].startswith():
icons[index] = super_private_icon
elif item[-1] == and item[1].startswith():
icons[index] = private_icon
else:
icons[index] = method_icon
return icons | Return a list of icons for oedata of a python file. |
379,368 | def delete(self, filename, storage_type=None, bucket_name=None):
if not (storage_type and bucket_name):
self._delete_local(filename)
else:
if storage_type != 's3':
raise ValueError('Unknown storage type: %s' % storage_type)
self._delete_s3(filename, bucket_name) | Deletes the specified file, either locally or from S3, depending on the file's storage type. |
379,369 | def mapper_from_partial_prior_arguments(self, arguments):
original_prior_dict = {prior: prior for prior in self.priors}
return self.mapper_from_prior_arguments({**original_prior_dict, **arguments}) | Creates a new model mapper from a dictionary mapping existing priors to new priors, keeping existing
priors where no mapping is provided.
Parameters
----------
arguments: {Prior: Prior}
A dictionary mapping priors to priors
Returns
-------
model_mapper: ModelMapper
A new model mapper with updated priors. |
379,370 | def analyze(self, scratch, **kwargs):
scripts_set = set()
for script in self.iter_scripts(scratch):
if script[0].type.text == :
continue
blocks_list = []
for name, _, _ in self.iter_blocks(script.blocks):
blocks_list.append(name)
blocks_tuple = tuple(blocks_list)
if blocks_tuple in scripts_set:
if len(blocks_list) > 3:
self.total_duplicate += 1
self.list_duplicate.append(blocks_list)
else:
scripts_set.add(blocks_tuple) | Run and return the results from the DuplicateScripts plugin.
Only takes into account scripts with more than 3 blocks. |
379,371 | def from_connections(cls, caption, connections):
root = ET.Element(, caption=caption, version=, inline=)
outer_connection = ET.SubElement(root, )
outer_connection.set(, )
named_conns = ET.SubElement(outer_connection, )
for conn in connections:
nc = ET.SubElement(named_conns,
,
name=_make_unique_name(conn.dbclass),
caption=conn.server)
nc.append(conn._connectionXML)
return cls(root) | Create a new Data Source give a list of Connections. |
379,372 | def start(self):
def _heartbeat():
if not self._client.lifecycle.is_live:
return
self._heartbeat()
self._heartbeat_timer = self._client.reactor.add_timer(self._heartbeat_interval, _heartbeat)
self._heartbeat_timer = self._client.reactor.add_timer(self._heartbeat_interval, _heartbeat) | Starts sending periodic HeartBeat operations. |
379,373 | def init_app(self, app):
self.init_extension(app)
if not hasattr(app, 'extensions'):
app.extensions = {}
classname = self.__class__.__name__
extname = classname.replace(, ).lower()
app.extensions[extname] = self | Initialize extension to the given application.
Extension will be registered to `app.extensions` with lower classname
as key and instance as value.
:param app: Flask application. |
379,374 | def log_start(task, logger="TaskLogger"):
tasklogger = get_tasklogger(logger)
tasklogger.start_task(task)
return tasklogger | Begin logging of a task
Convenience function to log a task in the default
TaskLogger
Parameters
----------
task : str
Name of the task to be started
logger : str, optional (default: "TaskLogger")
Unique name of the logger to retrieve
Returns
-------
logger : TaskLogger |
379,375 | def iterate(iterator, n=None):
ensure_iterable(iterator)
if n is None:
deque(iterator, maxlen=0)
else:
next(islice(iterator, n, n), None) | Efficiently advances the iterator N times; by default goes to its end.
The actual loop is done "in C" and hence it is faster than equivalent 'for'.
:param n: How much the iterator should be advanced.
If None, it will be advanced until the end. |
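A short usage sketch of iterate (assuming the function and its ensure_iterable helper are importable):

nums = iter(range(10))
iterate(nums, 3)                    # consume the first three items
assert next(nums) == 3

letters = iter("abc")
iterate(letters)                    # n=None: run the iterator to exhaustion
assert next(letters, None) is None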
379,376 | def remote_archive(class_obj: type) -> type:
assert isinstance(class_obj, type), "class_obj is not a Class"
global _remote_archive_resource_type
_remote_archive_resource_type = class_obj
return class_obj | Decorator to annotate the RemoteArchive class. Registers the decorated class
as the RemoteArchive known type. |
379,377 | def build(outdir):
print("Building your Blended files into a website!")
reload(sys)
sys.setdefaultencoding('utf-8')
build_files(outdir)
print("The files are built! You can find them in the " + outdir +
"/ directory. Run the view command to see what you have created in a web browser.") | Blends the generated files and outputs a HTML website |
379,378 | def gamma(ranks_list1,ranks_list2):
return [num_tied_x, num_tied_y, num_tied_xy, gamma_corr_coeff] | Goodman and Kruskal's gamma correlation coefficient
:param ranks_list1: a list of ranks (integers)
:param ranks_list2: a second list of ranks (integers) of equal length with corresponding entries
:return: Gamma correlation coefficient (rank correlation ignoring ties) |
379,379 | def slice_rates_to_data(self, strain):
output_variables = list(strain.data)
cond = (isinstance(strain.target_magnitudes, np.ndarray) or
isinstance(strain.target_magnitudes, list))
if cond:
magnitude_list = [ % mag for mag in strain.target_magnitudes]
else:
return strain, output_variables
assert np.shape(strain.seismicity_rate)[0] == \
strain.get_number_observations()
for iloc, magnitude in enumerate(magnitude_list):
strain.data[magnitude] = strain.seismicity_rate[:, iloc]
output_variables.extend(magnitude_list)
return strain, output_variables | For the strain data, checks to see if seismicity rates have been
calculated. If so, each column in the array is sliced and stored as a
single vector in the strain.data dictionary with the corresponding
magnitude as a key.
:param strain:
Instance of :class: openquake.hmtk.strain.geodetic_strain.GeodeticStrain
:returns:
strain - Instance of strain class with updated data dictionary
output_variables - Updated list of headers |
379,380 | def check_event_coverage(patterns, event_list):
proportions = []
for pattern_list in patterns:
proportion = 0
for pattern in pattern_list:
for node in pattern.nodes():
if node in event_list:
proportion += 1.0 / len(pattern_list)
break
proportions.append(proportion)
return proportions | Calculate the ratio of patterns that were extracted. |
379,381 | def solver(A, config):
A = make_csr(A)
try:
return \
smoothed_aggregation_solver(A,
B=config[],
BH=config[],
smooth=config[],
strength=config[],
max_levels=config[],
max_coarse=config[],
coarse_solver=config[],
symmetry=config[],
aggregate=config[],
presmoother=config[],
postsmoother=config[],
keep=config[])
except BaseException:
raise TypeError() | Generate an SA solver given matrix A and a configuration.
Parameters
----------
A : array, matrix, csr_matrix, bsr_matrix
Matrix to invert, CSR or BSR format preferred for efficiency
config : dict
A dictionary of solver configuration parameters that is used to
generate a smoothed aggregation solver
Returns
-------
ml : smoothed_aggregation_solver
smoothed aggregation hierarchy
Notes
-----
config must contain the following parameter entries for
smoothed_aggregation_solver: symmetry, smooth, presmoother, postsmoother,
B, strength, max_levels, max_coarse, coarse_solver, aggregate, keep
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg import solver_configuration,solver
>>> A = poisson((40,40),format='csr')
>>> config = solver_configuration(A,verb=False)
>>> ml = solver(A,config) |
379,382 | def generate_not(self):
not_definition = self._definition['not']
if not_definition is True:
self.l()
elif not_definition is False:
return
elif not not_definition:
with self.l(, self._variable):
self.l()
else:
with self.l():
self.generate_func_code_block(not_definition, self._variable, self._variable_name)
self.l()
self.l() | Means that value have not to be valid by this definition.
.. code-block:: python
{'not': {'type': 'null'}}
Valid values for this definition are 'hello', 42, {} ... but not None.
Since draft 06 definition can be boolean. False means nothing, True
means everything is invalid. |
379,383 | def transform(self, X, mean=None, lenscale=None):
mean = self._check_dim(X.shape[1], mean, paramind=0)
lenscale = self._check_dim(X.shape[1], lenscale, paramind=1)
VX = self._makeVX(X / lenscale)
mX = X.dot(mean)[:, np.newaxis]
Phi = np.hstack((np.cos(VX + mX), np.sin(VX + mX),
np.cos(VX - mX), np.sin(VX - mX))) / \
np.sqrt(2 * self.n)
return Phi | Apply the spectral mixture component basis to X.
Parameters
----------
X: ndarray
(N, d) array of observations where N is the number of samples, and
d is the dimensionality of X.
mean: ndarray, optional
array of shape (d,) frequency means (one for each dimension of X).
If not input, this uses the value of the initial mean.
lenscale: ndarray, optional
array of shape (d,) length scales (one for each dimension of X). If
not input, this uses the value of the initial length scale.
Returns
-------
ndarray:
of shape (N, 4*nbases) where nbases is number of random bases to
use, given in the constructor (to nearest larger two power). |
379,384 | def add_directory(self, iso_path=None, rr_name=None, joliet_path=None,
file_mode=None, udf_path=None):
if not self._initialized:
raise pycdlibexception.PyCdlibInvalidInput()
if iso_path is None and joliet_path is None and udf_path is None:
raise pycdlibexception.PyCdlibInvalidInput()
if file_mode is not None and not self.rock_ridge:
raise pycdlibexception.PyCdlibInvalidInput()
if file_mode is None:
file_mode = 0o040555
num_bytes_to_add = 0
if iso_path is not None:
iso_path_bytes = utils.normpath(iso_path)
new_rr_name = self._check_rr_name(rr_name)
depth = len(utils.split_path(iso_path_bytes))
if not self.rock_ridge and self.enhanced_vd is None:
_check_path_depth(iso_path_bytes)
(name, parent) = self._iso_name_and_parent_from_path(iso_path_bytes)
_check_iso9660_directory(name, self.interchange_level)
relocated = False
fake_dir_rec = None
orig_parent = None
iso9660_name = name
if self.rock_ridge and (depth % 8) == 0 and self.enhanced_vd is None:
num_bytes_to_add += self._find_or_create_rr_moved()
fake_dir_rec = dr.DirectoryRecord()
fake_dir_rec.new_dir(self.pvd, name, parent,
self.pvd.sequence_number(),
self.rock_ridge, new_rr_name,
self.pvd.logical_block_size(), True, False,
self.xa, file_mode)
num_bytes_to_add += self._add_child_to_dr(fake_dir_rec,
self.pvd.logical_block_size())
iso9660_name = name + ( % (index)).encode()
index += 1
break
else:
break
rec = dr.DirectoryRecord()
rec.new_dir(self.pvd, iso9660_name, parent,
self.pvd.sequence_number(), self.rock_ridge, new_rr_name,
self.pvd.logical_block_size(), False, relocated,
self.xa, file_mode)
num_bytes_to_add += self._add_child_to_dr(rec, self.pvd.logical_block_size())
if rec.rock_ridge is not None:
if relocated:
fake_dir_rec.rock_ridge.cl_to_moved_dr = rec
rec.rock_ridge.moved_to_cl_dr = fake_dir_rec
num_bytes_to_add += self._update_rr_ce_entry(rec)
self._create_dot(self.pvd, rec, self.rock_ridge, self.xa, file_mode)
parent_file_mode = -1
if parent.rock_ridge is not None:
parent_file_mode = parent.rock_ridge.get_file_mode()
else:
if parent.is_root:
parent_file_mode = file_mode
dotdot = self._create_dotdot(self.pvd, rec, self.rock_ridge,
relocated, self.xa, parent_file_mode)
if dotdot.rock_ridge is not None and relocated:
dotdot.rock_ridge.parent_link = orig_parent
ptr = path_table_record.PathTableRecord()
ptr.new_dir(iso9660_name)
num_bytes_to_add += self._add_to_ptr_size(ptr) + self.pvd.logical_block_size()
rec.set_ptr(ptr)
if joliet_path is not None:
num_bytes_to_add += self._add_joliet_dir(self._normalize_joliet_path(joliet_path))
if udf_path is not None:
if self.udf_root is None:
raise pycdlibexception.PyCdlibInvalidInput()
log_block_size = self.pvd.logical_block_size()
udf_path_bytes = utils.normpath(udf_path)
(udf_name, udf_parent) = self._udf_name_and_parent_from_path(udf_path_bytes)
file_ident = udfmod.UDFFileIdentifierDescriptor()
file_ident.new(True, False, udf_name, udf_parent)
num_new_extents = udf_parent.add_file_ident_desc(file_ident, log_block_size)
num_bytes_to_add += num_new_extents * log_block_size
file_entry = udfmod.UDFFileEntry()
file_entry.new(0, 'dir', udf_parent, log_block_size)
file_ident.file_entry = file_entry
file_entry.file_ident = file_ident
num_bytes_to_add += log_block_size
udf_dotdot = udfmod.UDFFileIdentifierDescriptor()
udf_dotdot.new(True, True, b'', udf_parent)
num_new_extents = file_ident.file_entry.add_file_ident_desc(udf_dotdot, log_block_size)
num_bytes_to_add += num_new_extents * log_block_size
self.udf_logical_volume_integrity.logical_volume_impl_use.num_dirs += 1
self._finish_add(0, num_bytes_to_add) | Add a directory to the ISO. At least one of an iso_path, joliet_path,
or udf_path must be provided. Providing joliet_path on a non-Joliet
ISO, or udf_path on a non-UDF ISO, is an error. If the ISO contains
Rock Ridge, then a Rock Ridge name must be provided.
Parameters:
iso_path - The ISO9660 absolute path to use for the directory.
rr_name - The Rock Ridge name to use for the directory.
joliet_path - The Joliet absolute path to use for the directory.
file_mode - The POSIX file mode to use for the directory. This only
applies for Rock Ridge ISOs.
udf_path - The UDF absolute path to use for the directory.
Returns:
Nothing. |
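For context, a minimal usage sketch of pycdlib's public API around the method above, assuming a fresh Rock Ridge ISO; the paths and names here are illustrative only:
.. code-block:: python
    import pycdlib

    iso = pycdlib.PyCdlib()
    iso.new(rock_ridge='1.09')                  # Rock Ridge ISO, so rr_name is required
    iso.add_directory('/DIR1', rr_name='dir1')
    iso.add_directory('/DIR1/SUBDIR1', rr_name='subdir1')
    iso.write('example.iso')
    iso.close()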
379,385 | def get_last_week_range(weekday_start="Sunday"):
today = date.today()
start_of_week = snap_to_beginning_of_week(today, weekday_start) - timedelta(weeks=1)
end_of_week = start_of_week + timedelta(days=6)
return (start_of_week, end_of_week) | Gets the date for the first and the last day of the previous complete week.
:param weekday_start: Either "Monday" or "Sunday", indicating the first day of the week.
:returns: A tuple containing two date objects, for the first and the last day of the week
respectively. |
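A self-contained sketch of the same calculation, assuming a Monday week start and inlining the snapping step that snap_to_beginning_of_week presumably provides:
.. code-block:: python
    from datetime import date, timedelta

    def last_week_range_monday(today=None):
        today = today or date.today()
        # Snap to the Monday of the current week, then step back one week.
        start_of_week = today - timedelta(days=today.weekday()) - timedelta(weeks=1)
        end_of_week = start_of_week + timedelta(days=6)
        return start_of_week, end_of_week

    print(last_week_range_monday(date(2024, 1, 10)))
    # (datetime.date(2024, 1, 1), datetime.date(2024, 1, 7))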
379,386 | def add(self, factory, component, properties=None):
with self.__lock:
if component in self.__names:
raise ValueError(
"Component name already queued: {0}".format(component)
)
if properties is None:
properties = {}
self.__names[component] = factory
self.__queue.setdefault(factory, {})[component] = properties
try:
with use_ipopo(self.__context) as ipopo:
self._try_instantiate(ipopo, factory, component)
except BundleException:
pass | Enqueues the instantiation of the given component
:param factory: Factory name
:param component: Component name
:param properties: Component properties
:raise ValueError: Component name already reserved in the queue
:raise Exception: Error instantiating the component |
379,387 | def addVariantSet(self):
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
dataUrls = self._args.dataFiles
name = self._args.name
if len(dataUrls) == 1:
if self._args.name is None:
name = getNameFromPath(dataUrls[0])
if os.path.isdir(dataUrls[0]):
vcfDir = dataUrls[0]
pattern = os.path.join(vcfDir, "*.vcf.gz")
dataUrls = glob.glob(pattern)
if len(dataUrls) == 0:
raise exceptions.RepoManagerException(
"Cannot find any VCF files in the directory "
".".format(vcfDir))
dataUrls[0] = self._getFilePath(dataUrls[0],
self._args.relativePath)
elif self._args.name is None:
raise exceptions.RepoManagerException(
"Cannot infer the intended name of the VariantSet when "
"more than one VCF file is provided. Please provide a "
"name argument using --name.")
parsed = urlparse.urlparse(dataUrls[0])
if parsed.scheme not in ['http', 'ftp']:
    dataUrls = map(lambda url: self._getFilePath(
        url, self._args.relativePath), dataUrls)
indexFiles = self._args.indexFiles
if indexFiles is None:
    # Index locations can only be inferred for VCFs stored locally.
    for dataUrl in dataUrls:
        if parsed.scheme in ['http', 'ftp']:
            raise exceptions.RepoManagerException(
                "Cannot infer an index for the remote VCF '{}'. "
                "Index files must be "
                "stored locally if the default index location is "
                "used. If you are trying to create a VariantSet "
                "based on remote URLs, please download the index "
                "files to the local file system and provide them "
                "with the --indexFiles argument".format(dataUrl))
    indexSuffix = ".tbi"
    indexFiles = [filename + indexSuffix for filename in dataUrls]
indexFiles = map(lambda url: self._getFilePath(
    url, self._args.relativePath), indexFiles)
variantSet = variants.HtslibVariantSet(dataset, name)
variantSet.populateFromFile(dataUrls, indexFiles)
referenceSetName = self._args.referenceSetName
if referenceSetName is None:
referenceSetName = variantSet.getVcfHeaderReferenceSetName()
if referenceSetName is None:
raise exceptions.RepoManagerException(
"Cannot infer the ReferenceSet from the VCF header. Please "
"specify the ReferenceSet to associate with this "
"VariantSet using the --referenceSetName option")
referenceSet = self._repo.getReferenceSetByName(referenceSetName)
variantSet.setReferenceSet(referenceSet)
variantSet.setAttributes(json.loads(self._args.attributes))
annotationSets = []
if variantSet.isAnnotated() and self._args.addAnnotationSets:
ontologyName = self._args.ontologyName
if ontologyName is None:
raise exceptions.RepoManagerException(
"A sequence ontology name must be provided")
ontology = self._repo.getOntologyByName(ontologyName)
self._checkSequenceOntology(ontology)
for annotationSet in variantSet.getVariantAnnotationSets():
annotationSet.setOntology(ontology)
annotationSets.append(annotationSet)
def updateRepo():
self._repo.insertVariantSet(variantSet)
for annotationSet in annotationSets:
self._repo.insertVariantAnnotationSet(annotationSet)
self._updateRepo(updateRepo) | Adds a new VariantSet into this repo. |
379,388 | def to_json(self):
json_dict = self.to_json_basic()
json_dict['relay_channels'] = self.relay_channels
return json.dumps(json_dict) | :return: str |
379,389 | def winner(self):
for c in 'xo':
for comb in [(0,3,6), (1,4,7), (2,5,8), (0,1,2), (3,4,5), (6,7,8), (0,4,8), (2,4,6)]:
if all(self.spots[p] == c for p in comb):
return c
return None | Returns either x or o if one of them won, otherwise None |
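A standalone sketch of the same line-checking idea, assuming a board stored as a flat list of nine cells containing 'x', 'o', or '':
.. code-block:: python
    WIN_COMBOS = [(0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
                  (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
                  (0, 4, 8), (2, 4, 6)]              # diagonals

    def winner(spots):
        for c in 'xo':
            if any(all(spots[p] == c for p in combo) for combo in WIN_COMBOS):
                return c
        return None

    board = ['x', 'o', 'o',
             '',  'x', '',
             'o', '',  'x']
    print(winner(board))  # 'x' (main diagonal)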
379,390 | def zoom_out(self):
zoom = self.grid.grid_renderer.zoom
target_zoom = zoom * (1 - config["zoom_factor"])
if target_zoom > config["minimum_zoom"]:
self.zoom(target_zoom) | Zooms out by zoom factor |
379,391 | def airspeeds_encode(self, time_boot_ms, airspeed_imu, airspeed_pitot, airspeed_hot_wire, airspeed_ultrasonic, aoa, aoy):
return MAVLink_airspeeds_message(time_boot_ms, airspeed_imu, airspeed_pitot, airspeed_hot_wire, airspeed_ultrasonic, aoa, aoy) | The airspeed measured by sensors and IMU
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
airspeed_imu : Airspeed estimate from IMU, cm/s (int16_t)
airspeed_pitot : Pitot measured forward airspeed, cm/s (int16_t)
airspeed_hot_wire : Hot wire anemometer measured airspeed, cm/s (int16_t)
airspeed_ultrasonic : Ultrasonic measured airspeed, cm/s (int16_t)
aoa : Angle of attack sensor, degrees * 10 (int16_t)
aoy : Yaw angle sensor, degrees * 10 (int16_t) |
379,392 | async def save(self):
if 'vlan' in self._changed_data and self._changed_data['vlan']:
    # Assumption: the nested field flattened here is the subnet's VLAN
    # object, which the API expects as a bare id.
    self._changed_data['vlan'] = self._changed_data['vlan']['id']
    if (self._orig_data['vlan'] and
            'id' in self._orig_data['vlan'] and
            self._changed_data['vlan'] == (
                self._orig_data['vlan']['id'])):
        # The VLAN is unchanged, so drop it from the pending update.
        del self._changed_data['vlan']
await super(Subnet, self).save() | Save this subnet. |
379,393 | def _clean_streams(repo, mapped_streams):
for stream_name in ('stdout', 'stderr'):
stream = mapped_streams.get(stream_name)
if not stream:
continue
path = os.path.relpath(stream, start=repo.working_dir)
if (path, 0) not in repo.index.entries:
os.remove(stream)
else:
blob = repo.index.entries[(path, 0)].to_blob(repo)
with open(path, 'wb') as fp:
fp.write(blob.data_stream.read()) | Clean mapped standard streams. |
379,394 | def DSP_callback_toc(self):
if self.Tcapture > 0:
self.DSP_toc.append(time.time()-self.start_time) | Add new toc time to the DSP_toc list. Will not be called if
Tcapture = 0. |
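The tic/toc bookkeeping above is just a list of elapsed times relative to a start timestamp; a minimal standalone sketch (class and attribute names here are illustrative, not the original class):
.. code-block:: python
    import time

    class CallbackTimer:
        def __init__(self, capture_seconds=2.0):
            self.Tcapture = capture_seconds
            self.start_time = time.time()
            self.DSP_toc = []

        def toc(self):
            # Record elapsed time only while capturing is enabled.
            if self.Tcapture > 0:
                self.DSP_toc.append(time.time() - self.start_time)

    timer = CallbackTimer()
    for _ in range(3):
        time.sleep(0.01)
        timer.toc()
    print(timer.DSP_toc)  # three increasing elapsed-time samples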
379,395 | def headers_for_url(cls, url):
response = cls.http_request(url, method='HEAD')
if response.status != 200:
cls.raise_http_error(response)
return Resource.headers_as_dict(response) | Return the headers only for the given URL as a dict |
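A hedged standalone equivalent using only the standard library (the Resource/http_request helpers above are project-specific and not shown here):
.. code-block:: python
    import urllib.request

    def headers_for_url(url):
        # Issue a HEAD request and return the response headers as a dict.
        request = urllib.request.Request(url, method='HEAD')
        with urllib.request.urlopen(request) as response:
            if response.status != 200:
                raise RuntimeError('HEAD {} returned {}'.format(url, response.status))
            return dict(response.headers)

    print(headers_for_url('https://example.com'))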
379,396 | def entropy(self, base = 2):
entropy = 0
if not base and self.base: base = self.base
for type in self._dist:
if not base:
entropy += self._dist[type] * -math.log(self._dist[type])
else:
entropy += self._dist[type] * -math.log(self._dist[type], base)
return entropy | Compute the entropy of the distribution |
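The same Shannon entropy computation as a standalone function over a plain probability mapping (base 2 by default, matching the method's signature):
.. code-block:: python
    import math

    def entropy(dist, base=2):
        # dist maps outcomes to probabilities that sum to 1.
        return -sum(p * math.log(p, base) for p in dist.values() if p > 0)

    print(entropy({'heads': 0.5, 'tails': 0.5}))  # 1.0 bit
    print(entropy({'heads': 0.9, 'tails': 0.1}))  # ~0.47 bits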
379,397 | def _get_price_id_for_upgrade(self, package_items, option, value, public=True):
warnings.warn("use _get_price_id_for_upgrade_option() instead",
DeprecationWarning)
option_category = {
    'memory': 'ram',
    'cpus': 'guest_core',
    'nic_speed': 'port_speed'
}
category_code = option_category[option]
for item in package_items:
    is_private = (item.get('units') == 'PRIVATE_CORE')
    for price in item['prices']:
        if 'locationGroupId' in price and price['locationGroupId']:
            # Skip location-based prices.
            continue
        if 'categories' not in price:
            continue
        categories = price['categories']
        for category in categories:
            if not (category['categoryCode'] == category_code
                    and str(item['capacity']) == str(value)):
                continue
            if option == 'cpus':
                if public and not is_private:
                    return price['id']
                elif not public and is_private:
                    return price['id']
            elif option == 'nic_speed':
                if 'Public' in item['description']:
                    return price['id']
            else:
                return price['id'] | Find the price id for the option and value to upgrade.
Deprecated in favor of _get_price_id_for_upgrade_option()
:param list package_items: Contains all the items related to an VS
:param string option: Describes type of parameter to be upgraded
:param int value: The value of the parameter to be upgraded
:param bool public: CPU will be in Private/Public Node. |
379,398 | def walk_files(args, root, directory, action):
for entry in os.listdir(directory):
if is_hidden(args, entry):
continue
if is_excluded_directory(args, entry):
continue
if is_in_default_excludes(entry):
continue
if not is_included(args, entry):
continue
if is_excluded(args, entry, directory):
continue
entry = os.path.join(directory, entry)
if os.path.isdir(entry):
walk_files(args, root, entry, action)
if os.path.isfile(entry):
if is_binary(entry):
continue
action(entry) | Recursively walk the subdirectories of the given directory,
calling the action on each file. |
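A simplified standalone sketch of the same recursive pattern; the project-specific filter helpers (is_hidden, is_excluded, is_binary, ...) are reduced here to a single hidden-file check:
.. code-block:: python
    import os

    def walk_files(directory, action, skip_hidden=True):
        # Recursively visit regular files under `directory`, calling `action`
        # on every path that survives the (minimal) filtering below.
        for entry in sorted(os.listdir(directory)):
            if skip_hidden and entry.startswith('.'):
                continue
            path = os.path.join(directory, entry)
            if os.path.isdir(path):
                walk_files(path, action, skip_hidden)
            elif os.path.isfile(path):
                action(path)

    walk_files('.', print)  # print every non-hidden file under the current directory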
379,399 | def list(context, job_id, sort, limit, where, verbose):
result = job.list_files(context, id=job_id, sort=sort, limit=limit,
verbose=verbose, where=where)
utils.format_output(result, context.format, verbose=verbose) | list(context, sort, limit, where, verbose)
List all files.
>>> dcictl file-list job-id [OPTIONS]
:param string sort: Field to apply sort
:param integer limit: Max number of rows to return
:param string where: An optional filter criteria
:param boolean verbose: Display verbose output |