| Unnamed: 0 (int64, 0-389k) | code (string, length 26-79.6k) | docstring (string, length 1-46.9k) |
---|---|---|
382,500 | def rotate(self, angle, center=(0, 0)):
ca = numpy.cos(angle)
sa = numpy.sin(angle)
sa = numpy.array((-sa, sa))
c0 = numpy.array(center)
self.polygons = [(points - c0) * ca + (points - c0)[:, ::-1] * sa + c0
for points in self.polygons]
return self | Rotate this object.
Parameters
----------
angle : number
The angle of rotation (in *radians*).
center : array-like[2]
Center point for the rotation.
Returns
-------
out : ``PolygonSet``
This object. |
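A standalone sketch of the same rotation math (illustrative names, not part of the dataset row): the `[:, ::-1]` column swap routes the two sine terms onto the correct coordinates.
import numpy
def rotate_points(points, angle, center=(0, 0)):
    ca = numpy.cos(angle)
    sa = numpy.array((-numpy.sin(angle), numpy.sin(angle)))
    c0 = numpy.array(center)
    # (points - c0)[:, ::-1] swaps x and y, so the sum yields
    # x' = x*cos(a) - y*sin(a), y' = x*sin(a) + y*cos(a)
    return (points - c0) * ca + (points - c0)[:, ::-1] * sa + c0
pts = numpy.array([(1.0, 0.0), (0.0, 1.0)])
print(rotate_points(pts, numpy.pi / 2))  # [[0, 1], [-1, 0]] up to rounding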
382,501 | def groups_leave(self, room_id, **kwargs):
return self.__call_api_post('groups.leave', roomId=room_id, kwargs=kwargs) | Causes the callee to be removed from the private group, if they’re part of it and are not the last owner. |
382,502 | def _recv_callback(self, msg):
m2req = MongrelRequest.parse(msg[0])
MongrelConnection(m2req, self._sending_stream, self.request_callback,
no_keep_alive=self.no_keep_alive, xheaders=self.xheaders) | Method is called when there is a message coming from a Mongrel2 server.
This message should be a valid Request String. |
382,503 | def add_positional_embedding(x, max_length, name=None, positions=None):
with tf.name_scope("add_positional_embedding"):
_, length, depth = common_layers.shape_list(x)
var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype)
if positions is None:
pad_length = tf.maximum(0, length - max_length)
sliced = tf.cond(
tf.less(length, max_length),
lambda: tf.slice(var, [0, 0], [length, -1]),
lambda: tf.pad(var, [[0, pad_length], [0, 0]]))
return x + tf.expand_dims(sliced, 0)
else:
return x + tf.gather(var, tf.to_int32(positions)) | Adds positional embedding.
Args:
x: Tensor with shape [batch, length, depth].
max_length: int representing static maximum size of any dimension.
name: str representing name of the embedding tf.Variable.
positions: Tensor with shape [batch, length].
Returns:
Tensor of same shape as x. |
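A NumPy sketch of the positions-is-None branch above (assumed shapes, not the original TF code): slice the learned table when the sequence is shorter than max_length, zero-pad it when longer.
import numpy as np
max_length, depth, length = 8, 4, 10
var = np.random.randn(max_length, depth)          # learned position table
if length < max_length:
    sliced = var[:length]                         # tf.slice branch
else:
    pad = np.zeros((length - max_length, depth))  # tf.pad branch
    sliced = np.concatenate([var, pad], axis=0)
print(sliced.shape)  # (10, 4); broadcast-added to x as [1, length, depth]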
382,504 | def unusedoptions(self, sections):
unused = set([])
for section in _list(sections):
if not self.has_section(section):
continue
options = self.options(section)
raw_values = [self.get(section, option, raw=True) for option in options]
for option in options:
formatter = "%(" + option + ")s"
for raw_value in raw_values:
if formatter in raw_value:
break
else:
unused.add(option)
return list(unused) | Lists options that have not been used to format other values in
their sections.
Good for finding out if the user has misspelled any of the options. |
382,505 | def update(self,
message=None,
subject=None,
days=None,
downloads=None,
notify=None):
method, url = get_URL()
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'transferid': self.transfer_id,
}
data = {
'message': message or self.transfer_info.get('message'),
'subject': subject or self.transfer_info.get('subject'),
'days': days or self.transfer_info.get('days'),
'downloads': downloads or self.transfer_info.get('downloads'),
'notify': notify or self.transfer_info.get('notify')
}
payload.update(data)
res = getattr(self.session, method)(url, params=payload)
if res.status_code:
self.transfer_info.update(data)
return True
hellraiser(res) | Update properties for a transfer.
:param message: updated message to recipient(s)
:param subject: updated subject for transfer
:param days: updated amount of days transfer is available
:param downloads: updated amount of downloads allowed for transfer
:param notify: update whether to notify on downloads or not
:type message: ``str`` or ``unicode``
:type subject: ``str`` or ``unicode``
:type days: ``int``
:type downloads: ``int``
:type notify: ``bool``
:rtype: ``bool`` |
382,506 | def get_assets(self):
if self.retrieved:
raise errors.IllegalState()
self.retrieved = True
return objects.AssetList(self._results, runtime=self._runtime) | Gets the asset list resulting from a search.
return: (osid.repository.AssetList) - the asset list
raise: IllegalState - the list has already been retrieved
*compliance: mandatory -- This method must be implemented.* |
382,507 | def find_motif_disruptions(
position,
ref,
alt,
genome_fasta,
matrices,
):
import subprocess
import MOODS
max_motif_length = max([x.shape[0] for x in matrices.values()])
chrom, coords = position.split(':')
start, end = [int(x) for x in coords.split('-')]
s = '{}:{}-{}'.format(chrom, start - max_motif_length + 1, end +
max_motif_length - 1)
c = 'samtools faidx {} {}'.format(genome_fasta, s)
seq_lines = subprocess.check_output(c, shell=True).strip().split('\n')
ref_seq = seq_lines[1]
alt_seq = ref_seq[0:max_motif_length - 1] + alt + ref_seq[max_motif_length + len(ref) - 1:]
ref_variant_start = max_motif_length - 1
ref_variant_end = max_motif_length - 1 + len(ref)
alt_variant_start = max_motif_length - 1
alt_variant_end = max_motif_length - 1 + len(alt)
ms = [matrices[x].T.values.tolist() for x in matrices.keys()]
ref_res = MOODS.search(ref_seq, ms, 0.001, both_strands=True,
bg=[0.25, 0.25, 0.25, 0.25])
ref_res = dict(zip(matrices.keys(), ref_res))
alt_res = MOODS.search(alt_seq, ms, 0.001, both_strands=True,
bg=[0.25, 0.25, 0.25, 0.25])
alt_res = dict(zip(matrices.keys(), alt_res))
rows = []
for motif in ref_res.keys():
ref_res[motif] = _filter_variant_motif_res(ref_res[motif], ref_variant_start, ref_variant_end,
matrices[motif].shape[0], ref_seq)
alt_res[motif] = _filter_variant_motif_res(alt_res[motif], alt_variant_start, alt_variant_end,
matrices[motif].shape[0], alt_seq)
if len(ref_res[motif]) > 0:
ref_pos, ref_score = sorted(ref_res[motif], key=lambda x: x[1], reverse=True)[0]
ref_strand = {True: '+', False: '-'}[ref_pos > 0]
else:
ref_score = 0
ref_strand = ''
if len(alt_res[motif]) > 0:
alt_pos, alt_score = sorted(alt_res[motif], key=lambda x: x[1], reverse=True)[0]
alt_strand = {True: '+', False: '-'}[alt_pos > 0]
else:
alt_score = 0
alt_strand = ''
if ref_score > 0 or alt_score > 0:
diff = ref_score - alt_score
rows.append([motif, ref_score, ref_strand, alt_score, alt_strand, diff])
out = pd.DataFrame(rows, columns=['motif', 'ref_score', 'ref_strand',
'alt_score', 'alt_strand', 'score_diff'])
out.index = out.motif
out = out.drop('motif', axis=1)
out = out[out.score_diff != 0]
return out | Determine whether there is a difference between the ref and alt
alleles for TF binding. Requires samtools in your path.
Parameters
----------
position : str
Zero based genomic coordinates of the reference allele of the form
chrom:start-end (chr5:100-101 for a SNV for instance). The value end -
start should equal the length of the ref allele.
ref : str
Reference allele. This should match the reference sequence at "position"
in genome_fasta.
alt : str
Alternate allele.
genome_fasta : str
Path to genome fasta file. This file should be indexed.
matrices : dict
Dict whose keys are motif names and whose values are pandas data frames
or numpy arrays containing PWMs with columns ACGT.
Returns
-------
out : pandas.DataFrame
Pandas data frame with motifs whose best matches that overlapped the
variant differed between the reference and alternate sequences. A score
of zero and a strand of '' indicates that there was not a match for the
motif on the given allele. |
382,508 | def write_table(self, table, rows, append=False, gzip=False):
_write_table(self.root,
table,
rows,
self.table_relations(table),
append=append,
gzip=gzip,
encoding=self.encoding) | Encode and write out *table* to the profile directory.
Args:
table: The name of the table to write
rows: The rows to write to the table
append: If `True`, append the encoded rows to any existing
data.
gzip: If `True`, compress the resulting table with `gzip`.
The table's filename will have `.gz` appended. |
382,509 | def modfacl(acl_type, acl_name='', perms='', *args, **kwargs):
recursive = kwargs.pop('recursive', False)
raise_err = kwargs.pop('raise_err', False)
_raise_on_no_files(*args)
cmd = 'setfacl'
if recursive:
cmd += ' -R'
cmd += ' -m'
cmd = '{0} {1}{2}:{3}'.format(cmd, _acl_prefix(acl_type), acl_name, perms)
for dentry in args:
cmd += ' "{0}"'.format(dentry)
__salt__['cmd.run'](cmd, python_shell=False, raise_err=raise_err)
return True | Add or modify a FACL for the specified file(s)
CLI Examples:
.. code-block:: bash
salt '*' acl.modfacl user myuser rwx /tmp/house/kitchen
salt '*' acl.modfacl default:group mygroup rx /tmp/house/kitchen
salt '*' acl.modfacl d:u myuser 7 /tmp/house/kitchen
salt '*' acl.modfacl g mygroup 0 /tmp/house/kitchen /tmp/house/livingroom
salt '*' acl.modfacl user myuser rwx /tmp/house/kitchen recursive=True
salt '*' acl.modfacl user myuser rwx /tmp/house/kitchen raise_err=True |
382,510 | def _huffman_encode_char(cls, c):
if isinstance(c, EOS):
return cls.static_huffman_code[-1]
else:
assert(isinstance(c, int) or len(c) == 1)
return cls.static_huffman_code[orb(c)] | huffman_encode_char assumes that the static_huffman_tree was
previously initialized
@param str|EOS c: a symbol to encode
@return (int, int): the bitstring of the symbol and its bitlength
@raise AssertionError |
382,511 | def get_proto(self):
if self.proto_idx_value is None:
self.proto_idx_value = self.CM.get_proto(self.proto_idx)
return self.proto_idx_value | Return the prototype of the method
:rtype: string |
382,512 | def open_channel(self):
logger.debug('Opening a new channel')  # message inferred; original literal lost
self._connection.channel(on_open_callback=self.on_channel_open) | Open a new channel with RabbitMQ.
When RabbitMQ responds that the channel is open, the on_channel_open
callback will be invoked by pika. |
382,513 | def fromBrdict(cls, master, brdict):
cache = master.caches.get_cache("BuildRequests", cls._make_br)
return cache.get(brdict['brid'], brdict=brdict, master=master) | Construct a new L{BuildRequest} from a dictionary as returned by
L{BuildRequestsConnectorComponent.getBuildRequest}.
This method uses a cache, which may result in return of stale objects;
for the most up-to-date information, use the database connector
methods.
@param master: current build master
@param brdict: build request dictionary
@returns: L{BuildRequest}, via Deferred |
382,514 | def create(server_):
try:
if server_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'scaleway',
server_['profile'],
vm_=server_) is False:
return False
except AttributeError:
pass
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(server_['name']),
args=__utils__['cloud.filter_event']('creating', server_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Creating a BareMetal server %s', server_['name'])
access_key = config.get_cloud_config_value(
'access_key', get_configured_provider(), __opts__, search_global=False
)
commercial_type = config.get_cloud_config_value(
'commercial_type', server_, __opts__, default='C1'
)
key_filename = config.get_cloud_config_value(
'ssh_key_file', server_, __opts__, search_global=False, default=None
)
if key_filename is not None and not os.path.isfile(key_filename):
raise SaltCloudConfigError(
'The defined key_filename \'{0}\' does not exist'.format(
key_filename
)
)
ssh_password = config.get_cloud_config_value(
'ssh_password', server_, __opts__
)
kwargs = {
'name': server_['name'],
'organization': access_key,
'image': get_image(server_),
'commercial_type': commercial_type,
}
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(server_['name']),
args={
'kwargs': __utils__['cloud.filter_event']('requesting', kwargs, list(kwargs)),
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
try:
ret = create_node(kwargs)
except Exception as exc:
log.error(
'Error creating %s on Scaleway: %s',
server_['name'], exc,
exc_info_on_loglevel=logging.DEBUG
)
return False
def __query_node_data(server_name):
data = show_instance(server_name, 'action')
if data and data.get('public_ip'):
return data
return False
try:
data = salt.utils.cloud.wait_for_ip(
__query_node_data,
update_args=(server_['name'],),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', server_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', server_, __opts__, default=10),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
destroy(server_['name'])  # the instance may have come up anyway; clean it up
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
return ret | Create a single BareMetal server from a data dict. |
382,515 | def add_widgets_context(request, context):
user = context["user"]
if context["is_student"] or context["eighth_sponsor"]:
num_blocks = 6
surrounding_blocks = EighthBlock.objects.get_upcoming_blocks(num_blocks)
if context["is_student"]:
schedule, no_signup_today = gen_schedule(user, num_blocks, surrounding_blocks)
context.update({
"schedule": schedule,
"last_displayed_block": schedule[-1] if schedule else None,
"no_signup_today": no_signup_today,
"senior_graduation": settings.SENIOR_GRADUATION,
"senior_graduation_year": settings.SENIOR_GRADUATION_YEAR
})
if context["eighth_sponsor"]:
sponsor_date = request.GET.get("sponsor_date", None)
if sponsor_date:
sponsor_date = decode_date(sponsor_date)
if sponsor_date:
block = EighthBlock.objects.filter(date__gte=sponsor_date).first()
if block:
surrounding_blocks = [block] + list(block.next_blocks(num_blocks - 1))
else:
surrounding_blocks = []
sponsor_sch = gen_sponsor_schedule(user, context["eighth_sponsor"], num_blocks, surrounding_blocks, sponsor_date)
context.update(sponsor_sch)
birthdays = find_birthdays(request)
context["birthdays"] = find_visible_birthdays(request, birthdays)
sched_ctx = schedule_context(request)
context.update(sched_ctx)
return context | WIDGETS:
* Eighth signup (STUDENT)
* Eighth attendance (TEACHER or ADMIN)
* Bell schedule (ALL)
* Birthdays (ALL)
* Administration (ADMIN)
* Links (ALL)
* Seniors (STUDENT; graduation countdown if senior, link to destinations otherwise) |
382,516 | def ReadUnicodeTable(filename, nfields, doline):
if nfields < 2:
raise InputError("invalid number of fields %d" % (nfields,))
if type(filename) == str:
if filename.startswith("http://"):
fil = urllib2.urlopen(filename)
else:
fil = open(filename, "r")
else:
fil = filename
first = None
expect_last = None
lineno = 0
for line in fil:
lineno += 1
try:
sharp = line.find("
if sharp >= 0:
line = line[:sharp]
line = line.strip()
if not line:
continue
fields = [s.strip() for s in line.split(";")]
if len(fields) != nfields:
raise InputError("wrong number of fields %d %d - %s" %
(len(fields), nfields, line))
codes = _URange(fields[0])
(name, cont) = _ParseContinue(fields[1])
if expect_last is not None:
if (len(codes) != 1 or codes[0] <= first or
cont != "Last" or name != expect_last):
raise InputError("expected Last line for %s" %
(expect_last,))
codes = range(first, codes[0] + 1)
first = None
expect_last = None
fields[0] = "%04X..%04X" % (codes[0], codes[-1])
fields[1] = name
elif cont == "First":
if len(codes) != 1:
raise InputError("bad First line: range given")
expect_last = name
first = codes[0]
continue
doline(codes, fields)
except Exception, e:
print "%s:%d: %s" % (filename, lineno, e)
raise
if expect_last is not None:
raise InputError("expected Last line for %s; got EOF" %
(expect_last,)) | Generic Unicode table text file reader.
The reader takes care of stripping out comments and also
parsing the two different ways that the Unicode tables specify
code ranges (using the .. notation and splitting the range across
multiple lines).
Each non-comment line in the table is expected to have the given
number of fields. The first field is known to be the Unicode value
and the second field its description.
The reader calls doline(codes, fields) for each entry in the table.
If fn raises an exception, the reader prints that exception,
prefixed with the file name and line number, and continues
processing the file. When done with the file, the reader re-raises
the first exception encountered during the file.
Arguments:
filename: the Unicode data file to read, or a file-like object.
nfields: the number of expected fields per line in that file.
doline: the function to call for each table entry.
Raises:
InputError: nfields is invalid (must be >= 2). |
382,517 | def x_rolls(self, number, count=0, func=sum):
for x in range(number):
yield self.roll(count, func) | Iterator of number dice rolls.
:param count: [0] Return list of ``count`` sums
:param func: [sum] Apply func to list of individual die rolls func([]) |
382,518 | def visible_object_layers(self):
return (layer for layer in self.tmx.visible_layers
if isinstance(layer, pytmx.TiledObjectGroup)) | This must return layer objects
This is not required for custom data formats.
:return: Sequence of pytmx object layers/groups |
382,519 | def chebyshev_distance(point1, point2):
distance = 0.0
dimension = len(point1)
for i in range(dimension):
distance = max(distance, abs(point1[i] - point2[i]))
return distance | !
@brief Calculate Chebyshev distance between two vectors.
\f[
dist(a, b) = \max_{i}\left ( \left | a_{i} - b_{i} \right | \right );
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@return (double) Chebyshev distance between two vectors.
@see euclidean_distance_square, euclidean_distance, minkowski_distance |
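A worked example with made-up vectors: the Chebyshev distance is simply the largest per-coordinate gap.
# max(|1-4|, |2-0|, |3-3|) = max(3, 2, 0) = 3
print(chebyshev_distance([1, 2, 3], [4, 0, 3]))  # 3.0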
382,520 | def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.populate_from is not None:
kwargs['populate_from'] = self.populate_from
if self.unique_with != ():
kwargs['unique_with'] = self.unique_with
kwargs.pop('unique', None)
return name, path, args, kwargs | Deconstruct method. |
382,521 | async def create_cred(
self,
cred_offer_json,
cred_req_json: str,
cred_attrs: dict,
rr_size: int = None) -> (str, str):
LOGGER.debug(
'Issuer.create_cred >>> cred_offer_json: %s, cred_req_json: %s, cred_attrs: %s, rr_size: %s',
cred_offer_json,
cred_req_json,
cred_attrs,
rr_size)
if not self.wallet.handle:
LOGGER.debug('Issuer.create_cred <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
cd_id = json.loads(cred_offer_json)['cred_def_id']
if not ok_cred_def_id(cd_id):
LOGGER.debug('Issuer.create_cred <!< Bad cred def id %s', cd_id)
raise BadIdentifier('Bad cred def id {}'.format(cd_id))
cred_def = json.loads(await self.get_cred_def(cd_id))
if 'revocation' in cred_def['value']:
with REVO_CACHE.lock:
rr_id = Tails.current_rev_reg_id(self.dir_tails, cd_id)
tails = REVO_CACHE[rr_id].tails
assert tails
try:
(cred_json, cred_revoc_id, _) = await anoncreds.issuer_create_credential(
self.wallet.handle,
cred_offer_json,
cred_req_json,
json.dumps({k: cred_attr_value(cred_attrs[k]) for k in cred_attrs}),
rr_id,
tails.reader_handle)
rv = (cred_json, cred_revoc_id)
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.AnoncredsRevocationRegistryFullError:
(tag, rr_size_suggested) = Tails.next_tag(self.dir_tails, cd_id)
rr_id = rev_reg_id(cd_id, tag)
if self.rrbx:
await self._set_rev_reg(rr_id, rr_size)
else:
await self.rrb.create_rev_reg(rr_id, rr_size or rr_size_suggested)
await self._send_rev_reg_def(rr_id)
REVO_CACHE[rr_id].tails = await Tails(self.dir_tails, cd_id).open()
return await self.create_cred(cred_offer_json, cred_req_json, cred_attrs)
LOGGER.debug('Issuer.create_cred <!< cannot create cred, indy error code %s', x_indy.error_code)
raise
else:
try:
(cred_json, _, _) = await anoncreds.issuer_create_credential(
self.wallet.handle,
cred_offer_json,
cred_req_json,
json.dumps({k: cred_attr_value(cred_attrs[k]) for k in cred_attrs}),
None,
None)
rv = (cred_json, None)
except IndyError as x_indy:
LOGGER.debug('Issuer.create_cred <!< cannot create cred, indy error code %s', x_indy.error_code)
raise
LOGGER.debug('Issuer.create_cred <<< %s', rv)
return rv | Create credential as Issuer out of credential request and dict of key:value (raw, unencoded)
entries for attributes.
Return credential json, and if cred def supports revocation, credential revocation identifier.
Raise WalletState for closed wallet.
If the credential definition supports revocation, and the current revocation registry is full,
the processing creates a new revocation registry en passant. Depending on the revocation
registry size (by default starting at 64 and doubling iteratively through a maximum of 100000)
and the revocation registry builder posture (see RevRegBuilder.__init__()), this operation may
delay credential creation by several seconds. The use of an external revocation registry builder
runs a parallel process, skirting this delay, but is more costly at initialization.
:param cred_offer_json: credential offer json as created by Issuer
:param cred_req_json: credential request json as created by HolderProver
:param cred_attrs: dict mapping each attribute to its original value (the operation encodes it); e.g.,
::
{
'favourite_drink': 'martini',
'height': 180,
'last_visit_date': '2017-12-31',
'weaknesses': None
}
:param rr_size: size of new revocation registry (default as per RevRegBuilder.create_rev_reg()) if necessary
:return: tuple with newly issued credential json, credential revocation identifier (if cred def
supports revocation, None otherwise). |
382,522 | def bookmarks_changed(self):
bookmarks = self.editor.get_bookmarks()
if self.editor.bookmarks != bookmarks:
self.editor.bookmarks = bookmarks
self.sig_save_bookmarks.emit(self.filename, repr(bookmarks)) | Bookmarks list has changed. |
382,523 | def has_gradient(self):
try:
self.__model.gradient
self.__model.predictions_and_gradient
except AttributeError:
return False
else:
return True | Returns true if _backward and _forward_backward can be called
by an attack, False otherwise. |
382,524 | def ws_url(self):
proto = self.request.protocol.replace('http', 'ws')
host = self.application.ipython_app.websocket_host
if host == '':
host = self.request.host
return "%s://%s" % (proto, host) | websocket url matching the current request
turns http[s]://host[:port] into
ws[s]://host[:port] |
382,525 | def open(server=None, url=None, ip=None, port=None, name=None, https=None, auth=None, verify_ssl_certificates=True,
proxy=None, cookies=None, verbose=True, _msgs=None):
if server is not None:
assert_is_type(server, H2OLocalServer)
assert_is_type(ip, None, "`ip` should be None when `server` parameter is supplied")
assert_is_type(url, None, "`url` should be None when `server` parameter is supplied")
assert_is_type(name, None, "`name` should be None when `server` parameter is supplied")
if not server.is_running():
raise H2OConnectionError("Unable to connect to server because it is not running")
ip = server.ip
port = server.port
scheme = server.scheme
context_path = ''
elif url is not None:
assert_is_type(url, str)
assert_is_type(ip, None, "`ip` should be None when `url` parameter is supplied")
assert_is_type(name, str, None)
if verify_ssl_certificates is None: verify_ssl_certificates = True
assert_is_type(verify_ssl_certificates, bool)
assert_is_type(proxy, str, None)
assert_is_type(auth, AuthBase, (str, str), None)
assert_is_type(cookies, str, [str], None)
assert_is_type(_msgs, None, (str, str, str))
conn = H2OConnection()
conn._verbose = bool(verbose)
conn._local_server = server
conn._base_url = "%s://%s:%d%s" % (scheme, ip, port, context_path)
conn._name = server.name if server else name
conn._verify_ssl_cert = bool(verify_ssl_certificates)
conn._auth = auth
conn._cookies = cookies
conn._proxies = None
if proxy and proxy != "(default)":
conn._proxies = {scheme: proxy}
elif not proxy:
for name in os.environ:
if name.lower() == scheme + "_proxy":
warn("Proxy is defined in the environment: %s. "
"This may interfere with your H2O Connection." % name)
try:
retries = 20 if server else 5
conn._stage = 1
conn._timeout = 3.0
conn._cluster = conn._test_connection(retries, messages=_msgs)
conn._timeout = None
atexit.register(lambda: conn.close())
except Exception:
conn._stage = 0
raise
return conn | r"""
Establish connection to an existing H2O server.
The connection is not kept alive, so what this method actually does is it attempts to connect to the
specified server, and checks that the server is healthy and responds to REST API requests. If the H2O server
cannot be reached, an :class:`H2OConnectionError` will be raised. On success this method returns a new
:class:`H2OConnection` object, and it is the only "official" way to create instances of this class.
There are 3 ways to specify the target to connect to (these settings are mutually exclusive):
* pass a ``server`` option,
* pass the full ``url`` for the connection,
* provide a triple of parameters ``ip``, ``port``, ``https``.
:param H2OLocalServer server: connect to the specified local server instance. There is a slight difference
between connecting to a local server by specifying its ip and address, and connecting through
an H2OLocalServer instance: if the server becomes unresponsive, then having access to its process handle
will allow us to query the server status through OS, and potentially provide snapshot of the server's
error log in the exception information.
:param url: full url of the server to connect to.
:param ip: target server's IP address or hostname (default "localhost").
:param port: H2O server's port (default 54321).
:param name: H2O cluster name.
:param https: if True then connect using https instead of http (default False).
:param verify_ssl_certificates: if False then SSL certificate checking will be disabled (default True). This
setting should rarely be disabled, as it makes your connection vulnerable to man-in-the-middle attacks. When
used, it will generate a warning from the requests library. Has no effect when ``https`` is False.
:param auth: authentication token for connecting to the remote server. This can be either a
(username, password) tuple, or an authenticator (AuthBase) object. Please refer to the documentation in
the ``requests.auth`` module.
:param proxy: url address of a proxy server. If you do not specify the proxy, then the requests module
will attempt to use a proxy specified in the environment (in HTTP_PROXY / HTTPS_PROXY variables). We
check for the presence of these variables and issue a warning if they are found. In order to suppress
that warning and use proxy from the environment, pass ``proxy="(default)"``.
:param cookies: Cookie (or list of) to add to requests
:param verbose: if True, then connection progress info will be printed to the stdout.
:param _msgs: custom messages to display during connection. This is a tuple (initial message, success message,
failure message).
:returns: A new :class:`H2OConnection` instance.
:raises H2OConnectionError: if the server cannot be reached.
:raises H2OServerError: if the server is in an unhealthy state (although this might be a recoverable error, the
client itself should decide whether it wants to retry or not). |
382,526 | def extract_anomalies(y_true, smoothed_errors, window_size, batch_size, error_buffer):
if len(y_true) <= batch_size * window_size:
raise ValueError("Window size (%s) larger than y_true (len=%s)."
% (batch_size, len(y_true)))
num_windows = int((len(y_true) - (batch_size * window_size)) / batch_size)
anomalies_indices = []
for i in range(num_windows + 1):
prev_index = i * batch_size
curr_index = (window_size * batch_size) + (i * batch_size)
if i == num_windows:  # clamp the final window to the end of the series
curr_index = len(y_true)
window_smoothed_errors = smoothed_errors[prev_index:curr_index]
window_y_true = y_true[prev_index:curr_index]
epsilon, sd_threshold = compute_threshold(window_smoothed_errors, error_buffer)
window_anom_indices = get_anomalies(
window_smoothed_errors,
window_y_true,
sd_threshold,
i,
anomalies_indices,
error_buffer
)
mu = np.mean(window_smoothed_errors)
smoothed_errors_inv = [mu + (mu - e) for e in window_smoothed_errors]
epsilon_inv, sd_inv = compute_threshold(smoothed_errors_inv, error_buffer)
inv_anom_indices = get_anomalies(
smoothed_errors_inv,
window_y_true,
sd_inv,
i,
anomalies_indices,
len(y_true)
)
anomalies_indices = list(set(anomalies_indices + inv_anom_indices))
anomalies_indices.extend([i_a + i * batch_size for i_a in window_anom_indices])
anomalies_indices = sorted(list(set(anomalies_indices)))
anomalies_groups = [list(group) for group in mit.consecutive_groups(anomalies_indices)]
anomaly_sequences = [(g[0], g[-1]) for g in anomalies_groups if not g[0] == g[-1]]
anomalies_scores = []
for e_seq in anomaly_sequences:
denominator = np.mean(smoothed_errors) + np.std(smoothed_errors)
score = max([
abs(smoothed_errors[x] - epsilon) / denominator
for x in range(e_seq[0], e_seq[1])
])
anomalies_scores.append(score)
return anomaly_sequences, anomalies_scores | Extracts anomalies from the errors.
Args:
y_true (array-like): ground-truth values.
smoothed_errors (array-like): smoothed prediction errors.
window_size (int): number of batches per sliding window.
batch_size (int): number of values per batch.
error_buffer (int): number of indices to buffer around detected anomalies.
Returns: tuple (anomaly_sequences, anomalies_scores) of (start, end) index
pairs for anomalous sequences and a score for each sequence. |
382,527 | def _update_limits_from_api(self):
try:
self.connect()
resp = self.conn.get_send_quota()
except EndpointConnectionError as ex:
logger.warning('Skipping SES: %s', str(ex))
return
except ClientError as ex:
if ex.response['Error']['Code'] in ['AccessDenied', '503']:
logger.warning('Skipping SES: %s', ex)
return
raise
self.limits['Daily sending quota']._set_api_limit(resp['Max24HourSend']) | Call the service's API action to retrieve limit/quota information, and
update AwsLimit objects in ``self.limits`` with this information. |
382,528 | def _get_quantile_ratio(self, X, y):
y_pred = self.predict(X)
return (y_pred > y).mean() | Find the empirical quantile of the model.
Parameters
----------
X : array-like, shape (n_samples, m_features)
Training vectors, where n_samples is the number of samples
and m_features is the number of features.
y : array-like, shape (n_samples,)
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
Returns
-------
ratio : float on [0, 1] |
382,529 | def top_priority_effect(effects):
if len(effects) == 0:
raise ValueError("List of effects cannot be empty")
effects = map(
select_between_exonic_splice_site_and_alternate_effect,
effects)
effects_grouped_by_gene = apply_groupby(
effects, fn=gene_id_of_associated_transcript, skip_none=False)
if None in effects_grouped_by_gene:
effects_without_genes = effects_grouped_by_gene.pop(None)
else:
effects_without_genes = []
if len(effects_grouped_by_gene) > 0:
effects_with_genes = [
top_priority_effect_for_single_gene(gene_effects)
for gene_effects in effects_grouped_by_gene.values()
]
return max(effects_with_genes, key=multi_gene_effect_sort_key)
else:
assert len(effects_without_genes) > 0
return max(effects_without_genes, key=multi_gene_effect_sort_key) | Given a collection of variant transcript effects,
return the top priority object. ExonicSpliceSite variants require special
treatment since they actually represent two effects -- the splicing modification
and whatever else would happen to the exonic sequence if nothing else gets
changed. In cases where multiple transcripts give rise to multiple
effects, use a variety of filtering and sorting heuristics to pick
the canonical transcript. |
382,530 | def get_file(self, name, filename):
stream, vname = self.get_stream(name)
path, version = split_name(vname)
dir_path = os.path.dirname(filename)
if dir_path:
mkdir(dir_path)
with open(filename, 'wb') as f:
shutil.copyfileobj(stream, f)
return vname | Saves the content of file named ``name`` to ``filename``.
Works like :meth:`get_stream`, but ``filename`` is the name of
a file which will be created (or overwritten).
Returns the full versioned name of the retrieved file. |
382,531 | def apply_strategy(self):
method_id = self.conf.strategy.replace('-', '_')
if not hasattr(DuplicateSet, method_id):
raise NotImplementedError(
"DuplicateSet.{}() method.".format(method_id))
return getattr(self, method_id)() | Apply deduplication with the configured strategy.
Transform strategy keyword into its method ID, and call it. |
382,532 | def do_create(marfile, files, compress, productversion=None, channel=None,
signing_key=None, signing_algorithm=None):
with open(marfile, 'w+b') as f:
with MarWriter(f, productversion=productversion, channel=channel,
signing_key=signing_key,
signing_algorithm=signing_algorithm,
) as m:
for f in files:
m.add(f, compress=compress) | Create a new MAR file. |
382,533 | def run(self, clf):
rank = MPI.COMM_WORLD.Get_rank()
if rank == 0:
logger.info('Starting activity-based voxel selection')  # message inferred
self.sl.distribute([self.data], self.mask)
self.sl.broadcast((self.labels, self.num_folds, clf))
if rank == 0:
logger.info('Running Searchlight cross validation')  # message inferred
result_volume = self.sl.run_searchlight(_sfn)
result_list = result_volume[self.mask]
results = []
if rank == 0:
for idx, value in enumerate(result_list):
if value is None:
value = 0
results.append((idx, value))
results.sort(key=lambda tup: tup[1], reverse=True)
logger.info('Activity-based voxel selection done')  # message inferred
return result_volume, results | run activity-based voxel selection
Sort the voxels based on the cross-validation accuracy
of their activity vectors within the searchlight
Parameters
----------
clf: classification function
the classifier to be used in cross validation
Returns
-------
result_volume: 3D array of accuracy numbers
contains the voxelwise accuracy numbers obtained via Searchlight
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of voxels |
382,534 | def purge(self):
self.channel.purge_stream(self.stream_id, remove_definition=False, sandbox=None) | Purge the stream. This removes all data and clears the calculated intervals
:return: None |
382,535 | def check(self, feature):
mapper = feature.as_dataframe_mapper()
mapper.fit(self.X, y=self.y) | Check that fit can be called on reference data |
382,536 | def isempty(path):
if op.isdir(path):
return [] == os.listdir(path)
elif op.isfile(path):
return 0 == os.stat(path).st_size
return None | Returns True if the given file or directory path is empty.
**Examples**:
::
auxly.filesys.isempty("foo.txt") # Works on files...
auxly.filesys.isempty("bar") # ...or directories! |
382,537 | def get_scaled(self, factor):
res = TimeUnit(self)
res._factor = self._factor * factor
res._unit = self._unit
return res | Get a new time unit, scaled by the given factor |
382,538 | def load_essentiality(self, model):
data = self.config.get("essentiality")
if data is None:
return
experiments = data.get("experiments")
if experiments is None or len(experiments) == 0:
return
path = self.get_path(data,
join("data", "experimental", "essentiality"))
for exp_id, exp in iteritems(experiments):
if exp is None:
exp = dict()
filename = exp.get("filename")
if filename is None:
filename = join(path, "{}.csv".format(exp_id))
elif not isabs(filename):
filename = join(path, filename)
experiment = EssentialityExperiment(
identifier=exp_id, obj=exp, filename=filename)
if experiment.medium is not None:
assert experiment.medium in self.media, \
"Experiment has an undefined medium .".format(
exp_id, experiment.medium)
experiment.medium = self.media[experiment.medium]
experiment.load()
experiment.validate(model)
self.essentiality[exp_id] = experiment | Load and validate all data files. |
382,539 | def _get_nop_length(cls, insns):
nop_length = 0
if insns and cls._is_noop_insn(insns[0]):
for insn in insns:
if cls._is_noop_insn(insn):
nop_length += insn.size
else:
break
return nop_length | Calculate the total size of leading nop instructions.
:param insns: A list of capstone insn objects.
:return: Number of bytes of leading nop instructions.
:rtype: int |
382,540 | def convertDict2Attrs(self, *args, **kwargs):
for n,a in enumerate(self.attrs):
try:
params = self.params
except AttributeError as aerr:
params = {}
kwargs.update(params)
try:
task = self.mambutaskclass(urlfunc=None, entid=None, *args, **kwargs)
except AttributeError as ae:
self.mambutaskclass = MambuTask
task = self.mambutaskclass(urlfunc=None, entid=None, *args, **kwargs)
task.init(a, *args, **kwargs)
self.attrs[n] = task | The trick for iterable Mambu Objects comes here:
You iterate over each element of the responded List from Mambu,
and create a Mambu Task object for each one, initializing
them one at a time, and changing the attrs attribute (which just
holds a list of plain dictionaries) with a MambuTask just
created. |
382,541 | def parse_deps(orig_doc, options={}):
doc = Doc(orig_doc.vocab).from_bytes(orig_doc.to_bytes())
if not doc.is_parsed:
user_warning(Warnings.W005)
if options.get("collapse_phrases", False):
with doc.retokenize() as retokenizer:
for np in list(doc.noun_chunks):
attrs = {
"tag": np.root.tag_,
"lemma": np.root.lemma_,
"ent_type": np.root.ent_type_,
}
retokenizer.merge(np, attrs=attrs)
if options.get("collapse_punct", True):
spans = []
for word in doc[:-1]:
if word.is_punct or not word.nbor(1).is_punct:
continue
start = word.i
end = word.i + 1
while end < len(doc) and doc[end].is_punct:
end += 1
span = doc[start:end]
spans.append((span, word.tag_, word.lemma_, word.ent_type_))
with doc.retokenize() as retokenizer:
for span, tag, lemma, ent_type in spans:
attrs = {"tag": tag, "lemma": lemma, "ent_type": ent_type}
retokenizer.merge(span, attrs=attrs)
if options.get("fine_grained"):
words = [{"text": w.text, "tag": w.tag_} for w in doc]
else:
words = [{"text": w.text, "tag": w.pos_} for w in doc]
arcs = []
for word in doc:
if word.i < word.head.i:
arcs.append(
{"start": word.i, "end": word.head.i, "label": word.dep_, "dir": "left"}
)
elif word.i > word.head.i:
arcs.append(
{
"start": word.head.i,
"end": word.i,
"label": word.dep_,
"dir": "right",
}
)
return {"words": words, "arcs": arcs, "settings": get_doc_settings(orig_doc)} | Generate dependency parse in {'words': [], 'arcs': []} format.
doc (Doc): Document to parse.
RETURNS (dict): Generated dependency parse keyed by words and arcs. |
382,542 | def post_async(self, url, data, callback=None, params=None, headers=None):
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, None)
self._authenticate(params, headers)
data = json.dumps(data, cls=JSONEncoder)
process_pool.apply_async(make_post_request,
args=(endpoint, data, params, headers),
callback=callback) | Asynchronous POST request with the process pool. |
382,543 | def supports_object_type(self, object_type=None):
from .osid_errors import IllegalState, NullArgument
if not object_type:
raise NullArgument()
if self._kwargs['syntax'] not in ['OBJECT']:
raise IllegalState()
return object_type in self.get_object_types | Tests if the given object type is supported.
arg: object_type (osid.type.Type): an object Type
return: (boolean) - ``true`` if the type is supported, ``false``
otherwise
raise: IllegalState - syntax is not an ``OBJECT``
raise: NullArgument - ``object_type`` is ``null``
*compliance: mandatory -- This method must be implemented.* |
382,544 | def mosh_args(conn):
I, = conn.identities
identity = I.identity_dict
args = []
if 'port' in identity:
args += ['-p', identity['port']]
if 'user' in identity:
args += [identity['user'] + '@' + identity['host']]
else:
args += [identity['host']]
return args | Create SSH command for connecting specified server. |
382,545 | def allowance(self, filename):
for line in self.rulelines:
if line.applies_to(filename):
return line.allowance
return True | Preconditions:
- our agent applies to this entry
- filename is URL decoded |
382,546 | def bgseq(code):
if isinstance(code, str):
code = nametonum(code)
if code == -1:
return ""
s = termcap.get('setab', code) or termcap.get('setb', code)  # terminfo background-color capabilities
return s | Returns the background color terminal escape sequence for the given color code number. |
382,547 | def consume_messages(self, max_next_messages):
if self.__next_messages == 0:
self.set_next_messages(min(1000, max_next_messages))
self.set_next_messages(min(self.__next_messages, max_next_messages))
mark = time.time()
for record in self._get_messages_from_consumer():
yield record.partition, record.offset, record.key, record.value
newmark = time.time()
if newmark - mark > 30:
self.set_next_messages(self.__next_messages / 2 or 1)
elif newmark - mark < 5:
self.set_next_messages(min(self.__next_messages + 100, max_next_messages)) | Get messages batch from Kafka (list at output) |
382,548 | def _search_files(path):
path = pathlib.Path(path)
fifo = []
for fp in path.glob("*"):
if fp.is_dir():
continue
for fmt in formats:
if not fmt.is_series and fmt.verify(fp):
fifo.append((fp, fmt.__name__))
break
theformats = [ff[1] for ff in fifo]
formset = set(theformats)
if len(formset) > 1:
fmts_qpimage = ["SingleHdf5Qpimage", "SeriesHdf5Qpimage"]
fifo = [ff for ff in fifo if ff[1] not in fmts_qpimage]
if len(formset) > 1 and "SingleTifPhasics" in theformats:
fmts_badtif = "SingleTifHolo"
fifo = [ff for ff in fifo if ff[1] not in fmts_badtif]
theformats2 = [ff[1] for ff in fifo]
formset2 = set(theformats2)
if len(formset2) > 1:
msg = "Qpformat does not support multiple different file " \
+ "formats within one directory: {}".format(formset2)
raise MultipleFormatsNotSupportedError(msg)
fifo = sorted(fifo)
return fifo | Search a folder for data files
.. versionchanged:: 0.6.0
`path` is not searched recursively anymore |
382,549 | def semiyearly(date=datetime.date.today()):
return datetime.date(date.year, 1 if date.month < 7 else 7, 1) | Twice a year. |
382,550 | def update(self, modelID, modelParams, modelParamsHash, metricResult,
completed, completionReason, matured, numRecords):
assert (modelParamsHash is not None)
if completed:
matured = True
if metricResult is not None and matured and \
completionReason in [ClientJobsDAO.CMPL_REASON_EOF,
ClientJobsDAO.CMPL_REASON_STOPPED]:
if self._hsObj._maximize:
errScore = -1 * metricResult
else:
errScore = metricResult
if errScore < self._bestResult:
self._bestResult = errScore
self._bestModelID = modelID
self._hsObj.logger.info("New best model after %d evaluations: errScore "
"%g on model %s" % (len(self._allResults), self._bestResult,
self._bestModelID))
else:
errScore = numpy.inf
if completed and completionReason in [ClientJobsDAO.CMPL_REASON_ORPHAN]:
errScore = numpy.inf
hidden = True
else:
hidden = False
if completed:
self._completedModels.add(modelID)
self._numCompletedModels = len(self._completedModels)
if completionReason == ClientJobsDAO.CMPL_REASON_ERROR:
self._errModels.add(modelID)
self._numErrModels = len(self._errModels)
wasHidden = False
if modelID not in self._modelIDToIdx:
assert (modelParams is not None)
entry = dict(modelID=modelID, modelParams=modelParams,
modelParamsHash=modelParamsHash,
errScore=errScore, completed=completed,
matured=matured, numRecords=numRecords, hidden=hidden)
self._allResults.append(entry)
entryIdx = len(self._allResults) - 1
self._modelIDToIdx[modelID] = entryIdx
self._paramsHashToIndexes[modelParamsHash] = entryIdx
swarmId = modelParams['particleState']['swarmId']
if not hidden:
if swarmId in self._swarmIdToIndexes:
self._swarmIdToIndexes[swarmId].append(entryIdx)
else:
self._swarmIdToIndexes[swarmId] = [entryIdx]
genIdx = modelParams['particleState']['genIdx']
numPsEntry = self._swarmNumParticlesPerGeneration.get(swarmId, [0])
while genIdx >= len(numPsEntry):
numPsEntry.append(0)
numPsEntry[genIdx] += 1
self._swarmNumParticlesPerGeneration[swarmId] = numPsEntry
else:
entryIdx = self._modelIDToIdx.get(modelID, None)
assert (entryIdx is not None)
entry = self._allResults[entryIdx]
wasHidden = entry['hidden']
if entry['modelParamsHash'] != modelParamsHash:
self._paramsHashToIndexes.pop(entry['modelParamsHash'])
self._paramsHashToIndexes[modelParamsHash] = entryIdx
entry['modelParamsHash'] = modelParamsHash
modelParams = entry['modelParams']
swarmId = modelParams['particleState']['swarmId']
genIdx = modelParams['particleState']['genIdx']
if hidden and not wasHidden:
assert (entryIdx in self._swarmIdToIndexes[swarmId])
self._swarmIdToIndexes[swarmId].remove(entryIdx)
self._swarmNumParticlesPerGeneration[swarmId][genIdx] -= 1
entry['errScore'] = errScore
entry['completed'] = completed
entry['matured'] = matured
entry['numRecords'] = numRecords
entry['hidden'] = hidden
particleId = modelParams['particleState']['id']
genIdx = modelParams['particleState']['genIdx']
if matured and not hidden:
(oldResult, pos) = self._particleBest.get(particleId, (numpy.inf, None))
if errScore < oldResult:
pos = Particle.getPositionFromState(modelParams['particleState'])
self._particleBest[particleId] = (errScore, pos)
prevGenIdx = self._particleLatestGenIdx.get(particleId, -1)
if not hidden and genIdx > prevGenIdx:
self._particleLatestGenIdx[particleId] = genIdx
elif hidden and not wasHidden and genIdx == prevGenIdx:
self._particleLatestGenIdx[particleId] = genIdx-1
if not hidden:
swarmId = modelParams['particleState']['swarmId']
if not swarmId in self._swarmBestOverall:
self._swarmBestOverall[swarmId] = []
bestScores = self._swarmBestOverall[swarmId]
while genIdx >= len(bestScores):
bestScores.append((None, numpy.inf))
if errScore < bestScores[genIdx][1]:
bestScores[genIdx] = (modelID, errScore)
if not hidden:
key = (swarmId, genIdx)
if not key in self._maturedSwarmGens:
self._modifiedSwarmGens.add(key)
return errScore | Insert a new entry or update an existing one. If this is an update
of an existing entry, then modelParams will be None
Parameters:
--------------------------------------------------------------------
modelID: globally unique modelID of this model
modelParams: params dict for this model, or None if this is just an update
of a model that it already previously reported on.
See the comments for the createModels() method for
a description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
metricResult: value on the optimizeMetric for this model.
May be None if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured
numRecords: Number of records that have been processed so far by this
model.
retval: Canonicalized result on the optimize metric |
382,551 | def aliases(*names):
def wrapper(func):
setattr(func, ATTR_ALIASES, names)
return func
return wrapper | Defines alternative command name(s) for given function (along with its
original name). Usage::
@aliases('co', 'check')
def checkout(args):
...
The resulting command will be available as ``checkout``, ``check`` and ``co``.
.. note::
This decorator only works with a recent version of argparse (see `Python
issue 9324`_ and `Python rev 4c0426`_). Such version ships with
**Python 3.2+** and may be available in other environments as a separate
package. Argh does not issue warnings and simply ignores aliases if
they are not supported. See :attr:`~argh.assembling.SUPPORTS_ALIASES`.
.. _Python issue 9324: http://bugs.python.org/issue9324
.. _Python rev 4c0426: http://hg.python.org/cpython/rev/4c0426261148/
.. versionadded:: 0.19 |
382,552 | def get_channelstate_by_token_network_and_partner(
chain_state: ChainState,
token_network_id: TokenNetworkID,
partner_address: Address,
) -> Optional[NettingChannelState]:
token_network = get_token_network_by_identifier(
chain_state,
token_network_id,
)
channel_state = None
if token_network:
channels = [
token_network.channelidentifiers_to_channels[channel_id]
for channel_id in token_network.partneraddresses_to_channelidentifiers[partner_address]
]
states = filter_channels_by_status(
channels,
[CHANNEL_STATE_UNUSABLE],
)
if states:
channel_state = states[-1]
return channel_state | Return the NettingChannelState if it exists, None otherwise. |
382,553 | def dumps(obj, *args, **kwargs):
return json.dumps(obj, *args, cls=TypelessSONEncoder, ensure_ascii=False, **kwargs) | Typeless dump an object to json string |
382,554 | def consistency(self):
result = self.total_duration.jd / (self.max_time - self.min_time).jd
return result | Get the percentage of fill between the min and max times over which the MOC is defined.
A value near 0 indicates a sparse temporal MOC (it covers little total time,
spread across widely separated epochs); a value near 1 means the MOC covers
its time span nearly continuously, without long pauses.
Returns
-------
result : float
fill percentage (between 0 and 1.) |
382,555 | def _make_plan(plan_dict):
operator_type = plan_dict["operatorType"]
identifiers = plan_dict.get("identifiers", [])
arguments = plan_dict.get("args", [])
children = [_make_plan(child) for child in plan_dict.get("children", [])]
if "dbHits" in plan_dict or "rows" in plan_dict:
db_hits = plan_dict.get("dbHits", 0)
rows = plan_dict.get("rows", 0)
return ProfiledPlan(operator_type, identifiers, arguments, children, db_hits, rows)
else:
return Plan(operator_type, identifiers, arguments, children) | Construct a Plan or ProfiledPlan from a dictionary of metadata values.
:param plan_dict:
:return: |
382,556 | def GetParametro(self, clave, clave1=None, clave2=None, clave3=None, clave4=None):
"Devuelve un parámetro de salida (establecido por llamada anterior)"
valor = self.params_out.get(clave)
for clave in (clave1, clave2, clave3, clave4):
if clave is not None and valor is not None:
if isinstance(clave, basestring) and clave.isdigit():
clave = int(clave)
try:
valor = valor[clave]
except (KeyError, IndexError):
valor = None
if valor is not None:
if isinstance(valor, basestring):
return valor
else:
return str(valor)
else:
return "" | Devuelve un parámetro de salida (establecido por llamada anterior) |
382,557 | def build_job_configs(self, args):
job_configs = {}
components = Component.build_from_yamlfile(args['comp'])
NAME_FACTORY.update_base_dict(args['data'])
ret_dict = make_diffuse_comp_info_dict(components=components,
library=args['library'],
basedir=NAME_FACTORY.base_dict['basedir'])
diffuse_comp_info_dict = ret_dict['comp_info_dict']
for diffuse_comp_info_key in sorted(diffuse_comp_info_dict.keys()):
diffuse_comp_info_value = diffuse_comp_info_dict[diffuse_comp_info_key]
for comp in components:
zcut = "zmax%i" % comp.zmax
key = comp.make_key()
if diffuse_comp_info_value.components is None:
sub_comp_info = diffuse_comp_info_value
else:
sub_comp_info = diffuse_comp_info_value.get_component_info(comp)
name_keys = dict(zcut=zcut,
sourcekey=sub_comp_info.sourcekey,
ebin=comp.ebin_name,
psftype=comp.evtype_name,
mktime='none',
coordsys=comp.coordsys,
irf_ver=NAME_FACTORY.irf_ver(),
fullpath=True)
outfile = NAME_FACTORY.srcmaps(**name_keys)
outfile_tokens = os.path.splitext(outfile)
infile_regexp = "%s_*.fits*" % outfile_tokens[0]
full_key = "%s_%s" % (sub_comp_info.sourcekey, key)
logfile = make_nfs_path(outfile.replace('.fits', '.log'))
job_configs[full_key] = dict(output=outfile,
args=infile_regexp,
hdu=sub_comp_info.source_name,
logfile=logfile)
return job_configs | Hook to build job configurations |
382,558 | def delay_and_stop(duration, dll, device_number):
xinput = getattr(ctypes.windll, dll)
time.sleep(duration/1000)
xinput_set_state = xinput.XInputSetState
xinput_set_state.argtypes = [
ctypes.c_uint, ctypes.POINTER(XinputVibration)]
xinput_set_state.restype = ctypes.c_uint
vibration = XinputVibration(0, 0)
xinput_set_state(device_number, ctypes.byref(vibration)) | Stop vibration aka force feedback aka rumble on
Windows after duration miliseconds. |
382,559 | def verify_merkle_path(merkle_root_hex, serialized_path, leaf_hash_hex, hash_function=bin_double_sha256):
merkle_root = hex_to_bin_reversed(merkle_root_hex)
leaf_hash = hex_to_bin_reversed(leaf_hash_hex)
path = MerkleTree.path_deserialize(serialized_path)
path = [{'order': p['order'], 'hash': hex_to_bin_reversed(p['hash'])} for p in path]
if len(path) == 0:
raise ValueError("Empty path")
cur_hash = leaf_hash
for i in range(0, len(path)):
if path[i]['order'] == 'l':
cur_hash = hash_function(path[i]['hash'] + cur_hash)
elif path[i]['order'] == 'r':
cur_hash = hash_function(cur_hash + path[i]['hash'])
elif path[i]['order'] == 'm':
assert len(path) == 1
return cur_hash == path[i]['hash']
return cur_hash == merkle_root | Verify a merkle path. The given path is the path from two leaf nodes to the root itself.
merkle_root_hex is a little-endian, hex-encoded hash.
serialized_path is the serialized merkle path
path_hex is a list of little-endian, hex-encoded hashes.
Return True if the path is consistent with the merkle root.
Return False if not. |
382,560 | def _get_job_results(query=None):
if not query:
raise CommandExecutionError("Query parameters cannot be empty.")
response = __proxy__['panos.call'](query)
if 'result' in response and 'job' in response['result']:
jid = response['result']['job']
while get_job(jid)['result']['job']['status'] != 'FIN':
time.sleep(5)
return get_job(jid)
else:
return response | Executes a query that requires a job for completion. This function will wait for the job to complete
and return the results. |
382,561 | def pace(self):
secs_per_km = self.duration / (self.distance / 1000)
return time.strftime('%M:%S', time.gmtime(secs_per_km)) | Average pace (mm:ss/km) for the workout. |
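A worked example with assumed numbers: a 3000 m workout lasting 900 s gives 300 s/km, formatted as 05:00.
import time
secs_per_km = 900 / (3000 / 1000)
print(time.strftime('%M:%S', time.gmtime(secs_per_km)))  # 05:00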
382,562 | def cauldron_extras(self):
for extra in super(Dimension, self).cauldron_extras:
yield extra
if self.formatters:
prop = self.id + '_raw'
else:
prop = self.id_prop
yield self.id + '_id', lambda row: getattr(row, prop) | Yield extra tuples containing a field name and a callable that takes
a row |
382,563 | def add_edges(self):
for group, edgelist in self.edges.items():
for (u, v, d) in edgelist:
self.draw_edge(u, v, d, group) | Draws all of the edges in the graph. |
382,564 | def _definition(self):
headerReference = self._sectPr.get_headerReference(self._hdrftr_index)
return self._document_part.header_part(headerReference.rId) | |HeaderPart| object containing content of this header. |
382,565 | def update_layers_esri_mapserver(service, greedy_opt=False):
try:
esri_service = ArcMapService(service.url)
srs_code = esri_service.spatialReference.wkid
srs, created = SpatialReferenceSystem.objects.get_or_create(code=srs_code)
service.srs.add(srs)
service.update_validity()
if in esri_service._json_struct and greedy_opt:
if in esri_service._json_struct[]:
wms_url = service.url.replace(, )
if in wms_url:
wms_url = wms_url.replace(, )
if in wms_url:
wms_url = wms_url.replace(, )
LOGGER.debug( % wms_url)
from utils import create_service_from_endpoint
create_service_from_endpoint(wms_url, , catalog=service.catalog)
layer_n = 0
total = len(esri_service.layers)
for esri_layer in esri_service.layers:
if 'subLayerIds' not in esri_layer._json_struct:  # key inferred: skip group layers
LOGGER.debug('Updating layer %s' % esri_layer.name)
layer, created = Layer.objects.get_or_create(
name=esri_layer.id,
service=service,
catalog=service.catalog
)
if layer.active:
layer.type = 'ESRI:ArcGIS:MapServer'
links = [[layer.type, service.url],
[, settings.SITE_URL.rstrip() + + layer.get_url_endpoint()]]
layer.title = esri_layer.name
layer.abstract = esri_service.serviceDescription
layer.url = service.url
layer.page_url = layer.get_absolute_url
links.append([
,
settings.SITE_URL.rstrip() + layer.page_url
])
try:
layer.bbox_x0 = esri_layer.extent.xmin
layer.bbox_y0 = esri_layer.extent.ymin
layer.bbox_x1 = esri_layer.extent.xmax
layer.bbox_y1 = esri_layer.extent.ymax
except KeyError:
pass
try:
layer.bbox_x0 = esri_layer._json_struct['extent']['xmin']
layer.bbox_y0 = esri_layer._json_struct['extent']['ymin']
layer.bbox_x1 = esri_layer._json_struct['extent']['xmax']
layer.bbox_y1 = esri_layer._json_struct['extent']['ymax']
except Exception:
pass
layer.wkt_geometry = bbox2wktpolygon([layer.bbox_x0, layer.bbox_y0, layer.bbox_x1, layer.bbox_y1])
layer.xml = create_metadata_record(
identifier=str(layer.uuid),
source=service.url,
links=links,
format='text/xml',
type=layer.csw_type,
relation=service.id_string,
title=layer.title,
alternative=layer.title,
abstract=layer.abstract,
wkt_geometry=layer.wkt_geometry
)
layer.anytext = gen_anytext(layer.title, layer.abstract)
layer.save()
add_mined_dates(layer)
layer_n = layer_n + 1
LOGGER.debug("Updating layer n. %s/%s" % (layer_n, total))
if DEBUG_SERVICES and layer_n == DEBUG_LAYER_NUMBER:
return
except Exception as err:
message = "update_layers_esri_mapserver: {0}".format(
err
)
check = Check(
content_object=service,
success=False,
response_time=0,
message=message
)
check.save() | Update layers for an ESRI REST MapServer.
Sample endpoint: https://gis.ngdc.noaa.gov/arcgis/rest/services/SampleWorldCities/MapServer/?f=json |
382,566 | def list_ec2(region, filter_by_kwargs):
conn = boto.ec2.connect_to_region(region)
instances = conn.get_only_instances()
return lookup(instances, filter_by=filter_by_kwargs) | List running ec2 instances. |
382,567 | def callprop(self, prop, *args):
if not isinstance(prop, basestring):
prop = prop.to_string().value
cand = self.get(prop)
if not cand.is_callable():
raise MakeError('TypeError',
'%s is not a function' % cand.typeof())
return cand.call(self, args) | Call a property prop as a method (this will be self).
NOTE: dont pass this and arguments here, these will be added
automatically! |
382,568 | def jens_transformation_beta(graph: BELGraph) -> DiGraph:
result = DiGraph()
for u, v, d in graph.edges(data=True):
relation = d[RELATION]
if relation == NEGATIVE_CORRELATION:
result.add_edge(u, v)
result.add_edge(v, u)
elif relation in CAUSAL_INCREASE_RELATIONS:
result.add_edge(v, u)
elif relation in CAUSAL_DECREASE_RELATIONS:
result.add_edge(u, v)
return result | Apply Jens' Transformation (Type 2) to the graph.
1. Induce a sub-graph over causal and correlative relations
2. Transform edges with the following rules:
- increases => backwards decreases
- decreases => decreases
- positive correlation => delete
- negative correlation => two way decreases
The resulting graph can be used to search for 3-cycles, which now symbolize stable triples where ``A -> B``,
``A -| C`` and ``B negativeCorrelation C``. |
382,569 | def readLiteralContextModes(self):
print(.center(60, ))
self.literalContextModes = []
for i in range(self.numberOfBlockTypes[L]):
self.literalContextModes.append(
self.verboseRead(LiteralContextMode(number=i))) | Read literal context modes.
LSB6: lower 6 bits of last char
MSB6: upper 6 bits of last char
UTF8: rougly dependent on categories:
upper 4 bits depend on category of last char:
control/whitespace/space/ punctuation/quote/%/open/close/
comma/period/=/digits/ VOWEL/CONSONANT/vowel/consonant
lower 2 bits depend on category of 2nd last char:
space/punctuation/digit or upper/lowercase
signed: hamming weight of last 2 chars |
382,570 | def get_missing_simulations(self, param_list, runs=None):
params_to_simulate = []
if runs is not None:
next_runs = self.db.get_next_rngruns()
available_params = [r['params'] for r in self.db.get_results()]
for param_comb in param_list:
needed_runs = runs
for i, p in enumerate(available_params):
if param_comb == {k: p[k] for k in p.keys() if k != "RngRun"}:
needed_runs -= 1
new_param_combs = []
for needed_run in range(needed_runs):
new_param = deepcopy(param_comb)
new_param['RngRun'] = next(next_runs)
new_param_combs += [new_param]
params_to_simulate += new_param_combs
else:
for param_comb in param_list:
if not self.db.get_results(param_comb):
params_to_simulate += [param_comb]
return params_to_simulate | Return a list of the simulations among the required ones that are not
available in the database.
Args:
param_list (list): a list of dictionaries containing all the
parameters combinations.
runs (int): an integer representing how many repetitions are wanted
for each parameter combination, None if the dictionaries in
param_list already feature the desired RngRun value. |
382,571 | def nnz_obs_groups(self):
og = []
obs = self.observation_data
for g in self.obs_groups:
if obs.loc[obs.obgnme==g,"weight"].sum() > 0.0:
og.append(g)
return og | get the observation groups that contain at least one non-zero weighted
observation
Returns
-------
nnz_obs_groups : list
a list of observation groups that contain at
least one non-zero weighted observation |
382,572 | def put(self, key, value):
value = self.serializedValue(value)
self.child_datastore.put(key, value) | Stores the object `value` named by `key`.
Serializes values on the way in, and stores the serialized data into the
``child_datastore``.
Args:
key: Key naming `value`
value: the object to store. |
382,573 | def create_api_key(awsclient, api_name, api_key_name):
_sleep()
client_api = awsclient.get_client('apigateway')
print('create api key: %s' % api_key_name)
response = client_api.create_api_key(
name=api_key_name,
description='Created for ' + api_name,
enabled=True
)
print('Add this api key \'%s\' to your api.conf' % response['id'])
return response['id'] | Create a new API key as reference for api.conf.
:param api_name:
:param api_key_name:
:return: api_key |
382,574 | def _execute(self, method_function, method_name, resource, **params):
resource_uri = "{api_host}{resource}".format(api_host=self.api_host, resource=resource)
url_encoded_fields = self._encode_params(params)
headers = RestClient.generate_telesign_headers(self.customer_id,
self.api_key,
method_name,
resource,
url_encoded_fields,
user_agent=self.user_agent)
if method_name in ['POST', 'PUT']:
payload = {'data': url_encoded_fields}
else:
payload = {'params': url_encoded_fields}
response = self.Response(method_function(resource_uri,
headers=headers,
timeout=self.timeout,
**payload))
return response | Generic TeleSign REST API request handler.
:param method_function: The Requests HTTP request function to perform the request.
:param method_name: The HTTP method name, as an upper case string.
:param resource: The partial resource URI to perform the request against, as a string.
:param params: Body params to perform the HTTP request with, as a dictionary.
:return: The RestClient Response object. |
382,575 | def set_weekly(self, interval, *, days_of_week, first_day_of_week,
**kwargs):
self.set_daily(interval, **kwargs)
self.__days_of_week = set(days_of_week)
self.__first_day_of_week = first_day_of_week | Set to repeat every week on specified days for every x no. of days
:param int interval: no. of days to repeat at
:param str first_day_of_week: starting day for a week
:param list[str] days_of_week: list of days of the week to repeat
:keyword date start: Start date of repetition (kwargs)
:keyword date end: End date of repetition (kwargs)
:keyword int occurrences: no of occurrences (kwargs) |
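A usage sketch (the receiver `event.recurrence` and the exact accepted day-name strings are assumptions; only the signature above is given by the source):

    from datetime import date

    # repeat every 2 weeks on Monday and Thursday, weeks starting on Sunday
    event.recurrence.set_weekly(2,
                                days_of_week=['monday', 'thursday'],
                                first_day_of_week='sunday',
                                start=date(2024, 1, 1),
                                occurrences=10)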
382,576 | def get_recover_position(gzfile, last_good_position):
with closing(mmap.mmap(gzfile.fileno(), 0, access=mmap.ACCESS_READ)) as m:
return m.find(GZIP_SIGNATURE, last_good_position + 1) | Return position of a next gzip stream in a GzipFile,
or -1 if it is not found.
XXX: caller must ensure that the same last_good_position
is not used multiple times for the same gzfile. |
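A recovery loop might use it like this (a sketch; GZIP_SIGNATURE is the module-level gzip magic bytes the function scans for):

    import gzip

    with open('data.log.gz', 'rb') as raw:
        gzfile = gzip.GzipFile(fileobj=raw)
        pos = -1
        while True:
            pos = get_recover_position(gzfile, pos)
            if pos == -1:
                break  # no further gzip stream in the file
            raw.seek(pos)
            gzfile = gzip.GzipFile(fileobj=raw)  # resume from the next stream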
382,577 | def _parse(reactor, directory, pemdir, *args, **kwargs):
def colon_join(items):
return ':'.join([item.replace(':', '\\:') for item in items])
sub = colon_join(list(args) + ['='.join(item) for item in kwargs.items()])
pem_path = FilePath(pemdir).asTextMode()
acme_key = load_or_create_client_key(pem_path)
return AutoTLSEndpoint(
reactor=reactor,
directory=directory,
client_creator=partial(Client.from_url, key=acme_key, alg=RS256),
cert_store=DirectoryStore(pem_path),
cert_mapping=HostDirectoryMap(pem_path),
sub_endpoint=serverFromString(reactor, sub)) | Parse a txacme endpoint description.
:param reactor: The Twisted reactor.
:param directory: ``twisted.python.url.URL`` for the ACME directory to use
for issuing certs.
:param str pemdir: The path to the certificate directory to use. |
382,578 | def closest_pixel_to_set(self, start, pixel_set, direction, w=13, t=0.5):
y, x = np.meshgrid(np.arange(w) - w / 2, np.arange(w) - w / 2)
cur_px_y = np.ravel(y + start[0]).astype(np.uint16)
cur_px_x = np.ravel(x + start[1]).astype(np.uint16)
cur_px = set(zip(cur_px_y, cur_px_x))
includes = True
if np.all(
cur_px_y >= 0) and np.all(
cur_px_y < self.height) and np.all(
cur_px_x >= 0) and np.all(
cur_px_x < self.width):
includes = not cur_px.isdisjoint(pixel_set)
else:
return None
while not includes:
start = start + t * direction
cur_px_y = np.ravel(y + start[0]).astype(np.uint16)
cur_px_x = np.ravel(x + start[1]).astype(np.uint16)
cur_px = set(zip(cur_px_y, cur_px_x))
if np.all(
cur_px_y >= 0) and np.all(
cur_px_y < self.height) and np.all(
cur_px_x >= 0) and np.all(
cur_px_x < self.width):
includes = not cur_px.isdisjoint(pixel_set)
else:
return None
return start | Starting at pixel, moves start by direction * t until there is a
pixel from pixel_set within a radius w of start. Then, returns start.
Parameters
----------
start : :obj:`numpy.ndarray` of float
The initial pixel location at which to start.
pixel_set : set of 2-tuples of float
The set of pixels to check set intersection with
direction : :obj:`numpy.ndarray` of float
The 2D direction vector in which to move pixel.
w : int
A circular diameter in which to check for pixels.
As soon as the current pixel has some non-zero pixel within a diameter
w of it, this function returns the current pixel location.
t : float
The step size with which to move pixel along direction.
Returns
-------
:obj:`numpy.ndarray` of float
The first pixel location along the direction vector at which there
exists some intersection with pixel_set within a radius w. |
382,579 | def JoinKeyPath(path_segments):
path_segments = [
segment.split(definitions.KEY_PATH_SEPARATOR)
for segment in path_segments]
path_segments = [
element for sublist in path_segments for element in sublist]
path_segments = filter(None, path_segments)
key_path = definitions.KEY_PATH_SEPARATOR.join(path_segments)
if not key_path.startswith(definitions.KEY_PATH_SEPARATOR):
key_path = '{0:s}{1:s}'.format(definitions.KEY_PATH_SEPARATOR, key_path)
return key_path | Joins the path segments into key path.
Args:
path_segments (list[str]): Windows Registry key path segments.
Returns:
str: key path. |
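For example, assuming definitions.KEY_PATH_SEPARATOR is the Windows Registry backslash:

    JoinKeyPath(['HKEY_CURRENT_USER', 'Software\\Microsoft', 'Windows'])
    # -> \HKEY_CURRENT_USER\Software\Microsoft\Windows
    # segments containing the separator are split, and a leading separator is added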
382,580 | def segment_snrs(filters, stilde, psd, low_frequency_cutoff):
snrs = []
norms = []
for bank_template in filters:
snr, _, norm = matched_filter_core(
bank_template, stilde, h_norm=bank_template.sigmasq(psd),
psd=None, low_frequency_cutoff=low_frequency_cutoff)
snrs.append(snr)
norms.append(norm)
return snrs, norms | This function calculates the SNR of each bank veto template against
the segment
Parameters
----------
filters: list of FrequencySeries
The list of bank veto templates filters.
stilde: FrequencySeries
The current segment of data.
psd: FrequencySeries
low_frequency_cutoff: float
Returns
-------
snr (list): List of snr time series.
norm (list): List of normalizations factors for the snr time series. |
382,581 | def access_key(self, data):
if data.startswith(b'h:'):
new = binascii.unhexlify(data[2:])
else:
new = data
if len(new) == 6:
self.access_code = new
else:
raise yubico_exception.InputError('Access key must be exactly 6 bytes') | Set a new access code which will be required for future re-programmings of your YubiKey.
Supply data as either a raw string, or a hexlified string prefixed by 'h:'.
The result, after any hex decoding, must be 6 bytes. |
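Both spellings of the same 6-byte code (a sketch; `cfg` stands for the YubiKey configuration object exposing this method):

    cfg.access_key(b'h:313233343536')  # hex form with 'h:' prefix
    cfg.access_key(b'123456')          # raw form, exactly 6 bytes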
382,582 | def _to_dict(self):
_dict = {}
if hasattr(self, 'section_titles') and self.section_titles is not None:
_dict['section_titles'] = [
x._to_dict() for x in self.section_titles
]
if hasattr(self,
'leading_sentences') and self.leading_sentences is not None:
_dict['leading_sentences'] = [
x._to_dict() for x in self.leading_sentences
]
return _dict | Return a json dictionary representing this model. |
382,583 | def transliterate(self, target_language="en"):
return WordList([w.transliterate(target_language) for w in self.words],
language=target_language, parent=self) | Transliterate the string to the target language. |
382,584 | def SetSelected( self, node, point=None, propagate=True ):
if node == self.selectedNode:
return
self.selectedNode = node
self.UpdateDrawing()
if node:
wx.PostEvent( self, SquareSelectionEvent( node=node, point=point, map=self ) ) | Set the given node selected in the square-map |
382,585 | def get_operator(self, operator):
op = {
'=': '',
'>': '__gt',
'>=': '__gte',
'<': '__lt',
'<=': '__lte',
'~': '__icontains',
'in': '__in',
}.get(operator)
if op is not None:
return op, False
op = {
'!=': '',
'!~': '__icontains',
'not in': '__in',
}[operator]
return op, True | Get a comparison suffix to be used in Django ORM & inversion flag for it
:param operator: string, DjangoQL comparison operator
:return: (suffix, invert) - a tuple with 2 values:
suffix - suffix to be used in ORM query, for example '__gt' for '>'
invert - boolean, True if this comparison needs to be inverted |
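How the pair is meant to be consumed (a sketch; `schema` is an instance of the class defining this method):

    suffix, invert = schema.get_operator('>')    # ('__gt', False)
    suffix, invert = schema.get_operator('!~')   # ('__icontains', True)
    # a caller builds qs.exclude(name__icontains=v) when invert is True,
    # and qs.filter(age__gt=v) when invert is False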
382,586 | def delete_contribution(self, url):
try:
result = self.api_request(url)
if 'url' in result and 'id' in result:  # key names reconstructed; originals stripped in extraction
self.api_request(result['url'], method='DELETE')
return True
except:
pass
return False | Delete the contribution with this identifier
:rtype: bool
:returns: True if the contribution was deleted, False otherwise (eg. if it didn't exist) |
382,587 | def assign(self, partitions):
self._subscription.assign_from_user(partitions)
self._client.set_topics([tp.topic for tp in partitions]) | Manually assign a list of TopicPartitions to this consumer.
Arguments:
partitions (list of TopicPartition): Assignment for this instance.
Raises:
IllegalStateError: If consumer has already called
:meth:`~kafka.KafkaConsumer.subscribe`.
Warning:
It is not possible to use both manual partition assignment with
:meth:`~kafka.KafkaConsumer.assign` and group assignment with
:meth:`~kafka.KafkaConsumer.subscribe`.
Note:
This interface does not support incremental assignment and will
replace the previous assignment (if there was one).
Note:
Manual topic assignment through this method does not use the
consumer's group management functionality. As such, there will be
no rebalance operation triggered when group membership or cluster
and topic metadata change. |
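Typical manual assignment with kafka-python (broker address illustrative):

    from kafka import KafkaConsumer, TopicPartition

    consumer = KafkaConsumer(bootstrap_servers='localhost:9092')
    consumer.assign([TopicPartition('my-topic', 0), TopicPartition('my-topic', 1)])
    for message in consumer:
        print(message.partition, message.offset)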
382,588 | def get_bound_form(self, noun, gender):
syllables = self.syllabifier.syllabify(noun)
stem = self.stemmer.get_stem(noun, gender)
cv_pattern = self.cv_patterner.get_cv_pattern(stem)
if [letter[0] for letter in cv_pattern[-2:]] == ['V', 'C'] or stem in ['nakr']:
if len(syllables) > 2:
if stem in ['nakr']:  # nakrum > naker; literal reconstructed
return 'naker'
else:
return stem
elif len(syllables) > 1:
return stem
if stem in ['ab', 'aḫ']:  # abum, aḫum take -i; literals reconstructed
return stem + 'i'
if cv_pattern[-1][:2] == cv_pattern[-2][:2]:
if 3 > len(syllables) > 1:
return stem + 'i'
if len(syllables) > 2 and cv_pattern[-1][2] + cv_pattern[-2][2] == 'tt':
return stem + 'i'
if len(syllables) > 2:
return stem[:-1]
if cv_pattern[-1][0] == cv_pattern[-2][0] and cv_pattern[-1][1] != cv_pattern[-2][1]:
return stem[:-1] + stem[1] + stem[-1]
if cv_pattern[-1][2] == 't' and cv_pattern[-2][0] == 'V':  # pattern letters reconstructed
if len(syllables) > 2:
return stem + 'i'
if len(syllables) > 1:
if stem in []:  # original stem list stripped in extraction; unrecoverable
return stem + 'i'
if stem in []:  # original stem list stripped in extraction; unrecoverable
return stem[:-1] + stem[1] + stem[-1] | Return bound form of a noun, given its gender.
382,589 | def datatype(dbtype, description, cursor):
dt = cursor.db.introspection.get_field_type(dbtype, description)
if type(dt) is tuple:
return dt[0]
else:
return dt | Google AppEngine Helper to convert a data type into a string. |
382,590 | def cudaMemcpy_htod(dst, src, count):
status = _libcudart.cudaMemcpy(dst, src,
ctypes.c_size_t(count),
cudaMemcpyHostToDevice)
cudaCheckStatus(status) | Copy memory from host to device.
Copy data from host memory to device memory.
Parameters
----------
dst : ctypes pointer
Device memory pointer.
src : ctypes pointer
Host memory pointer.
count : int
Number of bytes to copy. |
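A host-to-device copy of a NumPy buffer might look like this (a sketch; `cudaMalloc` stands for a sibling wrapper returning a device pointer and is an assumption):

    import ctypes
    import numpy as np

    host = np.arange(16, dtype=np.float32)
    dev_ptr = cudaMalloc(host.nbytes)  # hypothetical companion wrapper
    cudaMemcpy_htod(dev_ptr, host.ctypes.data_as(ctypes.c_void_p), host.nbytes)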
382,591 | def run(self, clock, generalLedger):
for c in self.components:
c.run(clock, generalLedger)
for a in self.activities:
a.run(clock, generalLedger) | Execute the component at the current clock cycle.
:param clock: The clock containing the current execution time and
period information.
:param generalLedger: The general ledger into which to create the
transactions. |
382,592 | def IPNetwork(address, version=None, strict=False):
if version:
if version == 4:
return IPv4Network(address, strict)
elif version == 6:
return IPv6Network(address, strict)
try:
return IPv4Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
address) | Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
version: An integer; if set, don't try to automatically
determine what the IP address type is. Important for things
like IPNetwork(1), which could be IPv4, '0.0.0.1/32', or IPv6,
'::1/128'.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if a strict network was requested and a strict
network wasn't given. |
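Examples of the dispatch behaviour:

    IPNetwork('192.0.2.0/24')        # -> IPv4Network
    IPNetwork('2001:db8::/32')       # -> IPv6Network
    IPNetwork(1)                     # -> IPv4Network 0.0.0.1/32 (ints < 2**32 default to v4)
    IPNetwork('::1/128', version=6)  # skip auto-detection, parse as IPv6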
382,593 | def safe_getattr(brain_or_object, attr, default=_marker):
try:
value = getattr(brain_or_object, attr, _marker)
if value is _marker:
if default is not _marker:
return default
fail("Attribute not found.".format(attr))
if callable(value):
return value()
return value
except Unauthorized:
if default is not _marker:
return default
fail("You are not authorized to access of .".format(
attr, repr(brain_or_object))) | Return the attribute value
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param attr: Attribute name
:type attr: str
:returns: Attribute value
:rtype: obj |
382,594 | def setval(self, varname, value):
if varname in self:
self[varname]['value'] = value
else:
self[varname] = Variable(self.default_type, value=value) | Set the value of the variable with the given name. |
382,595 | def retrieve_pt(cls, request, service):
try:
pgt = cls.objects.get(user=request.user, session_key=request.session.session_key).pgt
except cls.DoesNotExist:
raise ProxyError(
"INVALID_TICKET",
"No proxy ticket found for this HttpRequest object"
)
else:
client = get_cas_client(service_url=service, request=request)
try:
return client.get_proxy_ticket(pgt)
except CASError as error:
raise ProxyError(*error.args)
except Exception as e:
raise ProxyError(e) | `request` should be the current HttpRequest object
`service` a string representing the service for which we want to
retrieve a ticket.
The function returns a Proxy Ticket or raises `ProxyError` |
382,596 | def chunks(l:Collection, n:int)->Iterable:
"Yield successive `n`-sized chunks from `l`."
for i in range(0, len(l), n): yield l[i:i+n] | Yield successive `n`-sized chunks from `l`. |
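For example:

    list(chunks([1, 2, 3, 4, 5], 2))  # -> [[1, 2], [3, 4], [5]]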
382,597 | def handle(self, source, target, app=None, **options):
translation.activate(settings.LANGUAGE_CODE)
if app:
unpack = app.split('.')
if len(unpack) == 2:
models = [get_model(unpack[0], unpack[1])]
elif len(unpack) == 1:
models = get_models(get_app(unpack[0]))
else:
models = get_models()
for model in models:
if hasattr(model, 'localized_fields'):
model_full_name = '%s.%s' % (model._meta.app_label, model._meta.module_name)
update_instances = set()
messages = []
for instance in model.objects.all():
for field in model.localized_fields:
source_field = get_real_fieldname(field, source)
target_field = get_real_fieldname(field, target)
if hasattr(instance, source_field) and hasattr(instance, target_field):
source_field_value = getattr(instance, source_field)
target_field_value = getattr(instance, target_field)
if target_field_value in (None, u'')\
and source_field_value not in (None, u''):
setattr(instance, target_field, force_unicode(source_field_value))
update_instances.add(instance)
messages.append(u"%s %s %s will become %s" % (model_full_name, instance, target_field, force_unicode(source_field_value)))
if len(update_instances):
if self.ask_for_confirmation(messages, u'%s.%s' % (model._meta.app_label, model._meta.module_name)):
for update_instance in update_instances:
print u"saving %s" % update_instance
update_instance.save() | command execution |
382,598 | def dollarfy(x):
def _dollarfy(key, value, fmt, meta):
if key == 'Math':
return Str('$' + value[1] + '$')
return None
return walk(x, _dollarfy, '', {}) | Replaces Math elements in element list 'x' with a $-enclosed string.
stringify() passes through TeX math. Use dollarfy(x) first to replace
Math elements with math strings set in dollars. 'x' should be a deep copy
so that the underlying document is left untouched.
Returns 'x'. |
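A sketch with pandocfilters, where a Math value is a (mathtype, tex) pair, hence value[1] above (`doc` stands for a pandoc JSON AST already loaded):

    import copy
    from pandocfilters import stringify

    doc_copy = copy.deepcopy(doc)          # keep the original AST untouched
    text = stringify(dollarfy(doc_copy))   # Math now appears as '$...$'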
382,599 | def _check_forest(self, sensors):
if self in sensors:
raise ValueError('sensor %s references itself'  # message reconstructed; original stripped
% (self.name,))
sensors.add(self)
for parent in self._parents:
parent._check_forest(sensors) | Validate that this sensor doesn't end up referencing itself. |