Unnamed: 0 (int64, 0 – 389k) | code (stringlengths 26 – 79.6k) | docstring (stringlengths 1 – 46.9k)
---|---|---|
24,500 |
def convertforoutput(self,outputfile):
assert isinstance(outputfile, CLAMOutputFile)
if not outputfile.metadata.__class__ in self.acceptforoutput:
raise Exception("Convertor " + self.__class__.__name__ + " can not convert input files to " + outputfile.metadata.__class__.__name__ + "!")
return []
|
Convert from one of the source formats into target format. Relevant if converters are used in OutputTemplates. Sourcefile is a CLAMOutputFile instance.
|
24,501 |
def tag(self, tag):
url = % (self, tag)
response = self.http.get(url, auth=self.auth)
response.raise_for_status()
return response.json()
|
Get a release by tag
|
24,502 |
def sealedbox_encrypt(data, **kwargs):
data = salt.utils.stringutils.to_bytes(data)
pk = _get_pk(**kwargs)
b = libnacl.sealed.SealedBox(pk)
return base64.b64encode(b.encrypt(data))
|
Encrypt data using a public key generated from `nacl.keygen`.
The encrypted data can be decrypted using `nacl.sealedbox_decrypt` only with the secret key.
CLI Examples:
.. code-block:: bash
salt-run nacl.sealedbox_encrypt datatoenc
salt-call --local nacl.sealedbox_encrypt datatoenc pk_file=/etc/salt/pki/master/nacl.pub
salt-call --local nacl.sealedbox_encrypt datatoenc pk='vrwQF7cNiNAVQVAiS3bvcbJUnF0cN6fU9YTZD9mBfzQ='
|
24,503 |
def addMenuItem( self, newItem, atItem ):
tree = self.uiMenuTREE
if ( not atItem ):
tree.addTopLevelItem(newItem)
elif ( atItem.data(0, Qt.UserRole) == ):
atItem.addChild(newItem)
elif ( atItem.parent() ):
index = atItem.parent().indexOfChild(atItem)
atItem.parent().insertChild(index + 1, newItem)
else:
index = tree.indexOfTopLevelItem(atItem)
tree.insertTopLevelItem(index + 1, newItem)
|
Adds a new menu item at the given item.
:param newItem | <QTreeWidgetItem>
atItem | <QTreeWidgetItem>
|
24,504 |
def simplex_projection(v, b=1):
v = np.asarray(v)
p = len(v)
v = (v > 0) * v
u = np.sort(v)[::-1]
sv = np.cumsum(u)
rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1]
theta = np.max([0, (sv[rho] - b) / (rho + 1)])
w = (v - theta)
w[w < 0] = 0
return w
|
r"""Projection vectors to the simplex domain
Implemented according to the paper: Efficient projections onto the
l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
Implementation Time: 2011 June 17 by Bin@libin AT pmail.ntu.edu.sg
Optimization Problem: min_{w}\| w - v \|_{2}^{2}
s.t. \sum_{i=1}^{m} w_{i} = z, w_{i} \geq 0
Input: A vector v \in R^{m}, and a scalar z > 0 (default=1)
Output: Projection vector w
:Example:
>>> proj = simplex_projection([.4 ,.3, -.4, .5])
>>> proj # doctest: +NORMALIZE_WHITESPACE
array([ 0.33333333, 0.23333333, 0. , 0.43333333])
>>> print(proj.sum())
1.0
Original matlab implementation: John Duchi ([email protected])
Python-port: Copyright 2013 by Thomas Wiecki ([email protected]).
|
24,505 |
def nucnorm(x0, rho, gamma):
u, s, v = np.linalg.svd(x0, full_matrices=False)
sthr = np.maximum(s - (gamma / float(rho)), 0)
x_out = (u.dot(np.diag(sthr)).dot(v))
return x_out
|
Proximal operator for the nuclear norm (sum of the singular values of a matrix)
Parameters
----------
x0 : array_like
The starting or initial point used in the proximal update step
rho : float
Momentum parameter for the proximal step (larger value -> stays closer to x0)
gamma : float
A constant that weights how strongly to enforce the constraint
Returns
-------
theta : array_like
The parameter vector found after running the proximal update step
|
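A brief, hedged usage sketch for the proximal operator above (assumes NumPy and that `nucnorm` is in scope): every singular value of the input is soft-thresholded by ``gamma / rho``.
import numpy as np

# Illustrative input; names here are not from the original module.
rng = np.random.default_rng(0)
x0 = rng.normal(size=(5, 4))
x_prox = nucnorm(x0, rho=1.0, gamma=0.5)

# Singular values are shrunk toward zero by gamma / rho and clipped at 0.
s_before = np.linalg.svd(x0, compute_uv=False)
s_after = np.linalg.svd(x_prox, compute_uv=False)
assert np.all(s_after <= s_before + 1e-9)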
24,506 |
def add_rec_new(self, k, val):
self.rec_new(val)
self[k] = val
return val
|
Recursively add a new value and its children to me, and assign a
variable to it.
Args:
k (str): The name of the variable to assign.
val (LispVal): The value to be added and assigned.
Returns:
LispVal: The added value.
|
24,507 |
def set_web_master(self):
try:
self.web_master = self.soup.find().string
except AttributeError:
self.web_master = None
|
Parses the feed's webmaster and sets value
|
24,508 |
def _set_mode(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=mode.mode, is_container=, presence=False, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__mode = t
if hasattr(self, ):
self._set()
|
Setter method for mode, mapped from YANG variable /interface/tunnel/mode (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mode() directly.
|
24,509 |
def _add_gmaf(self, variant_obj, info_dict):
for transcript in variant_obj.transcripts:
gmaf_raw = transcript.GMAF
if gmaf_raw:
gmaf = float(gmaf_raw.split()[-1])
variant_obj.add_frequency(, gmaf)
if not variant_obj.thousand_g:
variant_obj.thousand_g = gmaf
|
Add the gmaf frequency
Args:
variant_obj (puzzle.models.Variant)
info_dict (dict): An info dictionary
|
24,510 |
def formatdt(date: datetime.date, include_time: bool = True) -> str:
if include_time:
return date.strftime("%Y-%m-%dT%H:%M")
else:
return date.strftime("%Y-%m-%d")
|
Formats a ``datetime.date`` to ISO-8601 basic format, to minute accuracy
with no timezone (or, if ``include_time`` is ``False``, omit the time).
|
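A small illustrative example of `formatdt` (assuming the function above is in scope):
import datetime

dt = datetime.datetime(2023, 5, 17, 14, 30)
print(formatdt(dt))                      # 2023-05-17T14:30
print(formatdt(dt, include_time=False))  # 2023-05-17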
24,511 |
def plot_oneD(dataset, vars, filename, bins=60):
n = len(vars)
fig, axes = plt.subplots(nrows=n,
ncols=1,
sharex=False,
sharey=False)
for i, x in enumerate(vars):
ax = axes[i]
P = posterior.oneD(dataset+, x, limits=limits(x), bins=bins)
P.plot(ax)
ax.set_xlabel(labels(x))
ax.set_yticklabels([])
fig.set_size_inches(4, 4*n)
fig.savefig(filename, dpi=200, bbox_inches=)
plt.close(fig)
|
Plot 1D marginalised posteriors for the 'vars' of interest.
|
24,512 |
def load_observations((observations, regex, rename), path, filenames):
for filename in filenames:
if re.search(regex, filename) is None:
logging.error("Skipping {}".format(filename))
continue
obs = mpc.MPCReader().read(os.path.join(path, filename))
for ob in obs:
if "568" not in ob.observatory_code:
continue
if not isinstance(ob.comment, mpc.OSSOSComment):
continue
if ob.date < Time("2013-01-01 00:00:00"):
continue
if rename:
new_provisional_name = os.path.basename(filename)
new_provisional_name = new_provisional_name[0:new_provisional_name.find(".")]
rename_map[ob.provisional_name] = new_provisional_name
try:
key1 = ob.comment.frame.split()[0]
except Exception as ex:
logger.warning(str(ex))
logger.warning(ob.to_string())
continue
key2 = ob.provisional_name
if key1 not in observations:
observations[key1] = {}
if key2 in observations[key1]:
if observations[key1][key2]:
continue
if not observation.null_observation:
logger.error(filename)
logger.error(str(observations[key1][key2]))
raise ValueError("conflicting observations for {} in {}".format(key2, key1))
observations[key1][key2] = ob
|
Returns a provisional name based dictionary of observations of the object.
Each observation is keyed on the date, i.e. a dictionary of dictionaries.
@param path: the directory where filenames are.
@type path str
@param filenames: list of files in path.
@type filenames list
@rtype None
|
24,513 |
def configure_flair(self, subreddit, flair_enabled=False,
flair_position=,
flair_self_assign=False,
link_flair_enabled=False,
link_flair_position=,
link_flair_self_assign=False):
flair_enabled = if flair_enabled else
flair_self_assign = if flair_self_assign else
if not link_flair_enabled:
link_flair_position =
link_flair_self_assign = if link_flair_self_assign else
data = {: six.text_type(subreddit),
: flair_enabled,
: flair_position,
: flair_self_assign,
: link_flair_position,
: link_flair_self_assign}
return self.request_json(self.config[], data=data)
|
Configure the flair setting for the given subreddit.
:returns: The json response from the server.
|
24,514 |
def notifyObservers(self, arg=None):
self.mutex.acquire()
try:
if not self.changed:
return
localArray = self.obs[:]
self.clearChanged()
finally:
self.mutex.release()
for observer in localArray:
observer.update(self, arg)
|
If 'changed' indicates that this object
has changed, notify all its observers, then
call clearChanged(). Each observer has its
update() called with two arguments: this
observable object and the generic 'arg'.
|
24,515 |
def sg_min(tensor, opt):
return tf.reduce_min(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
|
r"""Computes the minimum of elements across axis of a tensor.
See `tf.reduce_min()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
|
24,516 |
def _verify_jws(self, payload, key):
jws = JWS.from_compact(payload)
try:
alg = jws.signature.combined.alg.name
except KeyError:
msg =
raise SuspiciousOperation(msg)
if alg != self.OIDC_RP_SIGN_ALGO:
msg = "The provider algorithm {!r} does not match the clientJWS token verification failed.'
raise SuspiciousOperation(msg)
return jws.payload
|
Verify the given JWS payload with the given key and return the payload
|
24,517 |
def delete(self, worker_id):
code = 200
if worker_id in self.jobs:
self.jobs[worker_id][].revoke(terminate=True)
report = {
: worker_id,
: True
}
self.jobs.pop(worker_id)
else:
report = {: .format(worker_id)}
code = 404
return flask.jsonify(report), code
|
Stop and remove a worker
|
24,518 |
def match(self, location):
if self.ssh_alias != location.ssh_alias:
return False
elif self.have_wildcards:
return fnmatch.fnmatch(location.directory, self.directory)
else:
self = os.path.normpath(self.directory)
other = os.path.normpath(location.directory)
return self == other
|
Check if the given location "matches".
:param location: The :class:`Location` object to try to match.
:returns: :data:`True` if the two locations are on the same system and
the :attr:`directory` can be matched as a filename pattern or
a literal match on the normalized pathname.
|
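A minimal sketch of the two matching modes described above, using `fnmatch` and `os.path.normpath` directly (the paths are illustrative):
import fnmatch
import os

# wildcard match: the stored directory is treated as a filename pattern
print(fnmatch.fnmatch("/backups/laptop", "/backups/*"))                # True

# literal match: both sides are normalized before comparison
print(os.path.normpath("/srv/www/") == os.path.normpath("/srv/www"))  # True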
24,519 |
async def get_user_groups(request):
acl_callback = request.get(GROUPS_KEY)
if acl_callback is None:
raise RuntimeError()
user_id = await get_auth(request)
groups = await acl_callback(user_id)
if groups is None:
return None
user_groups = (Group.AuthenticatedUser, user_id) if user_id is not None else ()
return set(itertools.chain(groups, (Group.Everyone,), user_groups))
|
Returns the groups that the user in this request has access to.
This function gets the user id from the auth.get_auth function, and passes
it to the ACL callback function to get the groups.
Args:
request: aiohttp Request object
Returns:
If the ACL callback function returns None, this function returns None.
Otherwise this function returns the sequence of group permissions
provided by the callback, plus the Everyone group. If user_id is not
None, the AuthenticatedUser group and the user_id are added to the
groups returned by the function
Raises:
RuntimeError: If the ACL middleware is not installed
|
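A hypothetical ACL callback of the kind described above (the name and group values are illustrative, not from the source): it maps a user id to that user's groups, and returning ``None`` denies access.
async def acl_group_callback(user_id):
    # anonymous users get no extra groups
    if user_id is None:
        return ()
    groups = {"alice": ("admin", "users"), "bob": ("users",)}
    return groups.get(user_id, ())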
24,520 |
def get_range_slices(self, column_parent, predicate, range, consistency_level):
self._seqid += 1
d = self._reqs[self._seqid] = defer.Deferred()
self.send_get_range_slices(column_parent, predicate, range, consistency_level)
return d
|
returns a subset of columns for a contiguous range of keys.
Parameters:
- column_parent
- predicate
- range
- consistency_level
|
24,521 |
def extract_ape (archive, compression, cmd, verbosity, interactive, outdir):
outfile = util.get_single_outfile(outdir, archive, extension=".wav")
return [cmd, archive, outfile, ]
|
Decompress an APE archive to a WAV file.
|
24,522 |
def get_pytorch_link(ft)->str:
"Returns link to pytorch docs of `ft`."
name = ft.__name__
ext =
if name == : return f
if name == : return f
if name.startswith():
doc_path = get_module_name(ft).replace(, )
if inspect.ismodule(ft): name = name.replace(, )
return f
if name.startswith() and inspect.ismodule(ft):
nn_link = name.replace(, )
return f
paths = get_module_name(ft).split()
if len(paths) == 1: return f
offset = 1 if paths[1] == else 0
doc_path = paths[1+offset]
if inspect.ismodule(ft): return f
fnlink = .join(paths[:(2+offset)]+[name])
return f
|
Returns link to pytorch docs of `ft`.
|
24,523 |
def delete(self):
key = self._key(self._all_keys())
_cache.delete(key)
|
Delete any existing copy of this object from the cache.
|
24,524 |
def write(
self,
mi_cmd_to_write,
timeout_sec=DEFAULT_GDB_TIMEOUT_SEC,
raise_error_on_timeout=True,
read_response=True,
):
self.verify_valid_gdb_subprocess()
if timeout_sec < 0:
self.logger.warning("timeout_sec was negative, replacing with 0")
timeout_sec = 0
if type(mi_cmd_to_write) in [str, unicode]:
pass
elif type(mi_cmd_to_write) == list:
mi_cmd_to_write = "\n".join(mi_cmd_to_write)
else:
raise TypeError(
"The gdb mi command must a be str or list. Got "
+ str(type(mi_cmd_to_write))
)
self.logger.debug("writing: %s", mi_cmd_to_write)
if not mi_cmd_to_write.endswith("\n"):
mi_cmd_to_write_nl = mi_cmd_to_write + "\n"
else:
mi_cmd_to_write_nl = mi_cmd_to_write
if USING_WINDOWS:
self.gdb_process.stdin.flush()
else:
self.logger.error("got unexpected fileno %d" % fileno)
if read_response is True:
return self.get_gdb_response(
timeout_sec=timeout_sec, raise_error_on_timeout=raise_error_on_timeout
)
else:
return []
|
Write to gdb process. Block while parsing responses from gdb for a maximum of timeout_sec.
Args:
mi_cmd_to_write (str or list): String to write to gdb. If list, it is joined by newlines.
timeout_sec (float): Maximum number of seconds to wait for response before exiting. Must be >= 0.
raise_error_on_timeout (bool): If read_response is True, raise error if no response is received
read_response (bool): Block and read response. If there is a separate thread running,
this can be false, and the reading thread can read the output.
Returns:
List of parsed gdb responses if read_response is True, otherwise []
Raises:
NoGdbProcessError if there is no gdb subprocess running
TypeError if mi_cmd_to_write is not valid
|
24,525 |
def names_in_chains(stream_item, aligner_data):
chain_selector = aligner_data.get(, )
assert chain_selector in _CHAIN_SELECTORS, \
% (chain_selector, _CHAIN_SELECTORS.keys())
chain_selector = _CHAIN_SELECTORS[chain_selector]
equiv_ids = make_chains_with_names( stream_item.body.sentences )
required_annotator_id = aligner_data.get()
for annotator_id, ratings in stream_item.ratings.items():
if (required_annotator_id is not None) and (annotator_id != required_annotator_id):
continue
else:
for rating in ratings:
label = Label(annotator=rating.annotator,
target=rating.target)
for eqid, (chain_mentions, chain_tokens) in equiv_ids.items():
if chain_selector(rating.mentions, chain_mentions):
for tok in chain_tokens:
add_annotation(tok, label)
|
Convert doc-level Rating object into a Label, and add that Label
to all Token in all coref chains identified by
aligner_data["chain_selector"]
:param stream_item: document that has a doc-level Rating to translate into token-level Labels.
:param aligner_data: dict containing:
chain_selector: ALL or ANY
annotator_id: string to find at stream_item.Ratings[i].annotator.annotator_id
If chain_selector==ALL, then only apply Label to chains in which
all of the Rating.mentions strings appear as substrings within at
least one of the Token.token strings.
If chain_selector==ANY, then apply Label to chains in which any of
the Rating.mentions strings appear as a substring within at least
one of the Token.token strings.
If chain_selector==ANY_MULTI_TOKEN, then apply Label to chains in which all
the names in any of the Rating.mentions strings appear as a substring within at least
one of the Token.token strings.
|
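A hedged sketch of what the ALL / ANY chain selectors described above might look like as plain substring checks (the actual `_CHAIN_SELECTORS` implementations are not shown in this row):
def _all_selector(mentions, chain_mentions):
    # every Rating.mentions string appears as a substring of some chain token
    return all(any(m in c for c in chain_mentions) for m in mentions)

def _any_selector(mentions, chain_mentions):
    # at least one Rating.mentions string appears as a substring
    return any(any(m in c for c in chain_mentions) for m in mentions)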
24,526 |
def get_project_children(self, project_id, name_contains, exclude_response_fields=None):
return self._get_children(, project_id, name_contains, exclude_response_fields)
|
Send GET to /projects/{project_id}/children filtering by a name.
:param project_id: str uuid of the project
:param name_contains: str name to filter folders by (if not None this method works recursively)
:param exclude_response_fields: [str]: list of fields to exclude in the response items
:return: requests.Response containing the successful result
|
24,527 |
def create_empty(self, name=None, renderers=None, RootNetworkList=None, verbose=False):
PARAMS=set_param(["name","renderers","RootNetworkList"],[name,renderers,RootNetworkList])
response=api(url=self.__url+"/create empty", PARAMS=PARAMS, method="POST", verbose=verbose)
return response
|
Create a new, empty network. The new network may be created as part of
an existing network collection or a new network collection.
:param name (string, optional): Enter the name of the new network.
:param renderers (string, optional): Select the renderer to use for the
new network view. By default, the standard Cytoscape 2D renderer (Ding)
will be used = [''],
:param RootNetworkList (string, optional): Choose the network collection
the new network should be part of. If no network collection is selected,
a new network collection is created. = [' -- Create new network collection --',
'cy:command_documentation_generation']
:param verbose: print more
|
24,528 |
def visit_default(self, node):
if not node.is_statement:
return
if not node.root().pure_python:
return
prev_sibl = node.previous_sibling()
if prev_sibl is not None:
prev_line = prev_sibl.fromlineno
else:
if (
isinstance(node.parent, nodes.TryFinally)
and node in node.parent.finalbody
):
prev_line = node.parent.body[0].tolineno + 1
else:
prev_line = node.parent.statement().fromlineno
line = node.fromlineno
assert line, node
if prev_line == line and self._visited_lines.get(line) != 2:
self._check_multi_statement_line(node, line)
return
if line in self._visited_lines:
return
try:
tolineno = node.blockstart_tolineno
except AttributeError:
tolineno = node.tolineno
assert tolineno, node
lines = []
for line in range(line, tolineno + 1):
self._visited_lines[line] = 1
try:
lines.append(self._lines[line].rstrip())
except KeyError:
lines.append("")
|
check the node line number and check it if not yet done
|
24,529 |
def rollback(self):
self.logger.info("Rolling back context: %s", self.initial_offsets)
self.update_consumer_offsets(self.initial_offsets)
|
Rollback this context:
- Position the consumer at the initial offsets.
|
24,530 |
def map(source, func, *more_sources, ordered=True, task_limit=None):
if asyncio.iscoroutinefunction(func):
return amap.raw(
source, func, *more_sources,
ordered=ordered, task_limit=task_limit)
return smap.raw(source, func, *more_sources)
|
Apply a given function to the elements of one or several
asynchronous sequences.
Each element is used as a positional argument, using the same order as
their respective sources. The generation continues until the shortest
sequence is exhausted. The function can either be synchronous or
asynchronous (coroutine function).
The results can either be returned in or out of order, depending on
the corresponding ``ordered`` argument. This argument is ignored if the
provided function is synchronous.
The coroutines run concurrently but their amount can be limited using
the ``task_limit`` argument. A value of ``1`` will cause the coroutines
to run sequentially. This argument is ignored if the provided function
is synchronous.
If more than one sequence is provided, they're also awaited concurrently,
so that their waiting times don't add up.
It might happen that the provided function returns a coroutine but is not
a coroutine function per se. In this case, one can wrap the function with
``aiostream.async_`` in order to force ``map`` to await the resulting
coroutine. The following example illustrates the use of ``async_`` with a
lambda function::
from aiostream import stream, async_
...
ys = stream.map(xs, async_(lambda ms: asyncio.sleep(ms / 1000)))
|
24,531 |
def get_symbol(units) -> str:
if kind(units) == "energy":
d = {}
d["nm"] = r"\lambda"
d["wn"] = r"\bar\nu"
d["eV"] = r"\hslash\omega"
d["Hz"] = r"f"
d["THz"] = r"f"
d["GHz"] = r"f"
return d.get(units, "E")
elif kind(units) == "delay":
return r"\tau"
elif kind(units) == "fluence":
return r"\mathcal{F}"
elif kind(units) == "pulse_width":
return r"\sigma"
elif kind(units) == "temperature":
return r"T"
else:
return kind(units)
|
Get default symbol type.
Parameters
----------
units : string
Units.
Returns
-------
string
LaTeX formatted symbol.
|
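An illustrative call (assumes the module's `kind` helper classifies the unit strings; the "fs" case is an assumption):
print(get_symbol("wn"))   # \bar\nu  (an energy-like unit)
print(get_symbol("fs"))   # \tau, assuming kind("fs") == "delay"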
24,532 |
def tohdf5(table, source, where=None, name=None, create=False, drop=False,
description=None, title=, filters=None, expectedrows=10000,
chunkshape=None, byteorder=None, createparents=False,
sample=1000):
import tables
it = iter(table)
if create:
with _get_hdf5_file(source, mode=) as h5file:
if drop:
try:
h5file.get_node(where, name)
except tables.NoSuchNodeError:
pass
else:
h5file.remove_node(where, name)
if description is None:
peek, it = iterpeek(it, sample)
description = infer_dtype(peek)
h5file.create_table(where, name, description,
title=title,
filters=filters,
expectedrows=expectedrows,
chunkshape=chunkshape,
byteorder=byteorder,
createparents=createparents)
with _get_hdf5_table(source, where, name, mode=) as h5table:
h5table.truncate(0)
_insert(it, h5table)
|
Write to an HDF5 table. If `create` is `False`, assumes the table
already exists, and attempts to truncate it before loading. If `create`
is `True`, a new table will be created, and if `drop` is True,
any existing table will be dropped first. If `description` is `None`,
the description will be guessed. E.g.::
>>> import petl as etl
>>> table1 = (('foo', 'bar'),
... (1, b'asdfgh'),
... (2, b'qwerty'),
... (3, b'zxcvbn'))
>>> etl.tohdf5(table1, 'example.h5', '/testgroup', 'testtable',
... drop=True, create=True, createparents=True)
>>> etl.fromhdf5('example.h5', '/testgroup', 'testtable')
+-----+-----------+
| foo | bar |
+=====+===========+
| 1 | b'asdfgh' |
+-----+-----------+
| 2 | b'qwerty' |
+-----+-----------+
| 3 | b'zxcvbn' |
+-----+-----------+
|
24,533 |
def get_ids(a):
a_id = % (a.rsplit(, 1)[0])
a_id_lookup = % (a.rsplit(, 1)[0])
if check(a_id) is True:
return a_id, a_id_lookup
a_id_f = open(a_id, )
a_id_lookup_f = open(a_id_lookup, )
ids = []
for seq in parse_fasta(open(a)):
id = id_generator()
while id in ids:
id = id_generator()
ids.append(id)
header = seq[0].split()[1]
name = remove_bad(header)
seq[0] = % (id, header)
print(.join(seq), file=a_id_f)
print( % (id, name, header), file=a_id_lookup_f)
return a_id, a_id_lookup
|
Make a copy of sequences with short identifiers.
|
24,534 |
def subtask(self, func, *args, **kw):
self.started_task()
try:
func(*args, **kw)
except Exception:
self.log(ERROR, % (func, args, kw))
finally:
self.finished_task()
|
Helper function for tasks needing to run subthreads. Takes care of
remembering that the subtask is running, and of cleaning up after it.
Example starting a simple.Task():
backend.subtask(task.ignore, 1, 'a', verbose=False)
*args and **kw will be propagated to `func`.
|
24,535 |
def view_change_started(self, viewNo: int):
if viewNo <= self.viewNo:
logger.warning("{}Provided view no {} is not greater"
" than the current view no {}"
.format(VIEW_CHANGE_PREFIX, viewNo, self.viewNo))
return False
self.previous_master_primary = self.node.master_primary_name
for replica in self.replicas.values():
replica.primaryName = None
return True
|
Notifies primary decider about the fact that view changed to let it
prepare for election, which then will be started from outside by
calling decidePrimaries()
|
24,536 |
def _write_with_fallback(s, write, fileobj):
if IPythonIOStream is not None and isinstance(fileobj, IPythonIOStream):
pass
enc = locale.getpreferredencoding()
try:
Writer = codecs.getwriter(enc)
except LookupError:
Writer = codecs.getwriter(_DEFAULT_ENCODING)
f = Writer(fileobj)
write = f.write
try:
write(s)
return write
except UnicodeEncodeError:
Writer = codecs.getwriter()
f = Writer(fileobj)
write = f.write
write(s)
return write
|
Write the supplied string with the given write function like
``write(s)``, but use a writer for the locale's preferred encoding in case
of a UnicodeEncodeError. Failing that, attempt to write with 'utf-8' or
'latin-1'.
|
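A minimal sketch of the fallback idea (not the function's exact API): wrap a byte stream in a writer for the locale's preferred encoding, and fall back to 'latin-1' if that fails.
import codecs
import io
import locale

buf = io.BytesIO()
try:
    writer = codecs.getwriter(locale.getpreferredencoding())(buf)
    writer.write("héllo")
except (LookupError, UnicodeEncodeError):
    writer = codecs.getwriter("latin-1")(buf)
    writer.write("héllo")
print(buf.getvalue())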
24,537 |
def tag_exists(self, version):
for tag in self.available_tags():
if tag == version:
return True
return False
|
Check if a tag has already been created with the name of the
version.
|
24,538 |
def delete_tenant(self, tenant):
tenant_id = utils.get_id(tenant)
uri = "tenants/%s" % tenant_id
resp, resp_body = self.method_delete(uri)
if resp.status_code == 404:
raise exc.TenantNotFound("Tenant '%s' does not exist." % tenant)
|
ADMIN ONLY. Removes the tenant from the system. There is no 'undo'
available, so you should be certain that the tenant specified is the
tenant you wish to delete.
|
24,539 |
def group_children( index, shared, min_kids=10, stop_types=STOP_TYPES, delete_children=True ):
to_compress = []
for to_simplify in list(iterindex( index )):
if not isinstance( to_simplify, dict ):
continue
for typ,kids in children_types( to_simplify, index, stop_types=stop_types ).items():
kids = [k for k in kids if k and simple(k,shared, to_simplify)]
if len(kids) >= min_kids:
to_compress.append( (to_simplify,typ,kids))
for to_simplify,typ,kids in to_compress:
typ_address = new_address(index)
kid_addresses = [k[] for k in kids]
index[typ_address] = {
: typ_address,
: MANY_TYPE,
: typ,
: sum( [k.get(,0) for k in kids], 0),
: [to_simplify[]],
}
shared[typ_address] = index[typ_address][]
to_simplify[][:] = [typ_address]
if delete_children:
for address in kid_addresses:
try:
del index[address]
except KeyError, err:
pass
try:
del shared[address]
except KeyError, err:
pass
index[typ_address][] = []
else:
index[typ_address][] = kid_addresses
|
Collect like-type children into sub-groups of objects for objects with long children-lists
Only group if:
* there are more than X children of type Y
* children are "simple"
* individual children have no children themselves
* individual children have no other parents...
|
24,540 |
def from_array(array):
if array is None or not array:
return None
assert_type_or_raise(array, dict, parameter_name="array")
from pytgbot.api_types.sendable.files import InputFile
from pytgbot.api_types.sendable.reply_markup import ForceReply
from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
data = {}
if isinstance(array.get(), InputFile):
data[] = InputFile.from_array(array.get())
elif isinstance(array.get(), str):
data[] = u(array.get())
else:
raise TypeError()
if array.get() is None:
data[] = None
elif isinstance(array.get(), None):
data[] = None(array.get())
elif isinstance(array.get(), str):
data[] = u(array.get())
elif isinstance(array.get(), int):
data[] = int(array.get())
else:
raise TypeError()
if array.get() is None:
data[] = None
elif isinstance(array.get(), DEFAULT_MESSAGE_ID):
data[] = DEFAULT_MESSAGE_ID(array.get())
elif isinstance(array.get(), int):
data[] = int(array.get())
else:
raise TypeError()
data[] = bool(array.get()) if array.get() is not None else None
if array.get() is None:
data[] = None
elif isinstance(array.get(), InlineKeyboardMarkup):
data[] = InlineKeyboardMarkup.from_array(array.get())
elif isinstance(array.get(), ReplyKeyboardMarkup):
data[] = ReplyKeyboardMarkup.from_array(array.get())
elif isinstance(array.get(), ReplyKeyboardRemove):
data[] = ReplyKeyboardRemove.from_array(array.get())
elif isinstance(array.get(), ForceReply):
data[] = ForceReply.from_array(array.get())
else:
raise TypeError()
return StickerMessage(**data)
|
Deserialize a new StickerMessage from a given dictionary.
:return: new StickerMessage instance.
:rtype: StickerMessage
|
24,541 |
def _fitImg(self, img):
img = imread(img, )
if self.bg is not None:
img = cv2.subtract(img, self.bg)
if self.lens is not None:
img = self.lens.correct(img, keepSize=True)
(H, _, _, _, _, _, _, n_matches) = self.findHomography(img)
H_inv = self.invertHomography(H)
s = self.obj_shape
fit = cv2.warpPerspective(img, H_inv, (s[1], s[0]))
return fit, img, H, H_inv, n_matches
|
fit perspective and size of the input image to the reference image
|
24,542 |
def _init_libcrypto():
libcrypto = _load_libcrypto()
try:
libcrypto.OPENSSL_init_crypto()
except AttributeError:
libcrypto.OPENSSL_no_config()
libcrypto.OPENSSL_add_all_algorithms_noconf()
libcrypto.RSA_new.argtypes = ()
libcrypto.RSA_new.restype = c_void_p
libcrypto.RSA_free.argtypes = (c_void_p, )
libcrypto.RSA_size.argtype = (c_void_p)
libcrypto.BIO_new_mem_buf.argtypes = (c_char_p, c_int)
libcrypto.BIO_new_mem_buf.restype = c_void_p
libcrypto.BIO_free.argtypes = (c_void_p, )
libcrypto.PEM_read_bio_RSAPrivateKey.argtypes = (c_void_p, c_void_p, c_void_p, c_void_p)
libcrypto.PEM_read_bio_RSAPrivateKey.restype = c_void_p
libcrypto.PEM_read_bio_RSA_PUBKEY.argtypes = (c_void_p, c_void_p, c_void_p, c_void_p)
libcrypto.PEM_read_bio_RSA_PUBKEY.restype = c_void_p
libcrypto.RSA_private_encrypt.argtypes = (c_int, c_char_p, c_char_p, c_void_p, c_int)
libcrypto.RSA_public_decrypt.argtypes = (c_int, c_char_p, c_char_p, c_void_p, c_int)
return libcrypto
|
Set up libcrypto argtypes and initialize the library
|
24,543 |
async def upload_file(
self, file, *, part_size_kb=None, file_name=None, use_cache=None,
progress_callback=None):
if isinstance(file, (types.InputFile, types.InputFileBig)):
return file
if not file_name and getattr(file, , None):
file_name = file.name
if isinstance(file, str):
file_size = os.path.getsize(file)
elif isinstance(file, bytes):
file_size = len(file)
else:
if isinstance(file, io.IOBase) and file.seekable():
pos = file.tell()
else:
pos = None
if is_large:
return types.InputFileBig(file_id, part_count, file_name)
else:
return custom.InputSizedFile(
file_id, part_count, file_name, md5=hash_md5, size=file_size
)
|
Uploads the specified file and returns a handle (an instance of
:tl:`InputFile` or :tl:`InputFileBig`, as required) which can be
later used before it expires (they are usable for less than a day).
Uploading a file will simply return a "handle" to the file stored
remotely in the Telegram servers, which can be later used on. This
will **not** upload the file to your own chat or any chat at all.
Args:
file (`str` | `bytes` | `file`):
The path of the file, byte array, or stream that will be sent.
Note that if a byte array or a stream is given, a filename
or its type won't be inferred, and it will be sent as an
"unnamed application/octet-stream".
part_size_kb (`int`, optional):
Chunk size when uploading files. The larger, the less
requests will be made (up to 512KB maximum).
file_name (`str`, optional):
The file name which will be used on the resulting InputFile.
If not specified, the name will be taken from the ``file``
and if this is not a ``str``, it will be ``"unnamed"``.
use_cache (`type`, optional):
The type of cache to use (currently either :tl:`InputDocument`
or :tl:`InputPhoto`). If present and the file is small enough
to need the MD5, it will be checked against the database,
and if a match is found, the upload won't be made. Instead,
an instance of type ``use_cache`` will be returned.
progress_callback (`callable`, optional):
A callback function accepting two parameters:
``(sent bytes, total)``.
Returns:
:tl:`InputFileBig` if the file size is larger than 10MB,
`telethon.tl.custom.inputsizedfile.InputSizedFile`
(subclass of :tl:`InputFile`) otherwise.
|
24,544 |
def commit(self, sha):
url = self._build_url(, sha, base_url=self._api)
json = self._json(self._get(url), 200)
return RepoCommit(json, self) if json else None
|
Get a single (repo) commit. See :func:`git_commit` for the Git Data
Commit.
:param str sha: (required), sha of the commit
:returns: :class:`RepoCommit <github3.repos.commit.RepoCommit>` if
successful, otherwise None
|
24,545 |
def pttl(self, key):
key = self._encode(key)
if key not in self.redis:
return long(-2) if self.strict else None
if key not in self.timeouts:
return long(-1) if self.strict else None
time_to_live = get_total_milliseconds(self.timeouts[key] - self.clock.now())
return long(max(-1, time_to_live))
|
Emulate pttl
:param key: key for which pttl is requested.
:returns: the number of milliseconds till timeout, None if the key does not exist or if the
key has no timeout (as per the redis-py lib behavior).
|
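A small sketch of the emulated ``pttl`` semantics above in strict mode, using plain dicts in place of the mock internals (names here are illustrative):
import datetime

redis_store = {b"k": b"v", b"eternal": b"v"}
timeouts = {b"k": datetime.datetime.now() + datetime.timedelta(seconds=5)}

def pttl_sketch(key):
    if key not in redis_store:
        return -2                      # key does not exist
    if key not in timeouts:
        return -1                      # key exists but never expires
    delta = timeouts[key] - datetime.datetime.now()
    return max(-1, int(delta.total_seconds() * 1000))

print(pttl_sketch(b"k"), pttl_sketch(b"eternal"), pttl_sketch(b"missing"))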
24,546 |
def get_realtime_alarm(username, auth, url):
f_url = url + "/imcrs/fault/faultRealTime?operatorName=" + username
response = requests.get(f_url, auth=auth, headers=HEADERS)
try:
realtime_alarm_list = (json.loads(response.text))
return realtime_alarm_list[][]
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) +
|
Takes in no param as input to fetch RealTime Alarms from HP IMC RESTFUL API
:param username: OperatorName, String type. Required. Default Value "admin". Checks that the operator
has the privileges to view the Real-Time Alarms.
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return:list of dictionaries where each element of the list represents a single alarm as
pulled from the current list of realtime alarms in the HPE IMC Platform
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.alarms import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> real_time_alarm = get_realtime_alarm('admin', auth.creds, auth.url)
>>> assert type(real_time_alarm) is list
>>> assert 'faultDesc' in real_time_alarm[0]
|
24,547 |
def run_duplicated_samples(in_prefix, in_type, out_prefix, base_dir, options):
os.mkdir(out_prefix)
required_type = "tfile"
check_input_files(in_prefix, in_type, required_type)
script_prefix = os.path.join(out_prefix, "dup_samples")
options += ["--{}".format(required_type), in_prefix,
"--out", script_prefix]
try:
duplicated_samples.main(options)
except duplicated_samples.ProgramError as e:
msg = "duplicated_samples: {}".format(e)
raise ProgramError(msg)
duplicated_count = defaultdict(int)
if os.path.isfile(script_prefix + ".duplicated_samples.tfam"):
with open(script_prefix + ".duplicated_samples.tfam", "r") as i_file:
duplicated_count = Counter([
tuple(createRowFromPlinkSpacedOutput(line)[:2])
for line in i_file
])
zeroed_out = defaultdict(int)
if os.path.isfile(script_prefix + ".zeroed_out"):
with open(script_prefix + ".zeroed_out", "r") as i_file:
zeroed_out = Counter([
tuple(line.rstrip("\r\n").split("\t")[:2])
for line in i_file.read().splitlines()[1:]
])
nb_zeroed_out = sum(zeroed_out.values())
not_good_enough = set()
if os.path.isfile(script_prefix + ".not_good_enough"):
with open(script_prefix + ".not_good_enough", "r") as i_file:
not_good_enough = {
tuple(line.rstrip("\r\n").split("\t")[:4])
for line in i_file.read().splitlines()[1:]
}
chosen_sample = set()
if os.path.isfile(script_prefix + ".chosen_samples.info"):
with open(script_prefix + ".chosen_samples.info", "r") as i_file:
chosen_sample = {
tuple(line.rstrip("\r\n").split("\t"))
for line in i_file.read().splitlines()[1:]
}
not_good_still = {s[2:] for s in chosen_sample & not_good_enough}
latex_file = os.path.join(script_prefix + ".summary.tex")
try:
with open(latex_file, "w") as o_file:
print >>o_file, latex_template.subsection(
duplicated_samples.pretty_name
)
text = (
"A total of {:,d} duplicated sample{} {} found.".format(
len(duplicated_count),
"s" if len(duplicated_count) > 1 else "",
"were" if len(duplicated_count) > 1 else "was",
)
)
print >>o_file, latex_template.wrap_lines(text)
if len(duplicated_count) > 0:
text = (
"While merging duplicates, a total of {:,d} genotype{} {} "
"zeroed out. A total of {:,d} sample{} {} found to be not "
"good enough for duplicate completion.".format(
nb_zeroed_out,
"s" if nb_zeroed_out > 1 else "",
"were" if nb_zeroed_out > 1 else "was",
len(not_good_enough),
"s" if len(not_good_enough) > 1 else "",
"were" if len(not_good_enough) > 1 else "was",
)
)
print >>o_file, latex_template.wrap_lines(text)
table_label = re.sub(
r"[/\\]",
"_",
script_prefix,
) + "_dup_samples"
text = (
r"Table~\ref{" + table_label + "} summarizes the number "
"of each duplicated sample with some characteristics."
)
print >>o_file, latex_template.wrap_lines(text)
if len(not_good_still) > 0:
text = latex_template.textbf(
"There {} {:,d} sample{} that {} not good due to low "
"completion or concordance, but {} still selected as "
"the best duplicate (see Table~{}).".format(
"were" if len(not_good_still) > 1 else "was",
len(not_good_still),
"s" if len(not_good_still) > 1 else "",
"were" if len(not_good_still) > 1 else "was",
"were" if len(not_good_still) > 1 else "was",
r"~\ref{" + table_label + "}",
)
)
print >>o_file, latex_template.wrap_lines(text)
longtable_template = latex_template.jinja2_env.get_template(
"longtable_template.tex",
)
table_caption = (
"Summary of the {:,d} duplicated sample{}. The number of "
"duplicates and the total number of zeroed out genotypes "
"are shown.".format(
len(duplicated_count),
"s" if len(duplicated_count) > 1 else "",
)
)
if len(not_good_still) > 0:
table_caption += (
" A total of {:,d} sample{} (highlighted) {} not good "
"enough for completion, but {} chosen as the best "
"duplicate, and {} still in the final "
"dataset).".format(
len(not_good_still),
"s" if len(not_good_still) > 1 else "",
"were" if len(not_good_still) > 1 else "was",
"were" if len(not_good_still) > 1 else "was",
"are" if len(not_good_still) > 1 else "is",
)
)
duplicated_samples_list = duplicated_count.most_common()
print >>o_file, longtable_template.render(
table_caption=table_caption,
table_label=table_label,
nb_col=4,
col_alignments="llrr",
text_size="scriptsize",
header_data=[("FID", 1), ("IID", 1), ("Nb Duplicate", 1),
("Nb Zeroed", 1)],
tabular_data=[
[latex_template.sanitize_tex(fid),
latex_template.sanitize_tex(iid),
"{:,d}".format(nb),
"{:,d}".format(zeroed_out[(fid, iid)])]
for (fid, iid), nb in duplicated_samples_list
],
highlighted=[
(fid, iid) in not_good_still
for fid, iid in [i[0] for i in duplicated_samples_list]
],
)
except IOError:
msg = "{}: cannot write LaTeX summary".format(latex_file)
raise ProgramError(msg)
with open(os.path.join(base_dir, "results_summary.txt"), "a") as o_file:
print >>o_file, "
counter = Counter(duplicated_count.values()).most_common()
if counter:
print >>o_file, "Number of replicated samples"
else:
print >>o_file, "Number of replicated samples\t0"
for rep_type, rep_count in counter:
print >>o_file, " - x{}\t{:,d}\t\t-{:,d}".format(
rep_type,
rep_count,
(rep_count * rep_type) - rep_count,
)
print >>o_file, ("Poorly chosen replicated "
"samples\t{:,d}".format(len(not_good_still)))
print >>o_file, "---"
return _StepResult(
next_file=os.path.join(out_prefix, "dup_samples.final"),
next_file_type="tfile",
latex_summary=latex_file,
description=duplicated_samples.desc,
long_description=duplicated_samples.long_desc,
graph_path=None,
)
|
Runs step1 (duplicated samples).
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``tfile``).
This function calls the :py:mod:`pyGenClean.DupSamples.duplicated_samples`
module. The required file type for this module is ``tfile``, hence the need
to use the :py:func:`check_input_files` function to check whether the input
file type is the correct one, or to create it if needed.
|
24,548 |
def validate_request_table(self, request):
if self.batch_table:
if self.get_request_table(request) != self.batch_table:
raise AzureBatchValidationError(_ERROR_INCORRECT_TABLE_IN_BATCH)
else:
self.batch_table = self.get_request_table(request)
|
Validates that all requests have the same table name. Set the table
name if it is the first request for the batch operation.
request:
the request to insert, update or delete entity
|
24,549 |
def __run_pre_all(self):
for d in reversed(self.dirs):
pre_all_py_path = os.path.join(d, 'pre-all.py')
if os.path.isfile(pre_all_py_path):
print(, end=)
self.__run_py_file(pre_all_py_path, )
print()
pre_all_sql_path = os.path.join(d, 'pre-all.sql')
if os.path.isfile(pre_all_sql_path):
print(, end=)
self.__run_sql_file(pre_all_sql_path)
print()
|
Execute the pre-all.py and pre-all.sql files if they exist
|
24,550 |
def plotstat(data, circleinds=None, crossinds=None, edgeinds=None, url_path=None, fileroot=None,
tools="hover,tap,pan,box_select,wheel_zoom,reset", plot_width=450, plot_height=400):
fields = [, , , , , ]
if not circleinds: circleinds = range(len(data[]))
datalen = len(data[])
inds = circleinds + crossinds + edgeinds
specstd = [data[][i] for i in inds]
specstd_min = min(specstd)
specstd_max = max(specstd)
imkur = [data[][i] for i in inds]
imkur_min = min(imkur)
imkur_max = max(imkur)
source = ColumnDataSource(data = dict({(key, tuple([value[i] for i in circleinds if i not in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
stat = Figure(plot_width=plot_width, plot_height=plot_height, toolbar_location="left", x_axis_label=,
y_axis_label=, x_range=(specstd_min, specstd_max),
y_range=(imkur_min, imkur_max), tools=tools, output_backend=)
stat.circle(, , size=, line_color=None, fill_color=, fill_alpha=0.2, source=source)
if crossinds:
sourceneg = ColumnDataSource(data = dict({(key, tuple([value[i] for i in crossinds]))
for (key, value) in data.iteritems() if key in fields}))
stat.cross(, , size=, line_color=, line_alpha=0.3, source=sourceneg)
if edgeinds:
sourceedge = ColumnDataSource(data = dict({(key, tuple([value[i] for i in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
stat.circle(, , size=, line_color=, fill_color=, source=sourceedge, line_alpha=0.5, fill_alpha=0.2)
hover = stat.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([(, ), (, )])
if url_path and fileroot:
url = .format(url_path, fileroot)
taptool = stat.select(type=TapTool)
taptool.callback = OpenURL(url=url)
return stat
|
Make a light-weight stat figure
|
24,551 |
def request_camera_sensors(blink, network, camera_id):
url = "{}/network/{}/camera/{}/signals".format(blink.urls.base_url,
network,
camera_id)
return http_get(blink, url)
|
Request camera sensor info for one camera.
:param blink: Blink instance.
:param network: Sync module network id.
:param camera_id: Camera ID of camera to request sensor info from.
|
24,552 |
def prompt_save_images(args):
if args[] or args[]:
return
if (args[] or args[]) and (args[] or args[]):
save_msg = (
)
try:
save_images = utils.confirm_input(input(save_msg))
except (KeyboardInterrupt, EOFError):
return
args[] = save_images
args[] = not save_images
|
Prompt user to save images when crawling (for pdf and HTML formats).
|
24,553 |
def build_calmjs_artifacts(dist, key, value, cmdclass=BuildCommand):
if value is not True:
return
build_cmd = dist.get_command_obj()
if not isinstance(build_cmd, cmdclass):
logger.error(
" command in Distribution is not an instance of "
" (got %r instead)",
cmdclass.__module__, cmdclass.__name__, build_cmd)
return
build_cmd.sub_commands.append((key, has_calmjs_artifact_declarations))
|
Trigger the artifact build process through the setuptools.
|
24,554 |
def update(self, client=None, unique_writer_identity=False):
client = self._require_client(client)
resource = client.sinks_api.sink_update(
self.project,
self.name,
self.filter_,
self.destination,
unique_writer_identity=unique_writer_identity,
)
self._update_from_api_repr(resource)
|
API call: update sink configuration via a PUT request
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current sink.
:type unique_writer_identity: bool
:param unique_writer_identity: (Optional) determines the kind of
IAM identity returned as
writer_identity in the new sink.
|
24,555 |
def get_signature(func):
try:
return object.__getattribute__(func, )
except AttributeError:
pass
try:
if sys.version_info[0] < 3:
names, _, varkw, defaults = inspect.getargspec(func)
else:
names, _, varkw, defaults, _, _, _ = inspect.getfullargspec(func)
except TypeError:
return None
first_arg_index = 1 if inspect.ismethod(func) else 0
number_of_defaults = len(defaults) if defaults else 0
if number_of_defaults > 0:
required = .join(sorted(names[first_arg_index:-number_of_defaults]))
optional = .join(sorted(names[-number_of_defaults:]))
else:
required = .join(sorted(names[first_arg_index:]))
optional =
wildcard = if varkw is not None else
signature = .join((required, optional, wildcard))
try:
object.__setattr__(func, , signature)
except TypeError:
type.__setattr__(func, , signature)
except AttributeError:
pass
return signature
|
:type func: Callable
:rtype: str
|
24,556 |
def contact(request):
form = ContactForm(request.POST or None)
if form.is_valid():
subject = form.cleaned_data[]
message = form.cleaned_data[]
sender = form.cleaned_data[]
cc_myself = form.cleaned_data[]
recipients = settings.CONTACTFORM_RECIPIENTS
if cc_myself:
recipients.append(sender)
send_mail(getattr(settings, "CONTACTFORM_SUBJECT_PREFIX", ) + subject, message, sender, recipients)
return render(request, )
return render( request, , {: form})
|
Displays the contact form and sends the email
|
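The form field names read in the view above were elided in this row; a hypothetical `ContactForm` consistent with that usage might look like:
from django import forms

class ContactForm(forms.Form):
    # hypothetical field names; the actual keys were elided in the row above
    subject = forms.CharField(max_length=100)
    message = forms.CharField(widget=forms.Textarea)
    sender = forms.EmailField()
    cc_myself = forms.BooleanField(required=False)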
24,557 |
def calc_detection_voc_prec_rec(gt_boxlists, pred_boxlists, iou_thresh=0.5):
n_pos = defaultdict(int)
score = defaultdict(list)
match = defaultdict(list)
for gt_boxlist, pred_boxlist in zip(gt_boxlists, pred_boxlists):
pred_bbox = pred_boxlist.bbox.numpy()
pred_label = pred_boxlist.get_field("labels").numpy()
pred_score = pred_boxlist.get_field("scores").numpy()
gt_bbox = gt_boxlist.bbox.numpy()
gt_label = gt_boxlist.get_field("labels").numpy()
gt_difficult = gt_boxlist.get_field("difficult").numpy()
for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
pred_mask_l = pred_label == l
pred_bbox_l = pred_bbox[pred_mask_l]
pred_score_l = pred_score[pred_mask_l]
order = pred_score_l.argsort()[::-1]
pred_bbox_l = pred_bbox_l[order]
pred_score_l = pred_score_l[order]
gt_mask_l = gt_label == l
gt_bbox_l = gt_bbox[gt_mask_l]
gt_difficult_l = gt_difficult[gt_mask_l]
n_pos[l] += np.logical_not(gt_difficult_l).sum()
score[l].extend(pred_score_l)
if len(pred_bbox_l) == 0:
continue
if len(gt_bbox_l) == 0:
match[l].extend((0,) * pred_bbox_l.shape[0])
continue
pred_bbox_l = pred_bbox_l.copy()
pred_bbox_l[:, 2:] += 1
gt_bbox_l = gt_bbox_l.copy()
gt_bbox_l[:, 2:] += 1
iou = boxlist_iou(
BoxList(pred_bbox_l, gt_boxlist.size),
BoxList(gt_bbox_l, gt_boxlist.size),
).numpy()
gt_index = iou.argmax(axis=1)
gt_index[iou.max(axis=1) < iou_thresh] = -1
del iou
selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)
for gt_idx in gt_index:
if gt_idx >= 0:
if gt_difficult_l[gt_idx]:
match[l].append(-1)
else:
if not selec[gt_idx]:
match[l].append(1)
else:
match[l].append(0)
selec[gt_idx] = True
else:
match[l].append(0)
n_fg_class = max(n_pos.keys()) + 1
prec = [None] * n_fg_class
rec = [None] * n_fg_class
for l in n_pos.keys():
score_l = np.array(score[l])
match_l = np.array(match[l], dtype=np.int8)
order = score_l.argsort()[::-1]
match_l = match_l[order]
tp = np.cumsum(match_l == 1)
fp = np.cumsum(match_l == 0)
prec[l] = tp / (fp + tp)
if n_pos[l] > 0:
rec[l] = tp / n_pos[l]
return prec, rec
|
Calculate precision and recall based on evaluation code of PASCAL VOC.
This function calculates precision and recall of
predicted bounding boxes obtained from a dataset which has :math:`N`
images.
The code is based on the evaluation code used in PASCAL VOC Challenge.
|
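A hedged follow-up sketch (not part of the original function): turning the per-class prec/rec arrays returned above into VOC-style average precision with all-point interpolation.
import numpy as np

def voc_ap(prec, rec):
    # per-class entries can be None when the class never appears
    if prec is None or rec is None:
        return float("nan")
    mrec = np.concatenate(([0.0], rec, [1.0]))
    mpre = np.concatenate(([0.0], prec, [0.0]))
    # make precision monotonically non-increasing from right to left
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = max(mpre[i - 1], mpre[i])
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return float(np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1]))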
24,558 |
def add_file(self, filepath, gzip=False, cache_name=None):
command =
if gzip:
command +=
cache_name = cache_name or
value = % (cache_name, filepath)
self._set(command, value.strip(), multi=True)
return self._section
|
Load a static file in the cache.
.. note:: Items are stored with the filepath as is (relative or absolute) as the key.
:param str|unicode filepath:
:param bool gzip: Use gzip compression.
:param str|unicode cache_name: If not set, default will be used.
|
24,559 |
def _get_mixing_indices(size, seed=None, name=None):
with tf.compat.v1.name_scope(
name, default_name=, values=[size]):
size = tf.convert_to_tensor(value=size)
dtype = size.dtype
seed_stream = distributions.SeedStream(seed, salt=)
first = tf.random.uniform([size],
maxval=size-1,
dtype=dtype,
seed=seed_stream())
second = tf.random.uniform([size],
maxval=size-2,
dtype=dtype,
seed=seed_stream())
third = tf.random.uniform([size],
maxval=size-3,
dtype=dtype,
seed=seed_stream())
second = tf.where(first < second, x=second, y=second + 1)
smaller = tf.math.minimum(first, second)
larger = tf.math.maximum(first, second)
third = tf.where(third < smaller, x=third, y=third + 1)
third = tf.where(third < larger, x=third, y=third + 1)
sample = tf.stack([first, second, third], axis=1)
to_avoid = tf.expand_dims(tf.range(size), axis=-1)
sample = tf.where(sample < to_avoid, x=sample, y=sample + 1)
return sample
|
Generates an array of indices suitable for mutation operation.
The mutation operation in differential evolution requires that for every
element of the population, three distinct other elements be chosen to produce
a trial candidate. This function generates an array of shape [size, 3]
satisfying the properties that:
(a). array[i, :] does not contain the index 'i'.
(b). array[i, :] does not contain any overlapping indices.
(c). All elements in the array are between 0 and size - 1 inclusive.
Args:
size: Scalar integer `Tensor`. The number of samples as well as the range
of the indices to sample from.
seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
applied.
Default value: `None`.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'get_mixing_indices'.
Returns:
sample: A `Tensor` of shape [size, 3] and same dtype as `size` containing
samples without replacement between 0 and size - 1 (inclusive) with the
`i`th row not including the number `i`.
|
24,560 |
def post_translation(self, query, bug):
ignore = query
if in bug and "components" not in bug:
val = bug[]
bug[] = isinstance(val, list) and val or [val]
bug[] = bug[][0]
if in bug and "versions" not in bug:
val = bug[]
bug[] = isinstance(val, list) and val or [val]
bug[] = bug[][0]
|
Convert the results of getbug back to the ancient RHBZ value
formats
|
24,561 |
def setValue(self, key, value):
if self._customFormat:
self._customFormat.setValue(key, value)
else:
super(XSettings, self).setValue(key, wrapVariant(value))
|
Sets the value for the given key to the supplied value.
:param key | <str>
value | <variant>
|
24,562 |
def getXY(self, debug=False):
if DEBUG_STATUSBAR: print >>sys.stderr, " getXY: yes, considering offset=", sbh
statusBarOffset = sbh
else:
if DEBUG_STATUSBAR: print >>sys.stderr, " getXY: no, ignoring statusbar offset fw.wvy=", fw.wvy, ">", sbh
if fw.py == fw.wvy:
if DEBUG_STATUSBAR: print >>sys.stderr, " getXY: but wait, fw.py == fw.wvy so we are adjusting by ", (fw.px, fw.py)
pwx = fw.px
pwy = fw.py
else:
if DEBUG_STATUSBAR: print >>sys.stderr, " getXY: fw.py=%d <= fw.wvy=%d, no adjustment" % (fw.py, fw.wvy)
if DEBUG_COORDS or DEBUG_STATUSBAR or debug:
print >>sys.stderr, " getXY: returning (%d, %d) ***" % (x+hx+wvx+pwx, y+hy+wvy-statusBarOffset+pwy)
print >>sys.stderr, " x=%d+%d+%d+%d" % (x,hx,wvx,pwx)
print >>sys.stderr, " y=%d+%d+%d-%d+%d" % (y,hy,wvy,statusBarOffset,pwy)
return (x+hx+wvx+pwx, y+hy+wvy-statusBarOffset+pwy)
|
Returns the I{screen} coordinates of this C{View}.
WARNING: Don't call self.getX() or self.getY() inside this method
or it will enter an infinite loop
@return: The I{screen} coordinates of this C{View}
|
24,563 |
def patch_config(config, data):
is_changed = False
for name, value in data.items():
if value is None:
if config.pop(name, None) is not None:
is_changed = True
elif name in config:
if isinstance(value, dict):
if isinstance(config[name], dict):
if patch_config(config[name], value):
is_changed = True
else:
config[name] = value
is_changed = True
elif str(config[name]) != str(value):
config[name] = value
is_changed = True
else:
config[name] = value
is_changed = True
return is_changed
|
recursively 'patch' `config` with `data`
:returns: `!True` if the `config` was changed
|
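A minimal usage sketch of `patch_config` (values are illustrative): a ``None`` value removes a key, nested dicts are patched recursively, and the return value reports whether anything changed.
config = {"log": {"level": "info", "file": "app.log"}, "port": 8080}
patch = {"log": {"level": "debug", "file": None}, "port": 8080}

changed = patch_config(config, patch)
print(changed)  # True: log.level changed and log.file was removed
print(config)   # {'log': {'level': 'debug'}, 'port': 8080}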
24,564 |
def do_filter(self, arg):
if arg == "list":
msg.info("TEST CASE FILTERS")
for f in self.curargs["tfilter"]:
if f == "*":
msg.info(" * (default, matches all)")
else:
msg.info(" " + f)
elif arg not in self.curargs["tfilter"]:
self.curargs["tfilter"].append(arg)
self.do_filter("list")
|
Sets the filter for the test cases to include in the plot/table by name. Only those
test cases that include this text are included in plots, tables etc.
|
24,565 |
def ListLanguageIdentifiers(self):
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, column_names=[, ],
title=)
for language_id, value_list in sorted(
language_ids.LANGUAGE_IDENTIFIERS.items()):
table_view.AddRow([language_id, value_list[1]])
table_view.Write(self._output_writer)
|
Lists the language identifiers.
|
24,566 |
def _sanitize_column(self, key, value, **kwargs):
def sp_maker(x, index=None):
return SparseArray(x, index=index,
fill_value=self._default_fill_value,
kind=self._default_kind)
if isinstance(value, SparseSeries):
clean = value.reindex(self.index).as_sparse_array(
fill_value=self._default_fill_value, kind=self._default_kind)
elif isinstance(value, SparseArray):
if len(value) != len(self.index):
raise ValueError(
)
clean = value
elif hasattr(value, ):
if isinstance(value, Series):
clean = value.reindex(self.index)
if not isinstance(value, SparseSeries):
clean = sp_maker(clean)
else:
if len(value) != len(self.index):
raise ValueError(
)
clean = sp_maker(value)
else:
clean = sp_maker(value, self.index)
return clean
|
Creates a new SparseArray from the input value.
Parameters
----------
key : object
value : scalar, Series, or array-like
kwargs : dict
Returns
-------
sanitized_column : SparseArray
|
24,567 |
def fit_mle(self, data, b=None):
data = np.array(data)
length = len(data)
if not b:
b = np.sum(data)
return _trunc_logser_solver(length, b), b
|
%(super)s
b : float
The upper bound of the distribution. If None, fixed at sum(data)
|
24,568 |
def search(query, medium, credentials):
helpers.check_creds(credentials, header)
if len(query) == 0:
raise ValueError(constants.INVALID_EMPTY_QUERY)
api_query = helpers.get_query_url(medium, query)
if api_query is None:
raise ValueError(constants.INVALID_MEDIUM)
search_resp = requests.get(api_query, auth=credentials, headers=header)
if search_resp is None or search_resp.status_code == 204:
return []
query_soup = BeautifulSoup(search_resp.text, )
if medium == tokens.Medium.ANIME:
entries = query_soup.anime
if entries is None:
return helpers.reschedule(search, constants.DEFAULT_WAIT_SECS, query,
medium, credentials)
return [objects.Anime(entry) for entry in entries.findAll()]
elif medium == tokens.Medium.MANGA:
entries = query_soup.manga
if entries is None:
return helpers.reschedule(search, constants.DEFAULT_WAIT_SECS, query,
medium)
return [objects.Manga(entry) for entry in entries.findAll()]
|
Searches MyAnimeList for a [medium] matching the keyword(s) given by query.
:param query The keyword(s) to search with.
:param medium Anime or manga (tokens.Medium.ANIME or tokens.Medium.MANGA).
:return A list of all items that are of type [medium] and match the
given keywords, or, an empty list if none matched.
:raise ValueError For bad arguments.
|
24,569 |
def discover_by_name(input_directory, output_directory):
metric_list = []
log_files = os.listdir(input_directory)
for log_file in log_files:
if log_file in CONSTANTS.SUPPORTED_FILENAME_MAPPING.keys():
metric_list.append(initialize_metric(CONSTANTS.SUPPORTED_FILENAME_MAPPING[log_file], [log_file], None, [], output_directory, CONSTANTS.RESOURCE_PATH,
CONSTANTS.SUPPORTED_FILENAME_MAPPING[log_file], None, None, {}, None, None, {}))
else:
logger.warning(, log_file)
return metric_list
|
Auto discover metric types from the files that exist in input_directory and return a list of metrics
:param: input_directory: The location to scan for log files
:param: output_directory: The location for the report
|
24,570 |
def train(self, inputData, numIterations, reset=False):
if not isinstance(inputData, np.ndarray):
inputData = np.array(inputData)
if reset:
self._reset()
for _ in xrange(numIterations):
self._iteration += 1
batch = self._getDataBatch(inputData)
if batch.shape[0] != self.filterDim:
            raise ValueError("Batches and filter dimensions don't match!")
activations = self.encode(batch)
self._learn(batch, activations)
if self._iteration % self.decayCycle == 0:
self.learningRate *= self.learningRateDecay
if self.verbosity >= 1:
self.plotLoss()
self.plotBasis()
|
Trains the SparseNet, with the provided data.
The reset parameter can be set to False if the network should not be
reset before training (for example, for continuing a previously started
training).
:param inputData: (array) Input data, of dimension (inputDim, numPoints)
:param numIterations: (int) Number of training iterations
:param reset: (bool) If set to True, reset basis and history
|
24,571 |
def flip(self,inplace=False):
if inplace:
self._orb.vxvv[1]= -self._orb.vxvv[1]
if len(self._orb.vxvv) > 2:
self._orb.vxvv[2]= -self._orb.vxvv[2]
if len(self._orb.vxvv) > 4:
self._orb.vxvv[4]= -self._orb.vxvv[4]
if hasattr(self._orb,):
self._orb.orbit[:,1]= -self._orb.orbit[:,1]
if len(self._orb.vxvv) > 2:
self._orb.orbit[:,2]= -self._orb.orbit[:,2]
if len(self._orb.vxvv) > 4:
self._orb.orbit[:,4]= -self._orb.orbit[:,4]
if hasattr(self._orb,"_orbInterp"):
delattr(self._orb,"_orbInterp")
return None
        orbSetupKwargs= {'ro':None,
                         'vo':None,
                         'zo':self._orb._zo,
                         'solarmotion':self._orb._solarmotion}
        if self._orb._roSet:
            orbSetupKwargs['ro']= self._orb._ro
        if self._orb._voSet:
            orbSetupKwargs['vo']= self._orb._vo
if len(self._orb.vxvv) == 2:
return Orbit(vxvv= [self._orb.vxvv[0],-self._orb.vxvv[1]],
**orbSetupKwargs)
elif len(self._orb.vxvv) == 3:
return Orbit(vxvv=[self._orb.vxvv[0],-self._orb.vxvv[1],
-self._orb.vxvv[2]],**orbSetupKwargs)
elif len(self._orb.vxvv) == 4:
return Orbit(vxvv=[self._orb.vxvv[0],-self._orb.vxvv[1],
-self._orb.vxvv[2],self._orb.vxvv[3]],
**orbSetupKwargs)
elif len(self._orb.vxvv) == 5:
return Orbit(vxvv=[self._orb.vxvv[0],-self._orb.vxvv[1],
-self._orb.vxvv[2],self._orb.vxvv[3],
-self._orb.vxvv[4]],**orbSetupKwargs)
elif len(self._orb.vxvv) == 6:
return Orbit(vxvv= [self._orb.vxvv[0],-self._orb.vxvv[1],
-self._orb.vxvv[2],self._orb.vxvv[3],
-self._orb.vxvv[4],self._orb.vxvv[5]],
**orbSetupKwargs)
|
NAME:
flip
PURPOSE:
'flip' an orbit's initial conditions such that the velocities are minus the original velocities; useful for quick backward integration; returns a new Orbit instance
INPUT:
inplace= (False) if True, flip the orbit in-place, that is, without returning a new instance and also flip the velocities of the integrated orbit (if it exists)
OUTPUT:
Orbit instance that has the velocities of the current orbit flipped (inplace=False) or just flips all velocities of current instance (inplace=True)
HISTORY:
2014-06-17 - Written - Bovy (IAS)
2016-07-21 - Added inplace keyword - Bovy (UofT)
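Example (a minimal sketch using the public galpy Orbit API; the initial conditions are arbitrary)::

    from galpy.orbit import Orbit
    o = Orbit(vxvv=[1., 0.1, 1.1, 0., 0.1, 0.])
    ob = o.flip()         # new instance with velocities reversed
    o.flip(inplace=True)  # flips the current instance, returns None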
|
24,572 |
def replies(self):
if self._replies is None or not self._has_fetched_replies:
response = self.reddit_session.request_json(self._fast_permalink)
if response[1][][]:
self._replies = response[1][][][0]._replies
        else:
            self._replies = []
        self._has_fetched_replies = True
    return self._replies
|
Return a list of the comment replies to this comment.
If the comment is not from a submission, :meth:`replies` will
always be an empty list unless you call :meth:`refresh()`
before calling :meth:`replies` due to a limitation in
reddit's API.
|
24,573 |
def transaction_search_query(self, transaction_type, **kwargs):
if not transaction_type:
raise ValueError()
        if not kwargs.get('location') and (not kwargs.get('latitude') or not kwargs.get('longitude')):
raise ValueError(
)
return self._query(TRANSACTION_SEARCH_API_URL.format(transaction_type), **kwargs)
|
Query the Yelp Transaction Search API.
documentation: https://www.yelp.com/developers/documentation/v3/transactions_search
required parameters:
* transaction_type - transaction type
* one of either:
* location - text specifying a location to search for
* latitude and longitude
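Example (illustrative only; ``client`` is assumed to be an authenticated API client exposing this method)::

    resp = client.transaction_search_query('delivery', location='San Francisco, CA')
    # or, equivalently, with coordinates
    resp = client.transaction_search_query('delivery', latitude=37.77, longitude=-122.42)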
|
24,574 |
def get_group_members(group_name, region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
marker = None
truncated = True
users = []
while truncated:
info = conn.get_group(group_name, marker=marker, max_items=1000)
if not info:
return False
truncated = bool(info[][][])
if truncated and in info[][]:
marker = info[][][]
else:
marker = None
truncated = False
users += info[][][]
return users
except boto.exception.BotoServerError as e:
log.debug(e)
log.error(, group_name)
return False
|
Get the members of an IAM group.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt myminion boto_iam.get_group mygroup
|
24,575 |
def set_sort_function(sortable, callback, column=0):
sortable.set_default_sort_func(
lambda tree, itera, iterb: _normalize(callback(
tree.get_value(itera, column),
tree.get_value(iterb, column)
))
)
|
*sortable* is a :class:`gtk.TreeSortable` instance.
*callback* will be passed two items and must return a value like
the built-in `cmp`.
*column* is an integer addressing the column that holds items.
This will re-sort even if *callback* is the same as before.
.. note::
When sorting a `ListStore` without a `TreeModelSort` you have to call
`set_sort_column_id(-1, gtk.SORT_ASCENDING)` once, *after* this.
|
24,576 |
def user_post_save(sender, **kwargs):
if kwargs.get("raw", False):
return False
user, created = kwargs["instance"], kwargs["created"]
disabled = getattr(user, "_disable_account_creation", not settings.ACCOUNT_CREATE_ON_SAVE)
if created and not disabled:
Account.create(user=user)
|
After User.save is called we check to see if it was a created user. If so,
we check if the User object wants account creation. If all passes we
create an Account object.
We only run on user creation to avoid having to check for existence on
each call to User.save.
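Example (a sketch of how such a receiver is typically wired up with Django's signal framework; where you connect it is up to your app config)::

    from django.contrib.auth import get_user_model
    from django.db.models.signals import post_save

    post_save.connect(user_post_save, sender=get_user_model())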
|
24,577 |
def experiments_fmri_upsert_property(self, experiment_id, properties):
fmri = self.experiments_fmri_get(experiment_id)
if fmri is None:
return None
return self.funcdata.upsert_object_property(fmri.identifier, properties)
|
Upsert property of fMRI data object associated with given experiment.
Raises ValueError if given property dictionary results in an illegal
operation.
Parameters
----------
experiment_id : string
Unique experiment identifier
properties : Dictionary()
Dictionary of property names and their new values.
Returns
-------
FMRIDataHandle
Handle for updated object, or None if the object doesn't exist
|
24,578 |
def extend(self, definitions):
for name, definition in dict(definitions).items():
self.add(name, definition)
|
Add several definitions at once. Existing definitions are
replaced silently.
:param definitions: The names and definitions.
:type definitions: a :term:`mapping` or an :term:`iterable` with
two-value :class:`tuple` s
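Example (illustrative; the registry instance and definition values are placeholders)::

    registry.extend({'greeting': 'hello', 'answer': 42})
    registry.extend([('alpha', 1), ('beta', 2)])  # any iterable of two-value tuples also works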
|
24,579 |
def create_new_client(self, filename=None, give_focus=True):
if not filename:
if not osp.isdir(NOTEBOOK_TMPDIR):
os.makedirs(NOTEBOOK_TMPDIR)
nb_name = + str(self.untitled_num) +
filename = osp.join(NOTEBOOK_TMPDIR, nb_name)
nb_contents = nbformat.v4.new_notebook()
nbformat.write(nb_contents, filename)
self.untitled_num += 1
"check for errors."))
self.untitled_num -= 1
self.create_welcome_client()
return
welcome_client = self.create_welcome_client()
client = NotebookClient(self, filename)
self.add_tab(client)
if NOTEBOOK_TMPDIR not in filename:
self.add_to_recent(filename)
self.setup_menu_actions()
client.register(server_info)
client.load_notebook()
if welcome_client and not self.testing:
self.tabwidget.setCurrentIndex(0)
|
Create a new notebook or load a pre-existing one.
|
24,580 |
def read_json(fp, local_files, dir_files, name_bytes):
if name_bytes in dir_files:
json_pos = local_files[dir_files[name_bytes][1]][1]
json_len = local_files[dir_files[name_bytes][1]][2]
fp.seek(json_pos)
json_properties = fp.read(json_len)
return json.loads(json_properties.decode("utf-8"))
return None
|
Read json properties from the zip file
:param fp: a file pointer
:param local_files: the local files structure
:param dir_files: the directory headers
:param name_bytes: the name (as bytes) of the json file to read
:return: the json properties as a dictionary, if found
The file pointer will be at a location following the
local file entry after this method.
The local_files and dir_files should be passed from
the results of parse_zip.
|
24,581 |
def is_eligible(self, segment):
if not segment or not segment.sampled:
return False
return segment.get_total_subsegments_size() > self.streaming_threshold
|
A segment is eligible to have its children subsegments streamed
if it is sampled and it breaches streaming threshold.
|
24,582 |
def unflagging_question(self, id, attempt, validation_token, quiz_submission_id, access_code=None):
path = {}
data = {}
params = {}
path["quiz_submission_id"] = quiz_submission_id
path["id"] = id
data["attempt"] = attempt
data["validation_token"] = validation_token
if access_code is not None:
data["access_code"] = access_code
self.logger.debug("PUT /api/v1/quiz_submissions/{quiz_submission_id}/questions/{id}/unflag with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/quiz_submissions/{quiz_submission_id}/questions/{id}/unflag".format(**path), data=data, params=params, no_data=True)
|
Unflagging a question.
Remove the flag that you previously set on a quiz question after you've
returned to it.
|
24,583 |
def int_bytes(cls, string):
        if string[-1] in ('k', 'm'):  # suffix letters are an assumption, inferred from the 2**10 / 2**20 factors below
            value = cls.int_0_inf(string[:-1])
            unit = string[-1]
            if unit == 'k':
value *= 2 ** 10
else:
value *= 2 ** 20
return value
else:
return cls.int_0_inf(string)
|
Convert string describing size to int.
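Example (a sketch; ``Parser`` is a placeholder for the class defining this classmethod, and the 'k'/'m' suffixes follow the assumption noted in the code above)::

    Parser.int_bytes('10k')   # -> 10 * 2**10 = 10240
    Parser.int_bytes('2m')    # -> 2 * 2**20 = 2097152
    Parser.int_bytes('1024')  # no suffix: parsed as a plain integer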
|
24,584 |
def getaddrinfo(node, service=0, family=0, socktype=0, protocol=0, flags=0, timeout=30):
hub = get_hub()
with switch_back(timeout) as switcher:
request = pyuv.dns.getaddrinfo(hub.loop, node, service, family,
socktype, protocol, flags, callback=switcher)
switcher.add_cleanup(request.cancel)
result = hub.switch()
result, error = result[0]
if error:
message = pyuv.errno.strerror(error)
raise pyuv.error.UVError(error, message)
return result
|
Resolve an Internet *node* name and *service* into a socket address.
The *family*, *socktype* and *protocol* are optional arguments that specify
the address family, socket type and protocol, respectively. The *flags*
argument allows you to pass flags to further modify the resolution process.
See the :func:`socket.getaddrinfo` function for a detailed description of
these arguments.
The return value is a list of ``(family, socktype, proto, canonname,
sockaddr)`` tuples. The fifth element (``sockaddr``) is the socket address.
It will be a 2-tuple ``(addr, port)`` for an IPv4 address, and a 4-tuple
``(addr, port, flowinfo, scopeid)`` for an IPv6 address.
The address resolution is performed in the libuv thread pool.
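Example (a minimal sketch; assumes it runs where the library's hub/event loop is available)::

    addrs = getaddrinfo('example.com', 'http')
    for family, socktype, proto, canonname, sockaddr in addrs:
        print(sockaddr)  # e.g. (address, 80)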
|
24,585 |
def attached_socket(self, *args, **kwargs):
try:
sock = self.attach(*args, **kwargs)
yield sock
finally:
sock.shutdown(socket.SHUT_RDWR)
sock.close()
|
Opens a raw socket in a ``with`` block to write data to Splunk.
The arguments are identical to those for :meth:`attach`. The socket is
automatically closed at the end of the ``with`` block, even if an
exception is raised in the block.
:param host: The host value for events written to the stream.
:type host: ``string``
:param source: The source value for events written to the stream.
:type source: ``string``
:param sourcetype: The sourcetype value for events written to the
stream.
:type sourcetype: ``string``
:returns: Nothing.
**Example**::
import splunklib.client as client
s = client.connect(...)
index = s.indexes['some_index']
with index.attached_socket(sourcetype='test') as sock:
sock.send('Test event\\r\\n')
|
24,586 |
async def blow_out(self, mount):
this_pipette = self._attached_instruments[mount]
if not this_pipette:
raise top_types.PipetteNotAttachedError(
"No pipette attached to {} mount".format(mount.name))
self._backend.set_active_current(Axis.of_plunger(mount),
this_pipette.config.plunger_current)
try:
await self._move_plunger(
mount, this_pipette.config.blow_out)
except Exception:
self._log.exception()
raise
finally:
this_pipette.set_current_volume(0)
|
Force any remaining liquid to dispense. The liquid will be dispensed at
the current location of the pipette
|
24,587 |
def hash_input(inpt, algo=HASH_SHA256):
if (algo == HASH_MD5):
hashcode = hashlib.md5()
elif (algo == HASH_SHA1):
hashcode = hashlib.sha1()
elif (algo == HASH_SHA224):
hashcode = hashlib.sha224()
elif (algo == HASH_SHA256):
hashcode = hashlib.sha256()
elif (algo == HASH_SHA384):
hashcode = hashlib.sha384()
elif (algo == HASH_SHA512):
hashcode = hashlib.sha512()
if sys.version_info.major == 2:
inpt = bytes(inpt)
else:
inpt = bytes(inpt, "utf-8")
hashcode.update(inpt)
hexhash = hashcode.hexdigest()
return hexhash
|
Generates a hash from a given String
with a specified algorithm and returns the
hash in hexadecimal form. Default: sha256.
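Example (illustrative; the HASH_* constants are the module-level ones referenced in the signature)::

    hash_input('hello')                  # 64-character sha256 hex digest
    hash_input('hello', algo=HASH_SHA1)  # 40-character sha1 hex digest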
|
24,588 |
def profile_poly2o(data, mask):
params = lmfit.Parameters()
params.add(name="mx", value=0)
params.add(name="my", value=0)
params.add(name="mxy", value=0)
params.add(name="ax", value=0)
params.add(name="ay", value=0)
params.add(name="off", value=np.average(data[mask]))
fr = lmfit.minimize(poly2o_residual, params, args=(data, mask))
bg = poly2o_model(fr.params, data.shape)
return bg
|
Fit a 2D 2nd order polynomial to `data[mask]`
|
24,589 |
def to_dict(self):
final_res = {
"param": self._param,
"unused_param": self.unused_param,
"execution_time": self._exec_time,
"output": {"accuracy": self.get_accuracy(),
"weights": self.get_weights(),
"splines": self._splines
}
}
return final_res
|
Returns:
dict: Concise representation of the result as a dictionary.
|
24,590 |
def get_extended_attrtext(value):
m = _non_extended_attribute_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected extended attrtext but found {!r}".format(value))
attrtext = m.group()
value = value[len(attrtext):]
attrtext = ValueTerminal(attrtext, )
_validate_xtext(attrtext)
return attrtext, value
|
attrtext = 1*(any non-ATTRIBUTE_ENDS character plus '%')
This is a special parsing routine so that we get a value that
includes % escapes as a single string (which we decode as a single
string later).
|
24,591 |
def add_column_xsd(self, tb, column, attrs):
if column.nullable:
attrs[] = str(0)
attrs[] =
for cls, xsd_type in six.iteritems(self.SIMPLE_XSD_TYPES):
if isinstance(column.type, cls):
attrs[] = xsd_type
with tag(tb, , attrs) as tb:
self.element_callback(tb, column)
return tb
if isinstance(column.type, Geometry):
geometry_type = column.type.geometry_type
xsd_type = self.SIMPLE_GEOMETRY_XSD_TYPES[geometry_type]
attrs[] = xsd_type
with tag(tb, , attrs) as tb:
self.element_callback(tb, column)
return tb
if isinstance(column.type, sqlalchemy.Enum):
with tag(tb, , attrs) as tb:
with tag(tb, ) as tb:
with tag(tb, , {: }) \
as tb:
for enum in column.type.enums:
with tag(tb, , {: enum}):
pass
self.element_callback(tb, column)
return tb
if isinstance(column.type, sqlalchemy.Numeric):
if column.type.scale is None and column.type.precision is None:
attrs[] =
with tag(tb, , attrs) as tb:
self.element_callback(tb, column)
return tb
else:
with tag(tb, , attrs) as tb:
with tag(tb, ) as tb:
with tag(tb, ,
{: }) as tb:
if column.type.scale is not None:
with tag(tb, ,
{: str(column.type.scale)}) \
as tb:
pass
if column.type.precision is not None:
precision = column.type.precision
with tag(tb, ,
{: str(precision)}) \
as tb:
pass
self.element_callback(tb, column)
return tb
if isinstance(column.type, sqlalchemy.String) \
or isinstance(column.type, sqlalchemy.Text) \
or isinstance(column.type, sqlalchemy.Unicode) \
or isinstance(column.type, sqlalchemy.UnicodeText):
if column.type.length is None:
attrs[] =
with tag(tb, , attrs) as tb:
self.element_callback(tb, column)
return tb
else:
with tag(tb, , attrs) as tb:
with tag(tb, ) as tb:
with tag(tb, ,
{: }) as tb:
with tag(tb, ,
{: str(column.type.length)}):
pass
self.element_callback(tb, column)
return tb
raise UnsupportedColumnTypeError(column.type)
|
Add the XSD for a column to tb (a TreeBuilder)
|
24,592 |
def find_environment(env_id=None, env_name=None):
LOGGER.debug("EnvironmentService.find_environment")
if (env_id is None or not env_id) and (env_name is None or not env_name):
raise exceptions.ArianeCallParametersError()
if (env_id is not None and env_id) and (env_name is not None and env_name):
LOGGER.warn()
env_name = None
params = None
if env_id is not None and env_id:
params = {: env_id}
elif env_name is not None and env_name:
params = {: env_name}
ret = None
if params is not None:
args = {: , : , : params}
response = EnvironmentService.requester.call(args)
if response.rc == 0:
ret = Environment.json_2_environment(response.response_content)
elif response.rc != 404:
err_msg = + \
str(env_id) + + str(env_name) + + \
+ str(response.response_content) + + str(response.error_message) + \
" (" + str(response.rc) + ")"
LOGGER.warning(
err_msg
)
return ret
|
Find the environment according to the environment id (takes priority) or the environment name.
:param env_id: the environment id
:param env_name: the environment name
:return: found environment or None if not found
|
24,593 |
def gen_sites(path):
" Seek sites by path. "
for root, _, _ in walklevel(path, 2):
try:
yield Site(root)
except AssertionError:
continue
|
Seek sites by path.
|
24,594 |
def get_config_dict(config):
dst = {}
tmp = config.__dict__
key_list = dir(config)
key_list.remove()
for k, v in tmp.items():
if k in key_list and not k.startswith():
dst[k] = v
return dst
|
Get the configuration data dictionary.
Formats the given configuration module and builds a plain dictionary from its public attributes.
:param object config: configuration module
:return: configuration data dictionary
:rtype: dict
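Example (a sketch; ``settings`` is a placeholder configuration module)::

    import settings
    cfg = get_config_dict(settings)
    print(cfg.get('DEBUG'))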
|
24,595 |
def flatten(d, parent_key='', separator='_'):
items = []
for k, v in d.items():
new_key = parent_key + separator + k if parent_key else k
if isinstance(v, (dict, OrderedDict)):
items.extend(flatten(v, new_key, separator).items())
else:
items.append((new_key, v))
return OrderedDict(items)
|
Flatten a nested dictionary.
Parameters
----------
d: dict_like
Dictionary to flatten.
parent_key: string, optional
Concatenated names of the parent keys.
separator: string, optional
Separator between the names of the each key.
The default separator is '_'.
Examples
--------
>>> d = {'alpha': 1, 'beta': {'a': 10, 'b': 42}}
>>> flatten(d) == {'alpha': 1, 'beta_a': 10, 'beta_b': 42}
True
>>> flatten(d, separator='.') == {'alpha': 1, 'beta.a': 10, 'beta.b': 42}
True
|
24,596 |
def ul(
self,
text):
m = self.reWS.match(text)
ul = []
for l in m.group(2).split("\n"):
prefix, text, suffix = self._snip_whitespace(l)
ul.append("%(prefix)s* %(text)s " % locals())
return ("\n").join(ul) + "\n\n"
|
*convert plain-text to MMD unordered list*
**Key Arguments:**
- ``text`` -- the text to convert to MMD unordered list
**Return:**
- ``ul`` -- the MMD unordered list
**Usage:**
To convert text to a MMD unordered list:
.. code-block:: python
ul = md.ul(" This is a list item ")
print ul
# OUTPUT:
# * This is a list item
#
|
24,597 |
def features(sender=''):
return [
many_capitalized_words,
lambda line: 1 if len(line) > TOO_LONG_SIGNATURE_LINE else 0,
binary_regex_search(RE_EMAIL),
binary_regex_search(RE_URL),
binary_regex_search(RE_RELAX_PHONE),
binary_regex_match(RE_SEPARATOR),
binary_regex_search(RE_SPECIAL_CHARS),
binary_regex_search(RE_SIGNATURE_WORDS),
binary_regex_search(RE_NAME),
lambda line: 1 if punctuation_percent(line) > 50 else 0,
lambda line: 1 if punctuation_percent(line) > 90 else 0,
contains_sender_names(sender)
]
|
Returns a list of signature features.
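Example (illustrative; the sample line below is arbitrary text)::

    fs = features('John Doe')
    scores = [f('Best regards, John Doe, +1 555 0100') for f in fs]
    # one 0/1 score per feature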
|
24,598 |
def alpha_beta(returns,
factor_returns,
risk_free=0.0,
period=DAILY,
annualization=None,
out=None):
returns, factor_returns = _aligned_series(returns, factor_returns)
return alpha_beta_aligned(
returns,
factor_returns,
risk_free=risk_free,
period=period,
annualization=annualization,
out=out,
)
|
Calculates annualized alpha and beta.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~empyrical.stats.cum_returns`.
factor_returns : pd.Series
Daily noncumulative returns of the factor to which beta is
computed. Usually a benchmark such as the market.
- This is in the same style as returns.
risk_free : int, float, optional
Constant risk-free return throughout the period. For example, the
interest rate on a three month us treasury bill.
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Value ignored if `annualization` parameter is specified.
Defaults are::
'monthly':12
'weekly': 52
'daily': 252
annualization : int, optional
Used to suppress default values available in `period` to convert
returns into annual returns. Value should be the annual frequency of
`returns`.
out : array-like, optional
Array to use as output buffer.
If not passed, a new array will be created.
Returns
-------
alpha : float
beta : float
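Example (a minimal sketch with made-up return series)::

    import pandas as pd
    returns = pd.Series([0.01, -0.02, 0.03, 0.004])
    benchmark = pd.Series([0.005, -0.01, 0.02, 0.002])
    alpha, beta = alpha_beta(returns, benchmark)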
|
24,599 |
def get_note(self, noteid):
    if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req(.format(noteid))
return response
|
Fetch a single note
:param noteid: The UUID of the note
|