Unnamed: 0 (int64, 0 – 389k) | code (stringlengths 26 – 79.6k) | docstring (stringlengths 1 – 46.9k)
---|---|---|
22,600 | def configureLogger(self):
baseLogLevel = logging.DEBUG if self.isDebugLogging() else logging.INFO
logger = logging.getLogger(self._name)
logger.setLevel(baseLogLevel)
fh = handlers.RotatingFileHandler(path.join(self._getConfigPath(), self._name + ".log"),
maxBytes=10 * 1024 * 1024, backupCount=10)
fh.setLevel(baseLogLevel)
ch = logging.StreamHandler()
ch.setLevel(logging.WARN)
formatter = logging.Formatter()
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
return logger | Configures the python logging system to log to a debug file and to stdout for warn and above.
:return: the base logger. |
22,601 | def process_form(self, instance, field, form,
empty_marker=None, emptyReturnsMarker=False):
values = {}
ref_def = form.get("ReferenceDefinition")
ref_def_uid = ref_def and ref_def[0]
if ref_def_uid:
ref_def_obj = api.get_object_by_uid(ref_def_uid)
ref_results = ref_def_obj.getReferenceResults()
rr_by_uid = dict(map(lambda r: (r.get("uid"), r), ref_results))
values.update(rr_by_uid)
service_uids = form.get("uids", [])
for uid in service_uids:
result = self._get_spec_value(form, uid, "result")
if not result:
continue
s_min = self._get_spec_value(form, uid, "min", result)
s_max = self._get_spec_value(form, uid, "max", result)
service = api.get_object_by_uid(uid)
values[uid] = {
"keyword": service.getKeyword(),
"uid": uid,
"result": result,
"min": s_min,
"max": s_max
}
return values.values(), {} | Return a list of dictionaries fit for ReferenceResultsField
consumption. Only services which have float()able entries in the result, min
and max fields will be included. If any of min, max, or result fields
are blank, the row value is ignored here. |
22,602 | def serve_forever(self, poll_interval=0.5):
self.__is_shut_down.clear()
try:
while not self.__shutdown_request:
r, w, e = _eintr_retry(select.select, [self], [], [], poll_interval)
if self in r:
self._handle_request_noblock()
finally:
self.__shutdown_request = False
self.__is_shut_down.set() | Handle one request at a time until shutdown.
Polls for shutdown every poll_interval seconds. Ignores
self.timeout. If you need to do periodic tasks, do them in
another thread. |
22,603 | def redirect(self):
logger.info("--- In SSO Redirect ---")
saml_msg = self.unpack_redirect()
try:
_key = saml_msg["key"]
saml_msg = IDP.ticket[_key]
self.req_info = saml_msg["req_info"]
del IDP.ticket[_key]
except KeyError:
try:
self.req_info = IDP.parse_authn_request(saml_msg["SAMLRequest"],
BINDING_HTTP_REDIRECT)
except KeyError:
resp = BadRequest("Message signature verification failure")
return resp(self.environ, self.start_response)
_req = self.req_info.message
if "SigAlg" in saml_msg and "Signature" in saml_msg:
issuer = _req.issuer.text
_certs = IDP.metadata.certs(issuer, "any", "signing")
verified_ok = False
for cert in _certs:
if verify_redirect_signature(saml_msg, IDP.sec.sec_backend,
cert):
verified_ok = True
break
if not verified_ok:
resp = BadRequest("Message signature verification failure")
return resp(self.environ, self.start_response)
if self.user:
if _req.force_authn:
saml_msg["req_info"] = self.req_info
key = self._store_request(saml_msg)
return self.not_authn(key, _req.requested_authn_context)
else:
return self.operation(saml_msg, BINDING_HTTP_REDIRECT)
else:
saml_msg["req_info"] = self.req_info
key = self._store_request(saml_msg)
return self.not_authn(key, _req.requested_authn_context)
else:
return self.operation(saml_msg, BINDING_HTTP_REDIRECT) | This is the HTTP-redirect endpoint |
22,604 | def touch(fname, times=None):
fpath, f = os.path.split(fname)
if not os.path.exists(fpath):
os.makedirs(fpath)
with open(fname, "a"):
os.utime(fname, times) | Creates an empty file at fname, creating path if necessary
Answer taken from Stack Overflow http://stackoverflow.com/a/1160227
User: ephemient http://stackoverflow.com/users/20713
License: CC-BY-SA 3.0 https://creativecommons.org/licenses/by-sa/3.0/ |
22,605 | def getTypeByPosition(self, idx):
try:
return self.__namedTypes[idx].asn1Object
except IndexError:
raise error.PyAsn1Error() | Return ASN.1 type object by its position in fields set.
Parameters
----------
idx: :py:class:`int`
Field index
Returns
-------
:
ASN.1 type
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
If given position is out of fields range |
22,606 | def _format_output(kernel_restart, packages, verbose, restartable, nonrestartable, restartservicecommands,
restartinitcommands):
if not verbose:
packages = restartable + nonrestartable
if kernel_restart:
packages.append()
return packages
else:
ret =
if kernel_restart:
ret =
if packages:
ret += "Found {0} processes using old versions of upgraded files.\n".format(len(packages))
ret += "These are the packages:\n"
if restartable:
ret += "Of these, {0} seem to contain systemd service definitions or init scripts " \
"which can be used to restart them:\n".format(len(restartable))
for package in restartable:
ret += package +
for program in packages[package][]:
ret += program +
if restartservicecommands:
ret += "\n\nThese are the systemd services:\n"
ret += .join(restartservicecommands)
if restartinitcommands:
ret += "\n\nThese are the initd scripts:\n"
ret += .join(restartinitcommands)
if nonrestartable:
ret += "\n\nThese processes {0} do not seem to have an associated init script " \
"to restart them:\n".format(len(nonrestartable))
for package in nonrestartable:
ret += package +
for program in packages[package][]:
ret += program +
return ret | Formats the output of the restartcheck module.
Returns:
String - formatted output.
Args:
kernel_restart: indicates that a newer kernel is installed
packages: list of packages that should be restarted
verbose: enables extensive output
restartable: list of restartable packages
nonrestartable: list of non-restartable packages
restartservicecommands: list of commands to restart services
restartinitcommands: list of commands to restart init.d scripts |
22,607 | def parse_unknown_args(args):
retval = {}
preceded_by_key = False
for arg in args:
if arg.startswith('--'):
if '=' in arg:
key = arg.split('=')[0][2:]
value = arg.split('=')[1]
retval[key] = value
else:
key = arg[2:]
preceded_by_key = True
elif preceded_by_key:
retval[key] = arg
preceded_by_key = False
return retval | Parse arguments not consumed by arg parser into a dictionary |
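A quick usage sketch of the parser above (illustrative input; values stay strings, matching the code's behaviour):

    parse_unknown_args(['--lr=0.1', '--epochs', '5', '--tag', 'demo'])
    # -> {'lr': '0.1', 'epochs': '5', 'tag': 'demo'}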
22,608 | def chmod(self, path, mode):
path = self._adjust_cwd(path)
self._log(DEBUG, 'chmod(%r, %r)' % (path, mode))
attr = SFTPAttributes()
attr.st_mode = mode
self._request(CMD_SETSTAT, path, attr) | Change the mode (permissions) of a file. The permissions are
unix-style and identical to those used by python's C{os.chmod}
function.
@param path: path of the file to change the permissions of
@type path: str
@param mode: new permissions
@type mode: int |
22,609 | def _validate_place_types(self, types):
for pt in types:
if pt not in self.place_types:
raise InvalidPlaceTypeError(pt)
return {: ",".join(types)} | Validate place types and return a mapping for use in requests. |
22,610 | def manhattan_distant(vector1, vector2):
vector1 = np.mat(vector1)
vector2 = np.mat(vector2)
return np.sum(np.abs(vector1 - vector2)) | Manhattan distance. |
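For example, assuming the function above and its numpy import are in scope (the identifier keeps its original spelling):

    manhattan_distant([1, 2, 3], [4, 6, 3])  # |1-4| + |2-6| + |3-3| = 7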
22,611 | def key_value_to_tree(data):
tree = {}
for flatkey, value in six.iteritems(data):
t = tree
keys = flatkey.split(__opts__[])
for i, key in enumerate(keys, 1):
if i == len(keys):
t[key] = value
else:
t = t.setdefault(key, {})
return tree | Convert key/value to tree |
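The delimiter is read from an option key that is elided above; a minimal standalone sketch of the same flattening idea, assuming ':' as the delimiter (the helper name `_to_tree` is hypothetical):

    def _to_tree(data, delimiter=':'):
        tree = {}
        for flatkey, value in data.items():
            t = tree
            keys = flatkey.split(delimiter)
            for i, key in enumerate(keys, 1):
                if i == len(keys):
                    t[key] = value  # leaf: assign the value
                else:
                    t = t.setdefault(key, {})  # branch: descend, creating dicts as needed
        return tree

    _to_tree({'a:b:c': 1, 'a:d': 2})  # -> {'a': {'b': {'c': 1}, 'd': 2}}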
22,612 | def args_range(min_value, max_value, *args):
not_null(*args)
if not all(map(lambda v: min_value <= v <= max_value, args)):
raise ValueError("Argument must be between {0} and {1}!".format(min_value, max_value)) | 检查参数范围 |
22,613 | def _hammer_function_precompute(self,x0, L, Min, model):
if x0 is None: return None, None
if len(x0.shape)==1: x0 = x0[None,:]
m = model.predict(x0)[0]
pred = model.predict(x0)[1].copy()
pred[pred<1e-16] = 1e-16
s = np.sqrt(pred)
r_x0 = (m-Min)/L
s_x0 = s/L
r_x0 = r_x0.flatten()
s_x0 = s_x0.flatten()
return r_x0, s_x0 | Pre-computes the parameters of a penalizer centered at x0. |
22,614 | def apply_bbox(sf,ax):
limits = sf.bbox
xlim = limits[0],limits[2]
ylim = limits[1],limits[3]
ax.set_xlim(xlim)
ax.set_ylim(ylim) | Use bbox as xlim and ylim in ax |
22,615 | def run_gblocks(align_fasta_file, **kwargs):
cl = GblocksCommandline(aln_file=align_fasta_file, **kwargs)
r, e = cl.run()
print("Gblocks:", cl, file=sys.stderr)
if e:
print("***Gblocks could not run", file=sys.stderr)
return None
else:
print(r, file=sys.stderr)
alignp = re.sub(r, \
r, r, flags=re.DOTALL)
alignp = int(alignp)
if alignp <= 10:
print("** WARNING ** Only %s %% positions retained by Gblocks. " \
"Results aborted. Using original alignment instead.\n" % alignp, file=sys.stderr)
return None
else:
return align_fasta_file+"-gb" | remove poorly aligned positions and divergent regions with Gblocks |
22,616 | def get_file_courses(self, id, course_id, include=None):
path = {}
data = {}
params = {}
path["course_id"] = course_id
path["id"] = id
if include is not None:
self._validate_enum(include, ["user"])
params["include"] = include
self.logger.debug("GET /api/v1/courses/{course_id}/files/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/files/{id}".format(**path), data=data, params=params, single_item=True) | Get file.
Returns the standard attachment json object |
22,617 | def tables_get(self, table_name):
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
return datalab.utils.Http.request(url, credentials=self._credentials) | Issues a request to retrieve information about a table.
Args:
table_name: a tuple representing the full name of the table.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. |
22,618 | def set_opt(self, name, value):
self.cache[][name] = value
if name == :
self.cache[] = self.def_delims if not value else (
,
,
) | Set option. |
22,619 | def watch(self, path, recursive=False):
self._logger.info(, path)
handler = FileHandler(self)
self._observer = Observer()
self._observer.schedule(handler, path, recursive)
self._logger.info()
self._observer.start()
self._watch = True
try:
self._logger.info()
while self._watch:
time.sleep(1)
except KeyboardInterrupt:
self.stop_watching()
self._observer.join() | Watch for files in a directory and apply normalizations.
Watch for new or changed files in a directory and apply
normalizations over them.
Args:
path: Path to the directory.
recursive: Whether to find files recursively or not. |
22,620 | def wait_script(name,
source=None,
template=None,
onlyif=None,
unless=None,
cwd=None,
runas=None,
shell=None,
env=None,
stateful=False,
umask=None,
use_vt=False,
output_loglevel='debug',
hide_output=False,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs):
return {'name': name, 'changes': {}, 'result': True, 'comment': ''} | Download a script from a remote source and execute it only if a watch
statement calls it.
source
The source script being downloaded to the minion, this source script is
hosted on the salt master server. If the file is located on the master
in the directory named spam, and is called eggs, the source string is
salt://spam/eggs
template
If this setting is applied then the named templating engine will be
used to render the downloaded file, currently jinja, mako, and wempy
are supported
name
The command to execute, remember that the command will execute with the
path and permissions of the salt-minion.
onlyif
A command to run as a check, run the named command only if the command
passed to the ``onlyif`` option returns true
unless
A command to run as a check, only run the named command if the command
passed to the ``unless`` option returns false
cwd
The current working directory to execute the command in, defaults to
/root
runas
The user name to run the command as
shell
The shell to use for execution, defaults to the shell grain
env
A list of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.wait_script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.wait_script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: jinja
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
umask
The umask (in octal) to use when running the command.
stateful
The command being executed is expected to return data about executing
a state. For more information, see the :ref:`stateful-argument` section.
use_vt
Use VT utils (saltstack) to stream the command output more
interactively to the console and the logs.
This is experimental.
output_loglevel : debug
Control the loglevel at which the output from the command is logged to
the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
hide_output : False
Suppress stdout and stderr in the state's results.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: Neon
success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: Neon |
22,621 | def list_view_on_selected(self, widget, selected_item_key):
self.lbl.set_text( + self.listView.children[selected_item_key].get_text()) | The selection event of the listView, returns a key of the clicked event.
You can retrieve the item rapidly |
22,622 | def _lab_to_rgb(labs):
labs, n_dim = _check_color_dim(labs)
y = (labs[:, 0] + 16.) / 116.
x = (labs[:, 1] / 500.) + y
z = y - (labs[:, 2] / 200.)
xyz = np.concatenate(([x], [y], [z]))
over = xyz > 0.2068966
xyz[over] = xyz[over] ** 3.
xyz[~over] = (xyz[~over] - 0.13793103448275862) / 7.787
rgbs = np.dot(_xyz2rgb_norm, xyz).T
over = rgbs > 0.0031308
rgbs[over] = 1.055 * (rgbs[over] ** (1. / 2.4)) - 0.055
rgbs[~over] *= 12.92
if n_dim == 4:
rgbs = np.concatenate((rgbs, labs[:, 3]), axis=1)
rgbs = np.clip(rgbs, 0., 1.)
return rgbs | Convert Nx3 or Nx4 lab to rgb |
22,623 | def gen_df_state(
list_table: list,
set_initcond: set,
set_runcontrol: set,
set_input_runcontrol: set)->pd.DataFrame:
df_var_site = gen_df_site(list_table)
df_var_runcontrol = gen_df_runcontrol(
set_initcond, set_runcontrol, set_input_runcontrol)
df_var_initcond = gen_df_initcond(set_initcond, set_runcontrol)
df_var_state = proc_df_state(
df_var_site, df_var_runcontrol, df_var_initcond)
df_var_state = df_var_state.sort_index()
df_var_state = df_var_state.reset_index()
df_var_state = df_var_state.drop_duplicates()
df_var_state = df_var_state.set_index()
return df_var_state | generate dataframe of all state variables used by supy
Parameters
----------
list_table : list
csv files for site info: `SUEWS_xx.csv` on github SUEWS-docs repo
set_initcond : set
initial condition related variables
set_runcontrol : set
runcontrol related variables
set_input_runcontrol : set
runcontrol related variables used as supy input
Returns
-------
pd.DataFrame
Description of all state variables used by supy |
22,624 | def parse_reports(self):
self.junction_saturation_all = dict()
self.junction_saturation_known = dict()
self.junction_saturation_novel = dict()
for f in self.find_log_files():
parsed = dict()
for l in f[].splitlines():
r = re.search(r"^([xyzw])=c\(([\d,]+)\)$", l)
if r:
parsed[r.group(1)] = [float(i) for i in r.group(2).split()]
if len(parsed) == 4:
if parsed[][-1] == 0:
log.warn("Junction saturation data all zeroes, skipping: ".format(f[]))
else:
if f[] in self.junction_saturation_all:
log.debug("Duplicate sample name found! Overwriting: {}".format(f[]))
self.add_data_source(f, section=)
self.junction_saturation_all[f[]] = OrderedDict()
self.junction_saturation_known[f[]] = OrderedDict()
self.junction_saturation_novel[f[]] = OrderedDict()
for k, v in enumerate(parsed[]):
self.junction_saturation_all[f[]][v] = parsed[][k]
self.junction_saturation_known[f[]][v] = parsed[][k]
self.junction_saturation_novel[f[]][v] = parsed[][k]
self.junction_saturation_all = self.ignore_samples(self.junction_saturation_all)
self.junction_saturation_known = self.ignore_samples(self.junction_saturation_known)
self.junction_saturation_novel = self.ignore_samples(self.junction_saturation_novel)
if len(self.junction_saturation_all) > 0:
pconfig = {
: ,
: ,
: ,
: 0,
: "Percent of reads",
: 0,
: 100,
: "<strong>{point.x}% of reads</strong>: {point.y:.2f}",
: [
{: },
{: },
{: }
],
: ,
: plot_single()
}
self.add_section (
name = ,
anchor = ,
description = ,
plot = linegraph.plot([
self.junction_saturation_known,
self.junction_saturation_novel,
self.junction_saturation_all
], pconfig)
)
return len(self.junction_saturation_all) | Find RSeQC junction_saturation frequency reports and parse their data |
22,625 | def most_even(number, group):
count, rest = divmod(number, group)
counts = zip_longest([count] * group, [1] * rest, fillvalue=0)
chunks = [sum(one) for one in counts]
logging.debug('%s', chunks)
return chunks | Divide a number into a list of numbers as even as possible. |
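For example (assumes the function above, which relies on itertools.zip_longest and logging, is importable):

    most_even(10, 3)  # divmod(10, 3) == (3, 1), so one chunk gets the extra unit
    # -> [4, 3, 3]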
22,626 | def unique(seen, *iterables):
_add = seen.add
return (i for i in chain(*iterables) if i not in seen and not _add(i)) | Get the unique items in iterables while preserving order. Note that this
mutates the seen set provided only when the returned generator is used.
Args:
seen (set): either an empty set, or the set of things already seen
*iterables: one or more iterable lists to chain together
Returns:
generator: |
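Illustrative use of the generator above (assumes itertools.chain is imported in the defining module); note the seen set is only filled as the generator is consumed:

    seen = set()
    list(unique(seen, [1, 2, 2, 3], [3, 4]))  # -> [1, 2, 3, 4]
    sorted(seen)                              # -> [1, 2, 3, 4]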
22,627 | def dump(doc, output_stream=None):
assert type(doc) == Doc, "panflute.dump needs input of type panflute.Doc"
if output_stream is None:
sys.stdout = codecs.getwriter("utf-8")(sys.stdout) if py2 else codecs.getwriter("utf-8")(sys.stdout.detach())
output_stream = sys.stdout
if doc.api_version is None:
Citation.backup = Citation.to_json
Citation.to_json = Citation.to_json_legacy
for E in [Table, OrderedList, Quoted, Math]:
E.backup = E._slots_to_json
E._slots_to_json = E._slots_to_json_legacy
for E in EMPTY_ELEMENTS:
E.backup = E.to_json
E.to_json = Element.to_json
json_serializer = lambda elem: elem.to_json()
output_stream.write(json.dumps(
obj=doc,
default=json_serializer,
check_circular=False,
separators=(',', ':'),
ensure_ascii=False
))
if doc.api_version is None:
Citation.to_json = Citation.backup
for E in [Table, OrderedList, Quoted, Math]:
E._slots_to_json = E.backup
for E in EMPTY_ELEMENTS:
E.to_json = E.backup | Dump a :class:`.Doc` object into a JSON-encoded text string.
The output will be sent to :data:`sys.stdout` unless an alternative
text stream is given.
To dump to :data:`sys.stdout` just do:
>>> import panflute as pf
>>> doc = pf.Doc(Para(Str('a'))) # Create sample document
>>> pf.dump(doc)
To dump to file:
>>> with open('some-document.json', 'w', encoding='utf-8') as f:
>>> pf.dump(doc, f)
To dump to a string:
>>> import io
>>> with io.StringIO() as f:
>>> pf.dump(doc, f)
>>> contents = f.getvalue()
:param doc: document, usually created with :func:`.load`
:type doc: :class:`.Doc`
:param output_stream: text stream used as output
(default is :data:`sys.stdout`) |
22,628 | def v1_highlights_get(response, kvlclient, file_id_str, max_elapsed = 300):
file_id = make_file_id(file_id_str)
kvlclient.setup_namespace(highlights_kvlayer_tables)
payload_strs = list(kvlclient.get(, file_id))
if not (payload_strs and payload_strs[0][1]):
response.status = 500
payload = {
: ERROR,
: {
: 8,
: }}
logger.critical(, file_id, payload_strs)
else:
payload_str = payload_strs[0][1]
try:
payload = json.loads(payload_str)
if payload[] == HIGHLIGHTS_PENDING:
elapsed = time.time() - payload.get(, 0)
if elapsed > max_elapsed:
response.status = 500
payload = {
: ERROR,
: {
: 8,
: }}
logger.critical(, file_id)
kvlclient.put(, (file_id, json.dumps(payload)))
else:
payload[] = elapsed
logger.info(, file_id)
except Exception, exc:
logger.critical(,
payload_str, exc_info=True)
response.status = 400
payload = {
: ERROR,
: {
: 9,
: % file_id}
}
return payload | Obtain highlights for a document POSTed previously to this end
point. See documentation for v1_highlights_post for further
details. If the `state` is still `pending` for more than
`max_elapsed` after the start of the `WorkUnit`, then this reports
an error, although the `WorkUnit` may continue in the background. |
22,629 | def page(self, course):
if not self.webdav_host:
raise web.notfound()
url = self.webdav_host + "/" + course.get_id()
username = self.user_manager.session_username()
apikey = self.user_manager.session_api_key()
return self.template_helper.get_renderer().course_admin.webdav(course, url, username, apikey) | Get all data and display the page |
22,630 | def public_key_to_connection_id(self, public_key):
with self._connections_lock:
for connection_id, connection_info in self._connections.items():
if connection_info.public_key == public_key:
return connection_id
return None | Get stored connection id for a public key. |
22,631 | def get_effective_domain_id(request):
default_domain = get_default_domain(request)
domain_id = default_domain.get()
domain_name = default_domain.get()
return None if domain_name == DEFAULT_DOMAIN else domain_id | Gets the id of the default domain.
If the requests default domain is the same as DEFAULT_DOMAIN,
return None. |
22,632 | def get_url_endpoint(self):
endpoint = self.url
if self.type not in (,):
endpoint = % (
self.catalog.slug,
self.id
)
return endpoint | Returns the Hypermap endpoint for a layer.
This endpoint will be the WMTS MapProxy endpoint, only for WM we use the original endpoint. |
22,633 | def kick_job(self, job: JobOrID) -> None:
self._send_cmd(b'kick-job %d' % _to_id(job), b'KICKED') | Moves a delayed or buried job into the ready queue.
:param job: The job or job ID to kick. |
22,634 | def experiments_predictions_list(self, listing_url, offset=0, limit=-1, properties=None):
return sco.get_run_listing(
listing_url,
offset=offset,
limit=limit,
properties=properties
) | Get list of experiment resources from a SCO-API.
Parameters
----------
listing_url : string
url for experiments run listing.
offset : int, optional
Starting offset for returned list items
limit : int, optional
Limit the number of items in the result
properties : List(string)
List of additional object properties to be included for items in
the result
Returns
-------
List(scoserv.ModelRunDescriptor)
List of model run descriptors |
22,635 | def res_to_str(res):
if in res.request.headers:
res.request.headers[] = "*****"
return % (res.url,
str(res.request.headers),
OLD_REQ and res.request.data or res.request.body,
res.headers,
res.status_code,
res.reason,
res.text) | :param res: :class:`requests.Response` object
Parse the given request and generate an informative string from it |
22,636 | def plot(feature, mp=None, style_function=None, **map_kwargs):
map_kwargs.setdefault('basemap', basemaps.Stamen.Terrain)
if feature.is_empty:
warnings.warn("The geometry is empty.")
mp = Map(**map_kwargs) if mp is None else mp
else:
if mp is None:
center = feature.envelope.centroid.reproject(WGS84_CRS)
zoom = zoom_level_from_geometry(feature.envelope)
mp = Map(center=(center.y, center.x), zoom=zoom, **map_kwargs)
mp.add_layer(layer_from_element(feature, style_function))
return mp | Plots a GeoVector in an ipyleaflet map.
Parameters
----------
feature : telluric.vectors.GeoVector, telluric.features.GeoFeature, telluric.collections.BaseCollection
Data to plot.
mp : ipyleaflet.Map, optional
Map in which to plot, default to None (creates a new one).
style_function : func
Function that returns a style dictionary for
map_kwargs : kwargs, optional
Extra parameters to send to ipyleaflet.Map. |
22,637 | def authenticate(name, remote_addr, password, cert, key, verify_cert=True):
ret = {
'name': name,
'remote_addr': remote_addr,
'cert': cert,
'key': key,
'verify_cert': verify_cert
}
try:
client = __salt__[](
remote_addr, cert, key, verify_cert
)
except SaltInvocationError as e:
return _error(ret, six.text_type(e))
except CommandExecutionError as e:
return _error(ret, six.text_type(e))
if client.trusted:
return _success(ret, "Already authenticated.")
try:
result = __salt__[](
remote_addr, password, cert, key, verify_cert
)
except CommandExecutionError as e:
return _error(ret, six.text_type(e))
if result is not True:
return _error(
ret,
"Failed to authenticate with peer: {0}".format(remote_addr)
)
msg = "Successfully authenticated with peer: {0}".format(remote_addr)
ret[] = msg
return _success(
ret,
msg
) | Authenticate with a remote peer.
.. note::
This function opens a connection to remote_addr every time it is run,
so you should call it only once.
remote_addr :
A URL to a remote server; you also have to give cert and key if you
provide remote_addr!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
password :
The PaSsW0rD
cert :
PEM formatted SSL certificate.
Examples:
/root/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
/root/.config/lxc/client.key
verify_cert : True
Whether to verify the cert; this is True by default,
but in most cases you want to turn it off, as LXD
normally uses self-signed certificates.
name:
Ignore this. This is just here for salt. |
def relevant(symbol, token='', version=''):
_raiseIfNotStr(symbol)
return _getJson('stock/' + symbol + '/relevant', token, version) | Same as peers
https://iexcloud.io/docs/api/#relevant
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result |
22,639 | def could_be(self, other):
if type(other) is not type(self):
return NotImplemented
if self == other:
return True
for attr in [, , , , , , ]:
if attr not in self or attr not in other:
continue
puncmap = dict((ord(char), None) for char in string.punctuation)
s = self[attr].lower().translate(puncmap)
o = other[attr].lower().translate(puncmap)
if s == o:
continue
if attr in {, , }:
if (({len(comp) for comp in s.split()} == {1} and [el[0] for el in o.split()] == s.split()) or
({len(comp) for comp in o.split()} == {1} and [el[0] for el in s.split()] == o.split())):
continue
return False
return True | Return True if the other PersonName is not explicitly inconsistent. |
22,640 | def _coulomb(n1, n2, k, r):
delta = [x2 - x1 for x1, x2 in zip(n1[], n2[])]
distance = sqrt(sum(d ** 2 for d in delta))
if distance < 0.1:
delta = [uniform(0.1, 0.2) for _ in repeat(None, 3)]
distance = sqrt(sum(d ** 2 for d in delta))
| Calculates Coulomb forces and updates node data. |
22,641 | def to_e164(name, origin=public_enum_domain, want_plus_prefix=True):
if not origin is None:
name = name.relativize(origin)
dlabels = [d for d in name.labels if (d.isdigit() and len(d) == 1)]
if len(dlabels) != len(name.labels):
raise dns.exception.SyntaxError()
dlabels.reverse()
text = ''.join(dlabels)
if want_plus_prefix:
text = '+' + text
return text | Convert an ENUM domain name into an E.164 number.
@param name: the ENUM domain name.
@type name: dns.name.Name object.
@param origin: A domain containing the ENUM domain name. The
name is relativized to this domain before being converted to text.
@type: dns.name.Name object or None
@param want_plus_prefix: if True, add a '+' to the beginning of the
returned number.
@rtype: str |
22,642 | def until_condition(self, condition, condition_description):
end_time = time.time() + self._timeout
count = 1
while True:
try:
if not hasattr(condition, '__call__'):
raise TypeError("condition is not callable")
value = condition()
if type(value) is bool and value is not False:
return value
elif type(value) is not bool and value is not None:
return value
else:
logger.debug("
except self._ignored_exceptions as ex:
logger.debug("Captured {0} : {1}".format(str(ex.__class__).replace("<type >", ""),
str(ex)))
time.sleep(self._poll)
count += 1
if time.time() > end_time:
break
raise TimeoutException(
msg="condition <" + condition_description + "> was not true after " + str(self._timeout) + " seconds.") | Waits until conditions is True or returns a non-None value.
If any of the trait is still not present after timeout, raises a TimeoutException. |
22,643 | def userParamFromDict(attributes):
keys = ['name', 'value', 'unitAccession', 'type']
return tuple(attributes[key] if key in attributes else None for key in keys) | Python representation of a mzML userParam = tuple(name, value,
unitAccession, type)
:param attributes: #TODO: docstring
:returns: #TODO: docstring |
22,644 | def get_gc_property(value, is_bytes=False):
obj = unidata.ascii_properties if is_bytes else unidata.unicode_properties
if value.startswith('^'):
negate = True
value = value[1:]
else:
negate = False
value = unidata.unicode_alias[].get(value, value)
assert 1 <= len(value) <= 2,
if not negate:
p1, p2 = (value[0], value[1]) if len(value) > 1 else (value[0], None)
value = .join(
[v for k, v in obj.get(p1, {}).items() if not k.startswith()]
) if p2 is None else obj.get(p1, {}).get(p2, )
else:
p1, p2 = (value[0], value[1]) if len(value) > 1 else (value[0], )
value = obj.get(p1, {}).get( + p2, )
assert value,
return value | Get `GC` property. |
22,645 | def name(self):
if self._name is None:
self._name = self.__class__.__name__
return self._name | Algo name. |
22,646 | def on_state_execution_status_changed_after(self, model, prop_name, info):
from rafcon.gui.utils.notification_overview import NotificationOverview
from rafcon.core.states.state import State
def name_and_next_state(state):
assert isinstance(state, State)
if state.is_root_state_of_library:
return state.parent.parent, state.parent.name
else:
return state.parent, state.name
def create_path(state, n=3, separator=):
next_parent, name = name_and_next_state(state)
path = separator + name
n -= 1
while n > 0 and isinstance(next_parent, State):
next_parent, name = name_and_next_state(next_parent)
path = separator + name + path
n -= 1
if isinstance(next_parent, State):
path = separator + + path
return path
if in info and in info[]:
overview = NotificationOverview(info)
if overview[][-1] == :
active_state = overview[][-1].state
assert isinstance(active_state, State)
path_depth = rafcon.gui.singleton.global_gui_config.get_config_value("EXECUTION_TICKER_PATH_DEPTH", 3)
message = self._fix_text_of_label + create_path(active_state, path_depth)
if rafcon.gui.singleton.main_window_controller.view is not None:
self.ticker_text_label.set_text(message)
else:
logger.warn("Not initialized yet") | Show current execution status in the widget
This function specifies what happens if the state machine execution status of a state changes
:param model: the model of the state that has changed (most likely its execution status)
:param prop_name: property name that has been changed
:param info: notification info dictionary
:return: |
22,647 | def __initialize_node(self, attributes_flags=int(Qt.ItemIsSelectable | Qt.ItemIsEnabled)):
self["traced"] = umbra.ui.nodes.GraphModelAttribute(name="traced",
value=foundations.trace.is_traced(self.__module),
flags=attributes_flags)
self.update_node_attributes() | Initializes the node.
:param attributes_flags: Attributes flags.
:type attributes_flags: int |
22,648 | def redo(self, channel, image):
imname = image.get(, )
chname = channel.name
in_contents = self.is_in_contents(chname, imname)
old_highlight = channel.extdata.contents_old_highlight
if image is not None:
key = self._get_hl_key(chname, image)
new_highlight = set([key])
else:
new_highlight = set([])
if self.highlight_tracks_keyboard_focus:
if in_contents:
self.update_highlights(self._hl_path, new_highlight)
self._hl_path = new_highlight
else:
if in_contents:
self.update_highlights(old_highlight, new_highlight)
channel.extdata.contents_old_highlight = new_highlight
return True | This method is called when an image is set in a channel. |
22,649 | def GetMessage(self, log_source, lcid, message_identifier):
event_log_provider_key = self._GetEventLogProviderKey(log_source)
if not event_log_provider_key:
return None
generator = self._GetMessageFileKeys(event_log_provider_key)
if not generator:
return None
message_string = None
for message_file_key in generator:
message_string = self._GetMessage(
message_file_key, lcid, message_identifier)
if message_string:
break
if self._string_format == :
message_string = self._ReformatMessageString(message_string)
return message_string | Retrieves a specific message for a specific Event Log source.
Args:
log_source (str): Event Log source.
lcid (int): language code identifier (LCID).
message_identifier (int): message identifier.
Returns:
str: message string or None if not available. |
22,650 | def quit(self):
if self._process is None:
logger.debug()
return
try:
logger.debug()
process_group_id = os.getpgid(self._process.pid)
os.killpg(process_group_id, signal.SIGTERM)
logger.debug( % process_group_id)
self._process_monitor.join()
except OSError:
logger.error()
self._process = None | Quit the player, blocking until the process has died |
22,651 | def sum_dicts(dicts, normalize=False):
sum_dict = {}
for dicti in dicts:
for key in dicti:
val = dicti[key]
if isinstance(val, numbers.Number):
sum_dict[key] = sum_dict.get(key, 0) + val
else:
sum_dict[key] = val
if normalize:
return norm_int_dict(sum_dict)
return sum_dict | Sums the given dicts into a single dict mapping each numberic-valued
key to the sum of its mappings in all given dicts. Keys mapping to
non-numeric values retain the last value (by the given order).
Parameters
----------
dicts : list
A list of dict objects mapping each key to an numeric value.
normalize : bool, default False
Indicates whether to normalize all values by the value sum.
Returns
-------
dict
A dict where each key is mapped to the sum of its mappings in all
given dicts. |
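A small example of the default (non-normalized) behaviour of the function above; note the non-numeric value is simply carried through:

    sum_dicts([{'a': 1, 'b': 2}, {'a': 3, 'note': 'x'}])
    # -> {'a': 4, 'b': 2, 'note': 'x'}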
22,652 | def set_fraction(self, value):
if value < 0:
value *= -1
value = min(value, 1)
if self.horizontal:
width = int(self.width * value)
height = self.height
else:
width = self.width
height = int(self.height * value)
self.canvas.coords(self.meter, self.xpos, self.ypos,
self.xpos + width, self.ypos + height) | Set the meter indicator. Value should be between 0 and 1. |
22,653 | def ticket_delete(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/tickets
api_path = "/api/v2/tickets/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, method="DELETE", **kwargs) | https://developer.zendesk.com/rest_api/docs/core/tickets#delete-ticket |
22,654 | def conversion_rate(self):
participants = self.participant_count
if participants == 0:
return 0.0
return self.experiment.conversions_for(self.name) / float(participants) | The percentage of participants that have converted for this variant.
Returns a > 0 float representing a percentage rate. |
22,655 | def get_savename_from_varname(
varname, varname_prefix=None,
savename_prefix=None):
name = varname
if varname_prefix is not None \
and name.startswith(varname_prefix):
name = name[len(varname_prefix) + 1:]
if savename_prefix is not None:
name = savename_prefix + '/' + name
return name | Args:
varname(str): a variable name in the graph
varname_prefix(str): an optional prefix that may need to be removed in varname
savename_prefix(str): an optional prefix to append to all savename
Returns:
str: the name used to save the variable |
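For illustration (the variable and prefix names are made up), stripping a tower prefix from a graph variable name with the function above:

    get_savename_from_varname('tower0/conv1/W', varname_prefix='tower0')
    # -> 'conv1/W'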
22,656 | def _update_rs_with_primary_from_member(
sds,
replica_set_name,
server_description):
assert replica_set_name is not None
if replica_set_name != server_description.replica_set_name:
sds.pop(server_description.address)
elif (server_description.me and
server_description.address != server_description.me):
sds.pop(server_description.address)
return _check_has_primary(sds) | RS with known primary. Process a response from a non-primary.
Pass in a dict of ServerDescriptions, current replica set name, and the
ServerDescription we are processing.
Returns new topology type. |
22,657 | def fromProfileName(cls, name):
with profiles.ProfileStore.open() as config:
return cls.fromProfile(config.load(name)) | Return a `SessionAPI` from a given configuration profile name.
:see: `ProfileStore`. |
22,658 | def as_ordered_dict(self, preference_orders: List[List[str]] = None) -> OrderedDict:
params_dict = self.as_dict(quiet=True)
if not preference_orders:
preference_orders = []
preference_orders.append(["dataset_reader", "iterator", "model",
"train_data_path", "validation_data_path", "test_data_path",
"trainer", "vocabulary"])
preference_orders.append(["type"])
def order_func(key):
order_tuple = [order.index(key) if key in order else len(order) for order in preference_orders]
return order_tuple + [key]
def order_dict(dictionary, order_func):
result = OrderedDict()
for key, val in sorted(dictionary.items(), key=lambda item: order_func(item[0])):
result[key] = order_dict(val, order_func) if isinstance(val, dict) else val
return result
return order_dict(params_dict, order_func) | Returns Ordered Dict of Params from list of partial order preferences.
Parameters
----------
preference_orders: List[List[str]], optional
``preference_orders`` is list of partial preference orders. ["A", "B", "C"] means
"A" > "B" > "C". For multiple preference_orders first will be considered first.
Keys not found, will have last but alphabetical preference. Default Preferences:
``[["dataset_reader", "iterator", "model", "train_data_path", "validation_data_path",
"test_data_path", "trainer", "vocabulary"], ["type"]]`` |
22,659 | def manipulate(self, stored_instance, component_instance):
self._ipopo_instance = stored_instance
self._context = stored_instance.bundle_context | Stores the given StoredInstance bean.
:param stored_instance: The iPOPO component StoredInstance
:param component_instance: The component instance |
22,660 | def hgetall(self, key):
def format_response(value):
return dict(zip(value[::2], value[1::2]))
return self._execute(
[b'HGETALL', key], format_callback=format_response) | Returns all fields and values of the hash stored at `key`.
The underlying redis `HGETALL`_ command returns an array of
pairs. This method converts that to a Python :class:`dict`.
It will return an empty :class:`dict` when the key is not
found.
.. note::
**Time complexity**: ``O(N)`` where ``N`` is the size
of the hash.
:param key: The key of the hash
:type key: :class:`str`, :class:`bytes`
:returns: a :class:`dict` of key to value mappings for all
fields in the hash
.. _HGETALL: http://redis.io/commands/hgetall |
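The flat-reply-to-dict conversion performed by format_response above, shown standalone on an illustrative reply:

    reply = [b'name', b'Ada', b'lang', b'Python']   # HGETALL returns field/value pairs flattened
    dict(zip(reply[::2], reply[1::2]))              # -> {b'name': b'Ada', b'lang': b'Python'}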
22,661 | def is_from_parent(cls, attribute_name, value=None):
if value is None:
try:
value = getattr(cls, attribute_name)
except AttributeError:
return False
for base in cls.__bases__:
try:
return getattr(base, attribute_name) is value
except AttributeError:
pass
return False | Tests if the current attribute value is shared by a parent of the given
class.
Returns None if the attribute value is None.
:param cls: Child class with the requested attribute
:param attribute_name: Name of the attribute to be tested
:param value: The exact value in the child class (optional)
:return: True if the attribute value is shared with a parent class |
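A minimal sketch of the check, calling the helper above as a plain function with the class as first argument (class names are illustrative):

    class Base(object):
        shared = object()

    class Child(Base):
        pass

    is_from_parent(Child, 'shared')  # -> True, the value is inherited unchanged from Base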
22,662 | def _append_record(test_data, results, test_path):
statuses = test_data.get("statuses")
jenkins_data = test_data.get("jenkins") or {}
data = [
("title", test_data.get("test_name") or _get_testname(test_path)),
("verdict", statuses.get("overall")),
("source", test_data.get("source")),
("job_name", jenkins_data.get("job_name")),
("run", jenkins_data.get("build_number")),
("params", _filter_parameters(test_data.get("params"))),
(
"time",
_calculate_duration(test_data.get("start_time"), test_data.get("finish_time")) or 0,
),
]
test_id = test_data.get("polarion")
if test_id:
if isinstance(test_id, list):
test_id = test_id[0]
data.append(("test_id", test_id))
results.append(OrderedDict(data)) | Adds data of single testcase results to results database. |
22,663 | def delete_ipv6(self, ipv6_id):
uri = % (ipv6_id)
return super(ApiNetworkIPv6, self).delete(uri) | Delete ipv6 |
22,664 | def sg_argmin(tensor, opt):
opt += tf.sg_opt(axis=tensor.get_shape().ndims - 1)
return tf.argmin(tensor, opt.axis, opt.name) | r"""Returns the indices of the minimum values along the specified axis.
See `tf.argmin()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis: Target axis. Default is the last one.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`. |
22,665 | def translate_rgb_to_ansi_code(red, green, blue, offset, colormode):
if colormode == terminal.NO_COLORS:
return '', ''
if colormode == terminal.ANSI_8_COLORS or colormode == terminal.ANSI_16_COLORS:
color_code = ansi.rgb_to_ansi16(red, green, blue)
start_code = ansi.ANSI_ESCAPE_CODE.format(
code=color_code + offset - ansi.FOREGROUND_COLOR_OFFSET)
end_code = ansi.ANSI_ESCAPE_CODE.format(code=offset + ansi.COLOR_CLOSE_OFFSET)
return start_code, end_code
if colormode == terminal.ANSI_256_COLORS:
color_code = ansi.rgb_to_ansi256(red, green, blue)
start_code = ansi.ANSI_ESCAPE_CODE.format(code='{base};5;{code}'.format(
base=8 + offset, code=color_code))
end_code = ansi.ANSI_ESCAPE_CODE.format(code=offset + ansi.COLOR_CLOSE_OFFSET)
return start_code, end_code
if colormode == terminal.TRUE_COLORS:
start_code = ansi.ANSI_ESCAPE_CODE.format(code='{base};2;{red};{green};{blue}'.format(
base=8 + offset, red=red, green=green, blue=blue))
end_code = ansi.ANSI_ESCAPE_CODE.format(code=offset + ansi.COLOR_CLOSE_OFFSET)
return start_code, end_code
raise ColorfulError(.format(colormode)) | Translate the given RGB color into the appropriate ANSI escape code
for the given color mode.
The offset is used for the base color which is used.
The ``colormode`` has to be one of:
* 0: no colors / disabled
* 8: use ANSI 8 colors
* 16: use ANSI 16 colors (same as 8 but with brightness)
* 256: use ANSI 256 colors
* 0xFFFFFF / 16777215: use 16 Million true colors
:param int red: the red channel value
:param int green: the green channel value
:param int blue: the blue channel value
:param int offset: the offset to use for the base color
:param int colormode: the color mode to use. See explanation above |
22,666 | def wait_until_alert_is_present(self, timeout=None):
timeout = timeout if timeout is not None else self.timeout
locator = None
def wait():
return WebDriverWait(self.driver, timeout).until(EC.alert_is_present())
return self.execute_and_handle_webdriver_exceptions(
wait, timeout, locator, ) | Waits for an alert to be present
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found |
22,667 | def execute_all_rules(self, matches, context):
ret = []
for priority, priority_rules in groupby(sorted(self), lambda rule: rule.priority):
sorted_rules = toposort_rules(list(priority_rules))
for rules_group in sorted_rules:
rules_group = list(sorted(rules_group, key=self.index))
group_log_level = None
for rule in rules_group:
if group_log_level is None or group_log_level < rule.log_level:
group_log_level = rule.log_level
log(group_log_level, "%s independent rule(s) at priority %s.", len(rules_group), priority)
for rule in rules_group:
when_response = execute_rule(rule, matches, context)
if when_response is not None:
ret.append((rule, when_response))
return ret | Execute all rules from this rules list. All 'when' conditions with the same priority will be performed before
calling 'then' actions.
:param matches:
:type matches:
:param context:
:type context:
:return:
:rtype: |
22,668 | def _set_gre_ttl(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={: []}, int_size=32), restriction_dict={: [u]}), is_leaf=True, yang_name="gre-ttl", rest_name="ttl", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: None, u: u, u: u, u: None, u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "uint32",
: ,
})
self.__gre_ttl = t
if hasattr(self, ):
self._set() | Setter method for gre_ttl, mapped from YANG variable /interface/tunnel/gre_ttl (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_gre_ttl is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_gre_ttl() directly. |
22,669 | def _top_element(self):
attrs = {"project-id": self.config["polarion-project-id"]}
document_relative_path = self.config.get("requirements-document-relative-path")
if document_relative_path:
attrs["document-relative-path"] = document_relative_path
top = etree.Element("requirements", attrs)
return top | Returns top XML element. |
22,670 | def namedb_state_mutation_sanity_check( opcode, op_data ):
return True | Make sure all mutate fields for this operation are present.
Return True if so
Raise exception if not |
22,671 | def to_links_df(regressor_type,
regressor_kwargs,
trained_regressor,
tf_matrix_gene_names,
target_gene_name):
def pythonic():
feature_importances = to_feature_importances(regressor_type, regressor_kwargs, trained_regressor)
links_df = pd.DataFrame({'TF': tf_matrix_gene_names, 'importance': feature_importances})
links_df['target'] = target_gene_name
clean_links_df = links_df[links_df.importance > 0].sort_values(by='importance', ascending=False)
return clean_links_df[['TF', 'target', 'importance']]
if is_sklearn_regressor(regressor_type):
return pythonic()
elif is_xgboost_regressor(regressor_type):
raise ValueError()
else:
raise ValueError( + regressor_type) | :param regressor_type: string. Case insensitive.
:param regressor_kwargs: dict of key-value pairs that configures the regressor.
:param trained_regressor: the trained model from which to extract the feature importances.
:param tf_matrix_gene_names: the list of names corresponding to the columns of the tf_matrix used to train the model.
:param target_gene_name: the name of the target gene.
:return: a Pandas DataFrame['TF', 'target', 'importance'] representing inferred regulatory links and their
connection strength. |
22,672 | def plot_variability_thresholds(varthreshpkl,
xmin_lcmad_stdev=5.0,
xmin_stetj_stdev=2.0,
xmin_iqr_stdev=2.0,
xmin_inveta_stdev=2.0,
lcformat=,
lcformatdir=None,
magcols=None):
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("cant figure out the light curve format")
return None
if magcols is None:
magcols = dmagcols
with open(varthreshpkl,) as infd:
allobjects = pickle.load(infd)
magbins = allobjects[]
for magcol in magcols:
min_lcmad_stdev = (
xmin_lcmad_stdev or allobjects[magcol][]
)
min_stetj_stdev = (
xmin_stetj_stdev or allobjects[magcol][]
)
min_iqr_stdev = (
xmin_iqr_stdev or allobjects[magcol][]
)
min_inveta_stdev = (
xmin_inveta_stdev or allobjects[magcol][]
)
fig = plt.figure(figsize=(20,16))
plt.subplot(221)
plt.plot(allobjects[magcol][],
allobjects[magcol][]*1.483,
marker=,ms=1.0, linestyle=,
rasterized=True)
plt.plot(allobjects[magcol][],
np.array(allobjects[magcol][])*1.483,
linewidth=3.0)
plt.plot(
allobjects[magcol][],
np.array(allobjects[magcol][])*1.483 +
min_lcmad_stdev*np.array(
allobjects[magcol][]
),
linewidth=3.0, linestyle=
)
plt.xlim((magbins.min()-0.25, magbins.max()))
plt.xlabel()
plt.ylabel(r)
plt.title( % magcol)
plt.yscale()
plt.tight_layout()
plt.subplot(222)
plt.plot(allobjects[magcol][],
allobjects[magcol][],
marker=,ms=1.0, linestyle=,
rasterized=True)
plt.plot(allobjects[magcol][],
allobjects[magcol][],
linewidth=3.0)
plt.plot(
allobjects[magcol][],
np.array(allobjects[magcol][]) +
min_stetj_stdev*np.array(
allobjects[magcol][]
),
linewidth=3.0, linestyle=
)
plt.xlim((magbins.min()-0.25, magbins.max()))
plt.xlabel()
plt.ylabel()
plt.title( % magcol)
plt.yscale()
plt.tight_layout()
plt.subplot(223)
plt.plot(allobjects[magcol][],
allobjects[magcol][],
marker=,ms=1.0, linestyle=,
rasterized=True)
plt.plot(allobjects[magcol][],
allobjects[magcol][],
linewidth=3.0)
plt.plot(
allobjects[magcol][],
np.array(allobjects[magcol][]) +
min_iqr_stdev*np.array(
allobjects[magcol][]
),
linewidth=3.0, linestyle=
)
plt.xlabel()
plt.ylabel()
plt.title( % magcol)
plt.xlim((magbins.min()-0.25, magbins.max()))
plt.yscale()
plt.tight_layout()
plt.subplot(224)
plt.plot(allobjects[magcol][],
allobjects[magcol][],
marker=,ms=1.0, linestyle=,
rasterized=True)
plt.plot(allobjects[magcol][],
allobjects[magcol][],
linewidth=3.0)
plt.plot(
allobjects[magcol][],
np.array(allobjects[magcol][]) +
min_inveta_stdev*np.array(
allobjects[magcol][]
),
linewidth=3.0, linestyle=
)
plt.xlabel()
plt.ylabel(r)
plt.title(r % magcol)
plt.xlim((magbins.min()-0.25, magbins.max()))
plt.yscale()
plt.tight_layout()
plt.savefig( % (varthreshpkl,
magcol),
bbox_inches=)
plt.close() | This makes plots for the variability threshold distributions.
Parameters
----------
varthreshpkl : str
The pickle produced by the function above.
xmin_lcmad_stdev,xmin_stetj_stdev,xmin_iqr_stdev,xmin_inveta_stdev : float or np.array
Values of the threshold values to override the ones in the
`vartresholdpkl`. If provided, will plot the thresholds accordingly
instead of using the ones in the input pickle directly.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
magcols : list of str or None
The magcol keys to use from the lcdict.
Returns
-------
str
The file name of the threshold plot generated. |
22,673 | def start( self ):
if ( self.localThreadingEnabled() and self.globalThreadingEnabled() ):
super(XThread, self).start()
else:
self.run()
self.finished.emit() | Starts the thread in its own event loop if the local and global thread
options are true, otherwise runs the thread logic in the main event
loop. |
22,674 | def estimator_spec_train(self, loss, num_async_replicas=1, use_tpu=False):
train_op = self.optimize(loss, num_async_replicas=num_async_replicas,
use_tpu=use_tpu)
if use_tpu:
if self._hparams.warm_start_from:
def scaffold_fn():
self.initialize_from_ckpt(self._hparams.warm_start_from)
return tf.train.Scaffold()
else:
scaffold_fn = None
if self.hparams.tpu_enable_host_call:
host_call = self.create_train_host_call()
else:
host_call = None
remove_summaries()
return tf.contrib.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.TRAIN,
loss=loss,
train_op=train_op,
host_call=host_call,
scaffold_fn=scaffold_fn)
else:
if self._hparams.warm_start_from:
self.initialize_from_ckpt(self._hparams.warm_start_from)
if self._hparams.warm_start_from_second:
self.initialize_from_ckpt(self._hparams.warm_start_from_second)
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op) | Constructs `tf.estimator.EstimatorSpec` for TRAIN (training) mode. |
22,675 | def get_snpeff_info(snpeff_string, snpeff_header):
snpeff_annotations = [
dict(zip(snpeff_header, snpeff_annotation.split('|')))
for snpeff_annotation in snpeff_string.split(',')
]
return snpeff_annotations | Make the vep annotations into dictionaries
A snpeff dictionary will have the snpeff column names as keys and
the vep annotations as values.
The dictionaries are stored in a list.
One dictionary for each transcript.
Args:
snpeff_string (string): A string with the ANN annotation
snpeff_header (list): A list with the vep header
Return:
snpeff_annotations (list): A list of vep dicts |
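Assuming the comma/pipe separators filled in above, a sketch with an abbreviated, illustrative ANN header:

    snpeff_header = ['Allele', 'Annotation', 'Impact']
    get_snpeff_info('A|missense_variant|MODERATE,A|synonymous_variant|LOW', snpeff_header)
    # -> [{'Allele': 'A', 'Annotation': 'missense_variant', 'Impact': 'MODERATE'},
    #     {'Allele': 'A', 'Annotation': 'synonymous_variant', 'Impact': 'LOW'}]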
22,676 | def udp_messenger(domain_name, UDP_IP, UDP_PORT, sock_timeout, message):
try:
if message is None:
raise ValueError("message was none")
encoded_message = bytes(message, "utf-8")
if encoded_message is None:
raise ValueError("utf-8 encoding of message failed")
if domain_name:
try:
UDP_IP = socket.gethostbyname(domain_name)
except Exception:
pass
if UDP_IP is None:
raise Exception("UDP_IP is None")
if UDP_PORT is None:
raise Exception("UDP_PORT is None")
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(sock_timeout)
sock.sendto(bytes(message, "utf-8"), (UDP_IP, UDP_PORT))
sock.close()
except socket.timeout:
logger.debug("Failed to send usage tracking data: socket timeout")
except OSError as e:
logger.debug("Failed to send usage tracking data: OSError: {}".format(e))
except Exception as e:
logger.debug("Failed to send usage tracking data: Exception: {}".format(e)) | Send UDP messages to usage tracker asynchronously
This multiprocessing based messenger was written to overcome the limitations
of signalling/terminating a thread that is blocked on a system call. This
messenger is created as a separate process, and initialized with 2 queues,
to_send to receive messages to be sent to the internet.
Args:
- domain_name (str) : Domain name string
- UDP_IP (str) : IP address YYY.YYY.YYY.YYY
- UDP_PORT (int) : UDP port to send out on
- sock_timeout (int) : Socket timeout
- to_send (multiprocessing.Queue) : Queue of outgoing messages to internet |
22,677 | def _add_prefix(self, split_names, start_node, group_type_name):
root = self._root_instance
prepend = []
if start_node.v_depth < 3 and not group_type_name == GROUP:
if start_node.v_depth == 0:
if group_type_name == DERIVED_PARAMETER_GROUP:
if split_names[0] == 'derived_parameters':
return split_names
else:
prepend += ['derived_parameters']
elif group_type_name == RESULT_GROUP:
if split_names[0] == 'results':
return split_names
else:
prepend += ['results']
elif group_type_name == CONFIG_GROUP:
if split_names[0] == 'config':
return split_names
else:
prepend += ['config']
elif group_type_name == PARAMETER_GROUP:
if split_names[0] == 'parameters':
return split_names[0]
else:
prepend += ['parameters']
else:
raise RuntimeError()
if root._is_run and root._auto_run_prepend:
dummy = root.f_wildcard(, -1)
crun = root.f_wildcard()
if any(name in root._run_information for name in split_names):
pass
elif any(name == dummy for name in split_names):
pass
elif (group_type_name == RESULT_GROUP or
group_type_name == DERIVED_PARAMETER_GROUP):
if start_node.v_depth == 0:
prepend += [, crun]
elif start_node.v_depth == 1:
if len(split_names) == 1 and split_names[0] == :
return split_names
else:
prepend += [, crun]
elif start_node.v_depth == 2 and start_node.v_name == :
prepend += [crun]
if prepend:
split_names = prepend + split_names
return split_names | Adds the correct sub branch prefix to a given name.
Usually the prefix is the full name of the parent node. In case items are added
directly to the trajectory the prefixes are chosen according to the matching subbranch.
For example, this could be 'parameters' for parameters or 'results.run_00000004' for
results added to the fifth single run.
:param split_names:
List of names of the new node (e.g. ``['mynewgroupA', 'mynewgroupB', 'myresult']``).
:param start_node:
Parent node under which the new node should be added.
:param group_type_name:
Type name of subbranch the item belongs to
(e.g. 'PARAMETER_GROUP', 'RESULT_GROUP' etc).
:return: The name with the added prefix. |
22,678 | def get_element_dt(self, el_name, tz=None, el_idx=0):
return iso8601.parse_date(self.get_element_by_name(el_name, el_idx).text, tz) | Return the text of the selected element as a ``datetime.datetime`` object.
The element text must be an ISO 8601 formatted datetime.
Args:
el_name : str
Name of element to use.
tz : datetime.tzinfo
Timezone in which to return the datetime.
- If the parsed datetime has a timezone: the ``tz`` parameter is ignored.
- If the parsed datetime is naive (without timezone): the timezone is set to ``tz``.
- ``tz=None``: Leave a naive datetime naive. Without a timezone, other contextual
information is required in order to determine the exact represented time.
- ``tz=d1_common.date_time.UTC()``: Set a naive datetime to UTC.
el_idx : int
Index of element to use in the event that there are multiple sibling
elements with the same name.
Returns:
datetime.datetime |
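A usage sketch, assuming ``doc`` is an instance of the wrapper class this method belongs to and that the document contains an element such as <dateUploaded>2019-03-01T12:00:00Z</dateUploaded>:

import d1_common.date_time
dt = doc.get_element_dt("dateUploaded", tz=d1_common.date_time.UTC())
print(dt.isoformat())   # 2019-03-01T12:00:00+00:00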
22,679 | def cctop_submit(seq_str):
url = .format(seq_str)
r = requests.post(url)
jobid = r.text.split()[1]
return jobid | Submit a protein sequence string to CCTOP and return the job ID.
Args:
seq_str (str): Protein sequence as a string
Returns:
str: Job ID on the CCTOP server |
22,680 | def normalize_per_cell_weinreb16_deprecated(
X,
max_fraction=1,
mult_with_mean=False,
) -> np.ndarray:
if max_fraction < 0 or max_fraction > 1:
        raise ValueError('Choose max_fraction between 0 and 1.')
counts_per_cell = X.sum(1).A1 if issparse(X) else X.sum(1)
gene_subset = np.all(X <= counts_per_cell[:, None] * max_fraction, axis=0)
if issparse(X): gene_subset = gene_subset.A1
tc_include = X[:, gene_subset].sum(1).A1 if issparse(X) else X[:, gene_subset].sum(1)
X_norm = X.multiply(csr_matrix(1/tc_include[:, None])) if issparse(X) else X / tc_include[:, None]
if mult_with_mean:
X_norm *= np.mean(counts_per_cell)
return X_norm | Normalize each cell [Weinreb17]_.
This is a deprecated version. See `normalize_per_cell` instead.
Normalize each cell by UMI count, so that every cell has the same total
count.
Parameters
----------
X : np.ndarray
Expression matrix. Rows correspond to cells and columns to genes.
max_fraction : float, optional
Only use genes that make up more than max_fraction of the total
reads in every cell.
mult_with_mean: bool, optional
Multiply the result with the mean of total counts.
Returns
-------
Normalized version of the original expression matrix. |
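A small worked example (a 2-cell x 2-gene matrix, illustrative values); after normalization both cells have the same total count:

import numpy as np
X = np.array([[1., 3.],
              [2., 6.]])
X_norm = normalize_per_cell_weinreb16_deprecated(X, max_fraction=1, mult_with_mean=True)
print(X_norm)   # [[1.5 4.5]
                #  [1.5 4.5]] -- each row now sums to 6, the mean total count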
22,681 | def _validate_covars(covars, covariance_type, n_components):
from scipy import linalg
if covariance_type == :
if len(covars) != n_components:
raise ValueError(" covars have length n_components")
elif np.any(covars <= 0):
raise ValueError(" covars must be non-negative")
elif covariance_type == :
if covars.shape[0] != covars.shape[1]:
raise ValueError(" covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError(" covars must be symmetric, "
"positive-definite")
elif covariance_type == :
if len(covars.shape) != 2:
raise ValueError(" covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError(" covars must be non-negative")
elif covariance_type == :
if len(covars.shape) != 3:
raise ValueError(" covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError(" covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
", , , ") | Do basic checks on matrix covariance sizes and values. |
22,682 | def migrateFileFields(portal):
portal_types = [
"Attachment",
"ARImport",
"Instrument",
"InstrumentCertification",
"Method",
"Multifile",
"Report",
"ARReport",
"SamplePoint"]
for portal_type in portal_types:
migrate_to_blob(
portal,
portal_type=portal_type,
remove_old_value=True) | This function walks over all attachment types and migrates their FileField
fields. |
22,683 | def get_delta(D, k):
if k < 0:
        raise Exception('k must be a non-negative integer')
result = D
for i in range(k):
result = D.T.dot(result) if i % 2 == 0 else D.dot(result)
return result | Calculate the k-th order trend filtering matrix given the oriented edge
incidence matrix and the value of k. |
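For example, with the oriented edge-incidence (first-difference) matrix of a 4-node chain, k=1 yields the second-order difference operator D.T @ D:

import numpy as np
D = np.array([[-1,  1,  0,  0],
              [ 0, -1,  1,  0],
              [ 0,  0, -1,  1]])
D2 = get_delta(D, 1)
print(D2)   # tridiagonal [[1,-1,0,0],[-1,2,-1,0],[0,-1,2,-1],[0,0,-1,1]]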
22,684 | def get_element_ids(self, prefix_id):
if isinstance(self.widget, widgets.MultiWidget):
        ids = ['{0}_{1}_{2}'.format(prefix_id, self.name, field_name) for field_name in self.widget]
    elif isinstance(self.widget, (widgets.SelectMultiple, widgets.RadioSelect)):
        ids = ['{0}_{1}_{2}'.format(prefix_id, self.name, k) for k in range(len(self.widget.choices))]
    else:
        ids = ['{0}_{1}'.format(prefix_id, self.name)]
return ids | Returns a single or a list of element ids, one for each input widget of this field |
22,685 | def set_cookie(
self,
key: str,
        value: AnyStr='',
max_age: Optional[Union[int, timedelta]]=None,
expires: Optional[datetime]=None,
        path: str='/',
domain: Optional[str]=None,
secure: bool=False,
httponly: bool=False,
) -> None:
if isinstance(value, bytes):
value = value.decode()
cookie = create_cookie(key, value, max_age, expires, path, domain, secure, httponly)
        self.headers.add('Set-Cookie', cookie.output(header=''))
The arguments are the standard cookie morsels and this is a
wrapper around the stdlib SimpleCookie code. |
22,686 | def _parse_notes_dict(sbase):
notes = sbase.getNotesString()
if notes and len(notes) > 0:
pattern = r"<p>\s*(\w+\s*\w*)\s*:\s*([\w|\s]+)<"
matches = re.findall(pattern, notes)
d = {k.strip(): v.strip() for (k, v) in matches}
return {k: v for k, v in d.items() if len(v) > 0}
else:
return {} | Creates dictionary of COBRA notes.
Parameters
----------
sbase : libsbml.SBase
Returns
-------
dict of notes |
22,687 | def to_dict(self):
return {
"id": self.get_node_id().hex,
"name": self.title,
"thumbnail": self.thumbnail.filename if self.thumbnail else None,
"language" : self.language,
"description": self.description or "",
"license": self.license,
"source_domain": self.source_domain,
"source_id": self.source_id,
"ricecooker_version": __version__,
} | to_dict: puts channel data into the format that Kolibri Studio expects
Args: None
Returns: dict of channel data |
22,688 | def edit_release_notes():
from tempfile import mkstemp
import os
import shlex
import subprocess
    text_editor = shlex.split(os.environ.get('EDITOR', 'nano'))
    fd, tmp = mkstemp(prefix='release_notes_')
try:
os.close(fd)
        with open(tmp, 'w') as f:
            f.write(u"\n\n# Please write the release notes above this line.\n"
                    u"# Lines starting with '#' will be removed.\n")
        subprocess.check_call(text_editor + [tmp])
        with open(tmp, 'r') as f:
            changes = "".join(
                l for l in f.readlines() if not l.startswith('#'))
finally:
os.remove(tmp)
return changes | Use the default text $EDITOR to write release notes.
If $EDITOR is not set, use 'nano'. |
22,689 | def canintersect(self, other):
return not (self.max < other.min or other.max < self.min) | Intersection is not well-defined for all pairs of multipliers.
For example:
{2,3} & {3,4} = {3}
{2,} & {1,7} = {2,7}
{2} & {5} = ERROR |
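A sketch mirroring the examples in the docstring, assuming objects that expose numeric ``min`` and ``max`` bounds:

class Mult:
    def __init__(self, lo, hi):
        self.min, self.max = lo, hi

print(canintersect(Mult(2, 3), Mult(3, 4)))   # True  -> they share {3}
print(canintersect(Mult(2, 4), Mult(5, 7)))   # False -> the ranges are disjoint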
22,690 | def get_failed_instruments(self):
bsc = api.get_tool("bika_setup_catalog")
insts = bsc(portal_type="Instrument", is_active=True)
for i in insts:
i = i.getObject()
            instr = {
                'uid': i.UID(),
                'title': i.Title(),
            }
            if i.isValidationInProgress():
                instr['link'] = '<a href="%s">%s</a>' % (
                    i.absolute_url(), i.Title()
                )
                self.nr_failed += 1
                self.failed['validation'].append(instr)
            elif i.isCalibrationInProgress():
                instr['link'] = '<a href="%s">%s</a>' % (
                    i.absolute_url(), i.Title()
                )
                self.nr_failed += 1
                self.failed['calibration'].append(instr)
            elif i.isOutOfDate():
                instr['link'] = '<a href="%s">%s</a>' % (
                    i.absolute_url(), i.Title()
                )
                self.nr_failed += 1
                self.failed['out-of-date'].append(instr)
            elif not i.isQCValid():
                instr['link'] = '<a href="%s">%s</a>' % (
                    i.absolute_url(), i.Title()
                )
                self.nr_failed += 1
                self.failed['qc-fail'].append(instr)
            elif i.getDisposeUntilNextCalibrationTest():
                instr['link'] = '<a href="%s">%s</a>' % (
                    i.absolute_url(), i.Title()
                )
                self.nr_failed += 1
                self.failed['next-test'].append(instr) | Find invalid instruments
- instruments who have failed QC tests
- instruments whose certificate is out of date
- instruments which are disposed until next calibration test
Return a dictionary with all info about expired/invalid instruments |
22,691 | def _make_intermediate_dirs(sftp_client, remote_directory):
    if remote_directory == '/':
        sftp_client.chdir('/')
        return
    if remote_directory == '':
        return
    try:
        sftp_client.chdir(remote_directory)
    except IOError:
        dirname, basename = os.path.split(remote_directory.rstrip('/'))
_make_intermediate_dirs(sftp_client, dirname)
sftp_client.mkdir(basename)
sftp_client.chdir(basename)
return | Create all the intermediate directories in a remote host
:param sftp_client: A Paramiko SFTP client.
:param remote_directory: Absolute Path of the directory containing the file
:return: |
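A usage sketch with Paramiko; the host, credentials, and paths are placeholders:

import paramiko
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect("example.com", username="deploy", password="secret")
sftp = ssh.open_sftp()
_make_intermediate_dirs(sftp, "/var/data/incoming/2024")   # creates each missing level
sftp.put("report.csv", "/var/data/incoming/2024/report.csv")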
22,692 | def digests_are_equal(digest1, digest2):
securesystemslib.formats.HEX_SCHEMA.check_match(digest1)
securesystemslib.formats.HEX_SCHEMA.check_match(digest2)
if len(digest1) != len(digest2):
return False
are_equal = True
for element in range(len(digest1)):
if digest1[element] != digest2[element]:
are_equal = False
return are_equal | <Purpose>
While protecting against timing attacks, compare the hexadecimal arguments
and determine if they are equal.
<Arguments>
digest1:
The first hexadecimal string value to compare.
digest2:
The second hexadecimal string value to compare.
<Exceptions>
securesystemslib.exceptions.FormatError: If the arguments are improperly
formatted.
<Side Effects>
None.
<Return>
Return True if 'digest1' is equal to 'digest2', False otherwise. |
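For example, comparing two SHA-256 hex digests; the loop above always walks the full string, so the comparison time does not leak where the digests first differ:

import hashlib
d1 = hashlib.sha256(b"payload").hexdigest()
d2 = hashlib.sha256(b"payload").hexdigest()
print(digests_are_equal(d1, d2))          # True
print(digests_are_equal(d1, "ab" * 32))   # False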
22,693 | def trim_wav_pydub(in_path: Path, out_path: Path,
start_time: int, end_time: int) -> None:
logger.info(
"Using pydub/ffmpeg to create {} from {}".format(out_path, in_path) +
" using a start_time of {} and an end_time of {}".format(start_time,
end_time))
if out_path.is_file():
return
in_ext = in_path.suffix[1:]
out_ext = out_path.suffix[1:]
audio = AudioSegment.from_file(str(in_path), in_ext)
trimmed = audio[start_time:end_time]
trimmed.export(str(out_path), format=out_ext,
parameters=["-ac", "1", "-ar", "16000"]) | Crops the wav file. |
22,694 | def _buildDict(self):
lexDict = {}
    with io.open(self.islePath, "r", encoding="utf-8") as fd:
wordList = [line.rstrip() for line in fd]
for row in wordList:
word, pronunciation = row.split(" ", 1)
word, extraInfo = word.split("(", 1)
extraInfo = extraInfo.replace(")", "")
            extraInfoList = [segment for segment in extraInfo.split(",")
                             if ("_" not in segment and "+" not in segment and
                                 '.' not in segment and segment != '')]
lexDict.setdefault(word, [])
lexDict[word].append((pronunciation, extraInfoList))
return lexDict | Builds the isle textfile into a dictionary for fast searching |
22,695 | def ac_viz(acdata):
acdata = np.log(acdata+0.000001)
    acdata[:, :, acdata.shape[2] // 2] = acdata[:, :, acdata.shape[2] // 2 - 1]
acdata[:, :, acdata.shape[2] - 1] = np.max(acdata)
return acdata | Adapted from Gerry Harp at SETI.
Slightly massages the autocorrelated calculation result for better visualization.
In particular, the natural log of the data are calculated and the
values along the subband edges are set to the maximum value of the data,
and the t=0 delay of the autocorrelation result are set to the value of the t=-1 delay.
This is allowed because the t=0, and subband edges do not carry any information.
To avoid log(0), a value of 0.000001 is added to all array elements before being logged. |
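A toy example, assuming an autocorrelation cube shaped (coarse channels, time bins, delay lags); the dimensions are illustrative:

import numpy as np
ac = np.abs(np.random.randn(4, 8, 16)) + 1e-3
viz = ac_viz(ac)   # log-scaled, t=0 lag copied from t=-1, band edge set to the max value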
22,696 | def ensure_parent_id(self):
if self.trace_parent.span_id == self.id:
self.trace_parent.span_id = "%016x" % random.getrandbits(64)
logger.debug("Set parent id to generated %s", self.trace_parent.span_id)
return self.trace_parent.span_id | If current trace_parent has no span_id, generate one, then return it
This is used to generate a span ID which the RUM agent will use to correlate
the RUM transaction with the backend transaction. |
22,697 | def expected_error_messages(*error_messages):
def wrapper(func):
setattr(func, EXPECTED_ERROR_MESSAGES, error_messages)
return func
    return wrapper | Decorator declaring the error messages expected at the end of a test method.
As parameters, pass values of the kind that
:py:meth:`~.WebdriverWrapperErrorMixin.get_error_messages`
returns.
.. versionadded:: 2.0
Before this decorator was called ``ShouldBeError``. |
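A usage sketch; the test class and its helper method are hypothetical stand-ins:

class LoginTests(MyWebdriverTestCase):            # hypothetical base class
    @expected_error_messages('wrong_password')    # message the page is expected to show
    def test_login_with_wrong_password(self):
        self.submit_login('user', 'bad-password')  # hypothetical helper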
22,698 | def run_from_argv(self, argv):
    self.argv_string = ' '.join(argv)
    super(EmailNotificationCommand, self).run_from_argv(argv) | Overridden in order to access the command line arguments. |
22,699 | def install(editable=True):
try:
        __import__(package['name'])
except ImportError:
pass
else:
run("pip uninstall --quiet -y %s" % package[], warn=True)
cmd = "pip install --quiet "
cmd += "-e ." if editable else "."
run(cmd, warn=True) | Install this component (or remove and reinstall) |