Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
12,900 | def execute_pubsub(self, command, *channels):
conn, address = self.get_connection(command)
if conn is not None:
return conn.execute_pubsub(command, *channels)
else:
return self._wait_execute_pubsub(address, command, channels, {}) | Executes Redis (p)subscribe/(p)unsubscribe commands.
ConnectionsPool picks separate connection for pub/sub
and uses it until explicitly closed or disconnected
(unsubscribing from all channels/patterns will leave connection
locked for pub/sub use).
There is no auto-reconnect for this PUB/SUB connection.
Returns asyncio.gather coroutine waiting for all channels/patterns
to receive answers. |
12,901 | def projects_from_cli(args):
description = (
'Determine if a set of project dependencies will work with Python 3')
parser = argparse.ArgumentParser(description=description)
req_help = 'path(s) to a pip requirements file (e.g. requirements.txt)'
parser.add_argument('--requirements', '-r', nargs='+', default=(),
help=req_help)
meta_help = 'path(s) to a PKG-INFO file (e.g. EGG-INFO/PKG-INFO)'
parser.add_argument('--metadata', '-m', nargs='+', default=(),
help=meta_help)
parser.add_argument('--projects', '-p', nargs='+', default=(),
help='name(s) of projects to test')
parser.add_argument('--verbose', '-v', action='store_true',
help='verbose output')
parsed = parser.parse_args(args)
if not (parsed.requirements or parsed.metadata or parsed.projects):
parser.error("Missing 'requirements', 'metadata', or 'projects'")
projects = []
if parsed.verbose:
logging.getLogger().setLevel(logging.INFO)
projects.extend(projects_.projects_from_requirements(parsed.requirements))
metadata = []
for metadata_path in parsed.metadata:
with io.open(metadata_path) as file:
metadata.append(file.read())
projects.extend(projects_.projects_from_metadata(metadata))
projects.extend(map(packaging.utils.canonicalize_name, parsed.projects))
return projects | Take arguments through the CLI and create a list of specified projects. |
12,902 | def mod_aggregate(low, chunks, running):
rules = []
agg_enabled = [
'append',
'insert',
]
if low.get('fun') not in agg_enabled:
return low
for chunk in chunks:
tag = __utils__['state.gen_tag'](chunk)
if tag in running:
continue
if chunk.get('state') == 'iptables':
if '__agg__' in chunk:
continue
if chunk.get('fun') != low.get('fun'):
continue
if chunk not in rules:
rules.append(chunk)
chunk['__agg__'] = True
if rules:
if 'rules' in low:
low['rules'].extend(rules)
else:
low['rules'] = rules
return low | The mod_aggregate function which looks up all rules in the available
low chunks and merges them into a single rules ref in the present low data |
12,903 | def archive(cwd,
output,
rev=,
prefix=None,
git_opts=,
user=None,
password=None,
ignore_retcode=False,
output_encoding=None,
**kwargs):
return True | .. versionchanged:: 2015.8.0
Returns ``True`` if successful, raises an error if not.
Interface to `git-archive(1)`_, exports a tarball/zip file of the
repository
cwd
The path to be archived
.. note::
``git archive`` permits a partial archive to be created. Thus, this
path does not need to be the root of the git repository. Only the
files within the directory specified by ``cwd`` (and its
subdirectories) will be in the resulting archive. For example, if
there is a git checkout at ``/tmp/foo``, then passing
``/tmp/foo/bar`` as the ``cwd`` will result in just the files
underneath ``/tmp/foo/bar`` to be exported as an archive.
output
The path of the archive to be created
overwrite : False
Unless set to ``True``, Salt will not overwrite an existing archive at
the path specified by the ``output`` argument.
.. versionadded:: 2015.8.0
rev : HEAD
The revision from which to create the archive
format
Manually specify the file format of the resulting archive. This
argument can be omitted, and ``git archive`` will attempt to guess the
archive type (and compression) from the filename. ``zip``, ``tar``,
``tar.gz``, and ``tgz`` are extensions that are recognized
automatically, and git can be configured to support other archive types
with the addition of git configuration keys.
See the `git-archive(1)`_ manpage explanation of the
``--format`` argument (as well as the ``CONFIGURATION`` section of the
manpage) for further information.
.. versionadded:: 2015.8.0
prefix
Prepend ``<prefix>`` to every filename in the archive. If unspecified,
the name of the directory at the top level of the repository will be
used as the prefix (e.g. if ``cwd`` is set to ``/foo/bar/baz``, the
prefix will be ``baz``, and the resulting archive will contain a
top-level directory by that name).
.. note::
The default behavior if the ``--prefix`` option for ``git archive``
is not specified is to not prepend a prefix, so Salt's behavior
differs slightly from ``git archive`` in this respect. Use
``prefix=''`` to create an archive with no prefix.
.. versionchanged:: 2015.8.0
The behavior of this argument has been changed slightly. As of
this version, it is necessary to include the trailing slash when
specifying a prefix, if the prefix is intended to create a
top-level directory.
git_opts
Any additional options to add to git command itself (not the
``archive`` subcommand), in a single string. This is useful for passing
``-c`` to run git with temporary changes to the git configuration.
.. versionadded:: 2017.7.0
.. note::
This is only supported in git 1.7.2 and newer.
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
ignore_retcode : False
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
.. versionadded:: 2015.8.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
.. _`git-archive(1)`: http://git-scm.com/docs/git-archive
CLI Example:
.. code-block:: bash
salt myminion git.archive /path/to/repo /path/to/archive.tar |
12,904 | def fso_rmtree(self, path, ignore_errors=False, onerror=None):
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
try:
if self.fso_islink(path):
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
return
names = []
try:
names = self.fso_listdir(path)
except os.error as err:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = self.fso_lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
self.fso_rmtree(fullname, ignore_errors, onerror)
else:
try:
self.fso_remove(fullname)
except OSError as err:
onerror(os.remove, fullname, sys.exc_info())
try:
self.fso_rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info()) | overlays shutil.rmtree() |
12,905 | def destroy_vm_vdis(name=None, session=None, call=None):
if session is None:
session = _get_session()
ret = {}
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
vbds = session.xenapi.VM.get_VBDs(vms[0])
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
if vbd_record['VDI'] != 'OpaqueRef:NULL':
vdi_record = session.xenapi.VDI.get_record(
vbd_record['VDI'])
if 'iso' not in vdi_record['name_label']:
session.xenapi.VDI.destroy(vbd_record['VDI'])
ret['vdi-{}'.format(x)] = vdi_record['name_label']
x += 1
return ret | Get virtual block devices on VM
.. code-block:: bash
salt-cloud -a destroy_vm_vdis xenvm01 |
12,906 | def process_exception(self, request, exception):
if isinstance(exception, CasTicketException):
do_logout(request)
return HttpResponseRedirect(request.path)
else:
return None | When we get a CasTicketException, that is probably caused by the ticket timing out.
So logout/login and get the same page again. |
12,907 | def find_uncommitted_filefields(sender, instance, **kwargs):
uncommitted = instance._uncommitted_filefields = []
fields = sender._meta.fields
if kwargs.get('update_fields', None):
update_fields = set(kwargs['update_fields'])
fields = update_fields.intersection(fields)
for field in fields:
if isinstance(field, FileField):
fieldfile = getattr(instance, field.name)
if fieldfile and not fieldfile._committed:
uncommitted.append(field.name) | A pre_save signal handler which attaches an attribute to the model instance
containing all uncommitted ``FileField``s, which can then be used by the
:func:`signal_committed_filefields` post_save handler. |
12,908 | def numpy_binning(data, bins=10, range=None, *args, **kwargs) -> NumpyBinning:
if isinstance(bins, int):
if range:
bins = np.linspace(range[0], range[1], bins + 1)
else:
start = data.min()
stop = data.max()
bins = np.linspace(start, stop, bins + 1)
elif np.iterable(bins):
bins = np.asarray(bins)
else:
_, bins = np.histogram(data, bins, **kwargs)
return NumpyBinning(bins) | Construct binning schema compatible with numpy.histogram
Parameters
----------
data: array_like, optional
This is optional if both bins and range are set
bins: int or array_like
range: Optional[tuple]
(min, max)
includes_right_edge: Optional[bool]
default: True
See Also
--------
numpy.histogram |
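A minimal NumPy-only sketch of how the three branches above derive the bin edges (the `NumpyBinning` wrapper itself comes from the surrounding library and is not reproduced here):

```python
import numpy as np

data = np.array([1.0, 2.5, 3.7, 4.2])

# int bins with an explicit range -> evenly spaced edges
edges = np.linspace(0.0, 5.0, 10 + 1)

# int bins without a range -> edges span the data
edges_auto = np.linspace(data.min(), data.max(), 10 + 1)

# anything else -> defer to numpy.histogram's own edge computation
_, edges_hist = np.histogram(data, bins="auto")
```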
12,909 | def dead_letter(self, description=None):
self._is_live()
details = {
'deadletter-reason': str(description) if description else "",
'deadletter-description': str(description) if description else ""}
self._receiver._settle_deferred(
'suspended', [self.lock_token], dead_letter_details=details)
self._settled = True | Move the message to the Dead Letter queue.
The Dead Letter queue is a sub-queue that can be
used to store messages that failed to process correctly, or otherwise require further inspection
or processing. The queue can also be configured to send expired messages to the Dead Letter queue.
To receive dead-lettered messages, use `QueueClient.get_deadletter_receiver()` or
`SubscriptionClient.get_deadletter_receiver()`.
:param description: The reason for dead-lettering the message.
:type description: str
:raises: ~azure.servicebus.common.errors.MessageAlreadySettled if the message has been settled.
:raises: ~azure.servicebus.common.errors.MessageLockExpired if message lock has already expired.
:raises: ~azure.servicebus.common.errors.SessionLockExpired if session lock has already expired.
:raises: ~azure.servicebus.common.errors.MessageSettleFailed if message settle operation fails. |
12,910 | def add_record(self, is_sslv2=None, is_tls13=None):
if is_sslv2 is None and is_tls13 is None:
v = (self.cur_session.tls_version or
self.cur_session.advertised_tls_version)
if v in [0x0200, 0x0002]:
is_sslv2 = True
elif v >= 0x0304:
is_tls13 = True
if is_sslv2:
self.buffer_out.append(SSLv2(tls_session=self.cur_session))
elif is_tls13:
self.buffer_out.append(TLS13(tls_session=self.cur_session))
else:
self.buffer_out.append(TLS(tls_session=self.cur_session)) | Add a new TLS or SSLv2 or TLS 1.3 record to the packets buffered out. |
12,911 | def factorset_divide(factorset1, factorset2):
if not isinstance(factorset1, FactorSet) or not isinstance(factorset2, FactorSet):
raise TypeError("factorset1 and factorset2 must be FactorSet instances")
return factorset1.divide(factorset2, inplace=False) | r"""
Base method for dividing two factor sets.
Division of two factor sets :math:`\frac{\vec\phi_1}{\vec\phi_2}` basically translates to union of all the factors
present in :math:`\vec\phi_1` and :math:`\frac{1}{\phi_i}` of all the factors present in :math:`\vec\phi_2`.
Parameters
----------
factorset1: FactorSet
The dividend
factorset2: FactorSet
The divisor
Returns
-------
The division of factorset1 and factorset2
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> from pgmpy.factors import factorset_divide
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
>>> factor_set2 = FactorSet(phi3, phi4)
>>> factor_set3 = factorset_divide(factor_set2, factor_set1)
>>> print(factor_set3)
set([<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f119ad78f90>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f119ad78e50>,
<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f119ad78ed0>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f119ad78e90>]) |
12,912 | def encode(self, x):
n = self._outputSize
y = np.zeros(n)
Q = self._Q
W = self._W
t = self._t
lam = self._lambda
try:
y_star = np.random.sample(n)
y_star = fixed_point(lambda p: expit(lam * ( np.dot(Q,x) + np.dot(W,p) - t)),
y_star, maxiter=2000, method=)
except RuntimeError:
pass
winner = np.where(y_star > 0.5)[0]
y[ winner ] = 1.
return y | Given an input array `x` it returns its associated encoding `y(x)`.
Please cf. the paper for more details.
Note that NO learning takes place. |
12,913 | def query_disease():
allowed_str_args = ['identifier', 'ref_id', 'ref_type', 'name', 'acronym', 'description']
args = get_args(
request_args=request.args,
allowed_str_args=allowed_str_args
)
return jsonify(query.disease(**args)) | Returns list of diseases by query parameters
---
tags:
- Query functions
parameters:
- name: identifier
in: query
type: string
required: false
description: Disease identifier
default: DI-03832
- name: ref_id
in: query
type: string
required: false
description: reference identifier
default: 104300
- name: ref_type
in: query
type: string
required: false
description: Reference type
default: MIM
- name: name
in: query
type: string
required: false
description: Disease name
default: Alzheimer disease
- name: acronym
in: query
type: string
required: false
description: Disease acronym
default: AD
- name: description
in: query
type: string
required: false
description: Description of disease
default: '%neurodegenerative disorder%'
- name: limit
in: query
type: integer
required: false
description: limit of results numbers
default: 10 |
12,914 | def hash_from_stream(n, hash_stream):
_to_int64 = to_int64
x = 0x345678
multiplied = _to_int64(1000003)
for n in range(n - 1, -1, -1):
h = next(hash_stream)
x = _to_int64((x ^ h) * multiplied)
multiplied += _to_int64(82520 + _to_int64(2 * n))
multiplied = _to_int64(multiplied)
x += 97531
x = _to_int64(x)
if x == -1:
return -2
return x | Not standard hashing algorithm!
Install NumPy for better hashing service.
>>> from Redy.Tools._py_hash import hash_from_stream
>>> s = iter((1, 2, 3))
>>> assert hash_from_stream(3, map(hash, s)) == hash((1, 2, 3)) |
12,915 | def join(self, *args):
call_args = list(args)
joiner = call_args.pop(0)
self.random.shuffle(call_args)
return joiner.join(call_args) | Returns the arguments in the list joined by STR.
FIRST,JOIN_BY,ARG_1,...,ARG_N
%{JOIN: ,A,...,F} -> 'A B C ... F' |
12,916 | def get_strategy_types():
def get_subtypes(type_):
subtypes = type_.__subclasses__()
for subtype in subtypes:
subtypes.extend(get_subtypes(subtype))
return subtypes
return get_subtypes(Strategy) | Get a list of all :class:`Strategy` subclasses. |
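A self-contained sketch of the same recursive subclass walk; the strategy class names below are hypothetical stand-ins:

```python
class Strategy: pass
class MeanReversion(Strategy): pass
class Momentum(Strategy): pass
class FastMomentum(Momentum): pass

def get_subtypes(type_):
    # __subclasses__() only returns direct subclasses, so recurse to cover the whole tree
    subtypes = type_.__subclasses__()
    for subtype in subtypes:
        subtypes.extend(get_subtypes(subtype))
    return subtypes

print(get_subtypes(Strategy))
# [MeanReversion, Momentum, FastMomentum] -- nested subclasses are picked up too
```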
12,917 | def get_deployments(self, project, definition_id=None, definition_environment_id=None, created_by=None, min_modified_time=None, max_modified_time=None, deployment_status=None, operation_status=None, latest_attempts_only=None, query_order=None, top=None, continuation_token=None, created_for=None, min_started_time=None, max_started_time=None, source_branch=None):
route_values = {}
if project is not None:
route_values[] = self._serialize.url(, project, )
query_parameters = {}
if definition_id is not None:
query_parameters[] = self._serialize.query(, definition_id, )
if definition_environment_id is not None:
query_parameters[] = self._serialize.query(, definition_environment_id, )
if created_by is not None:
query_parameters[] = self._serialize.query(, created_by, )
if min_modified_time is not None:
query_parameters[] = self._serialize.query(, min_modified_time, )
if max_modified_time is not None:
query_parameters[] = self._serialize.query(, max_modified_time, )
if deployment_status is not None:
query_parameters[] = self._serialize.query(, deployment_status, )
if operation_status is not None:
query_parameters[] = self._serialize.query(, operation_status, )
if latest_attempts_only is not None:
query_parameters[] = self._serialize.query(, latest_attempts_only, )
if query_order is not None:
query_parameters[] = self._serialize.query(, query_order, )
if top is not None:
query_parameters[] = self._serialize.query(, top, )
if continuation_token is not None:
query_parameters[] = self._serialize.query(, continuation_token, )
if created_for is not None:
query_parameters[] = self._serialize.query(, created_for, )
if min_started_time is not None:
query_parameters[] = self._serialize.query(, min_started_time, )
if max_started_time is not None:
query_parameters[] = self._serialize.query(, max_started_time, )
if source_branch is not None:
query_parameters[] = self._serialize.query(, source_branch, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize(, self._unwrap_collection(response)) | GetDeployments.
:param str project: Project ID or project name
:param int definition_id:
:param int definition_environment_id:
:param str created_by:
:param datetime min_modified_time:
:param datetime max_modified_time:
:param str deployment_status:
:param str operation_status:
:param bool latest_attempts_only:
:param str query_order:
:param int top:
:param int continuation_token:
:param str created_for:
:param datetime min_started_time:
:param datetime max_started_time:
:param str source_branch:
:rtype: [Deployment] |
12,918 | def reload_plugin(self, name, *args):
self._logger.debug("Reloading {}.".format(name))
self._logger.debug("Disabling {}.".format(name))
self.get_plugin(name).disable()
self._logger.debug("Removing plugin instance.")
del self._plugins[name]
self._logger.debug("Unloading module.")
del self._modules[name]
self._logger.debug("Reloading manifest.")
old_manifest = self.get_manifest(name)
self._manifests.remove(old_manifest)
self.load_manifest(old_manifest["path"])
self._logger.debug("Loading {}.".format(name))
self.load_plugin(self.get_manifest(name), *args)
self._logger.debug("Enabling {}.".format(name))
self.get_plugin(name).enable()
self._logger.debug("Plugin {} reloaded.".format(name)) | Reloads a given plugin
:param name: The name of the plugin
:param args: The args to pass to the plugin |
12,919 | def set_org_disclaimer(self):
is_checked = self.custom_org_disclaimer_checkbox.isChecked()
if is_checked:
org_disclaimer = setting(
,
default=disclaimer(),
expected_type=str,
qsettings=self.settings)
else:
org_disclaimer = disclaimer()
self.txtDisclaimer.setPlainText(org_disclaimer)
self.txtDisclaimer.setEnabled(is_checked) | Auto-connect slot activated when org disclaimer checkbox is toggled. |
12,920 | def ingest(self, text, logMessage=None):
http_args = {}
if logMessage:
http_args['logMessage'] = logMessage
headers = {'Content-Type': 'text/xml'}
url = 'objects/new'
if isinstance(text, six.text_type):
text = bytes(text.encode())
return self.post(url, data=text, params=http_args, headers=headers) | Ingest a new object into Fedora. Returns the pid of the new object on success.
Return response should have a status of 201 Created on success, and
the content of the response will be the newly created pid.
Wrapper function for `Fedora REST API ingest <http://fedora-commons.org/confluence/display/FCR30/REST+API#RESTAPI-ingest>`_
:param text: full text content of the object to be ingested
:param logMessage: optional log message
:rtype: :class:`requests.models.Response` |
12,921 | def learn_transportation_mode(track, clf):
for segment in track.segments:
tmodes = segment.transportation_modes
points = segment.points
features = []
labels = []
for tmode in tmodes:
points_part = points[tmode['from']:tmode['to']]
if len(points_part) > 0:
features.append(extract_features_2(points_part))
labels.append(tmode['label'])
clf.learn(features, labels) | Inserts transportation modes of a track into a classifier
Args:
track (:obj:`Track`)
clf (:obj:`Classifier`) |
12,922 | def parse_instance_count(instance_count, speaker_total_count):
result = copy.copy(speaker_total_count)
for speaker_id, count in instance_count.items():
speaker_id = str(speaker_id)
speaker_total = speaker_total_count.get(speaker_id, 0)
if type(count) == float and 0.0 <= count <= 1.0:
result[speaker_id] = int(speaker_total * count)
else:
result[speaker_id] = int(count)
return result | This parses the instance count dictionary
(that may contain floats from 0.0 to 1.0 representing a percentage)
and converts it to actual instance count. |
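A short usage sketch of the count parsing above, with made-up speaker IDs:

```python
speaker_total_count = {"spk1": 100, "spk2": 40}

# floats in [0.0, 1.0] are treated as a fraction of that speaker's total,
# anything else is taken as an absolute count
result = parse_instance_count({"spk1": 0.5, "spk2": 30}, speaker_total_count)
print(result)  # {'spk1': 50, 'spk2': 30}
```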
12,923 | def getScreenshotPropertyType(self, screenshotHandle):
fn = self.function_table.getScreenshotPropertyType
pError = EVRScreenshotError()
result = fn(screenshotHandle, byref(pError))
return result, pError | When your application receives a
VREvent_RequestScreenshot event, call these functions to get
the details of the screenshot request. |
12,924 | def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
return Differ(linejunk, charjunk).compare(a, b) | r"""
Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
Optional keyword parameters `linejunk` and `charjunk` are for filter
functions (or None):
- linejunk: A function that should accept a single string argument, and
return true iff the string is junk. The default is None, and is
recommended; as of Python 2.3, an adaptive notion of "noise" lines is
used that does a good job on its own.
- charjunk: A function that should accept a string of length 1. The
default is module-level function IS_CHARACTER_JUNK, which filters out
whitespace characters (a blank or tab; note: bad idea to include newline
in this!).
Tools/scripts/ndiff.py is a command-line front-end to this function.
Example:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
... 'ore\ntree\nemu\n'.splitlines(1))
>>> print ''.join(diff),
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu |
12,925 | def nonwhitelisted_allowed_principals(self, whitelist=None):
if not whitelist:
return []
nonwhitelisted = []
for statement in self.statements:
if statement.non_whitelisted_principals(whitelist) and statement.effect == "Allow":
nonwhitelisted.append(statement)
return nonwhitelisted | Find non whitelisted allowed principals. |
12,926 | def network_from_df(self, df):
teneto.utils.check_TemporalNetwork_input(df, 'df')
self.network = df
self._update_network() | Defines a network from an array.
Parameters
----------
array : array
Pandas dataframe. Should have columns: \'i\', \'j\', \'t\' where i and j are node indicies and t is the temporal index.
If weighted, should also include \'weight\'. Each row is an edge. |
12,927 | def get_action_side_effects(self):
result = SCons.Util.UniqueList([])
for target in self.get_action_targets():
result.extend(target.side_effects)
return result | Returns all side effects for all batches of this
Executor used by the underlying Action. |
12,928 | def query(conn_type, option, post_data=None):
if ticket is None or csrf is None or url is None:
log.debug()
_authenticate()
full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option)
log.debug(, conn_type, full_url, post_data)
httpheaders = {: ,
: ,
: }
if conn_type == 'post':
httpheaders['CSRFPreventionToken'] = csrf
response = requests.post(full_url, verify=verify_ssl,
data=post_data,
cookies=ticket,
headers=httpheaders)
elif conn_type == 'put':
httpheaders['CSRFPreventionToken'] = csrf
response = requests.put(full_url, verify=verify_ssl,
data=post_data,
cookies=ticket,
headers=httpheaders)
elif conn_type == 'delete':
httpheaders['CSRFPreventionToken'] = csrf
response = requests.delete(full_url, verify=verify_ssl,
data=post_data,
cookies=ticket,
headers=httpheaders)
elif conn_type == 'get':
response = requests.get(full_url, verify=verify_ssl,
cookies=ticket)
response.raise_for_status()
try:
returned_data = response.json()
if 'data' not in returned_data:
raise SaltCloudExecutionFailure
return returned_data['data']
except Exception:
log.error()
log.error(response) | Execute the HTTP request to the API |
12,929 | def list_items(cls, repo, *args, **kwargs):
out_list = IterableList(cls._id_attribute_)
out_list.extend(cls.iter_items(repo, *args, **kwargs))
return out_list | Find all items of this type - subclasses can specify args and kwargs differently.
If no args are given, subclasses are obliged to return all items if no additional
arguments arg given.
:note: Favor the iter_items method as it will
:return:list(Item,...) list of item instances |
12,930 | def mem_extend(self, start: int, size: int) -> None:
m_extend = self.calculate_extension_size(start, size)
if m_extend:
extend_gas = self.calculate_memory_gas(start, size)
self.min_gas_used += extend_gas
self.max_gas_used += extend_gas
self.check_gas()
self.memory.extend(m_extend) | Extends the memory of this machine state.
:param start: Start of memory extension
:param size: Size of memory extension |
12,931 | def cmyk(self):
c, m, y = self.cmy
k = min(c, m, y)
if k != 1:
c = (c - k) / (1 - k)
m = (m - k) / (1 - k)
y = (y - k) / (1 - k)
else:
c, m, y = 1, 1, 1
cmyk = (c, m, y, k)
return tuple(map(lambda x: self._apply_float_bounds(x), cmyk)) | CMYK: all returned in range 0.0 - 1.0 |
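A worked example of the CMY→CMYK arithmetic used above (plain Python, no color library):

```python
c, m, y = 0.2, 0.4, 0.6
k = min(c, m, y)                                   # 0.2 -- shared gray component becomes K
c, m, y = [(v - k) / (1 - k) for v in (c, m, y)]   # rescale the remaining chroma
print(round(c, 3), round(m, 3), round(y, 3), k)    # 0.0 0.25 0.5 0.2
```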
12,932 | def indices(this, that, axis=semantics.axis_default, missing=):
this = as_index(this, axis=axis, lex_as_struct=True)
that = as_index(that, axis=axis, base=True, lex_as_struct=True)
insertion = np.searchsorted(this._keys, that._keys, sorter=this.sorter, side='left')
indices = np.take(this.sorter, insertion, mode='clip')
if missing != 'ignore':
invalid = this._keys[indices] != that._keys
if missing == 'raise':
if np.any(invalid):
raise KeyError()
elif missing == 'mask':
indices = np.ma.masked_array(indices, invalid)
else:
indices[invalid] = missing
return indices | Find indices such that this[indices] == that
If multiple indices satisfy this condition, the first index found is returned
Parameters
----------
this : indexable object
items to search in
that : indexable object
items to search for
axis : int, optional
axis to operate on
missing : {'raise', 'ignore', 'mask' or int}
if `missing` is 'raise', a KeyError is raised if not all elements of `that` are present in `this`
if `missing` is 'mask', a masked array is returned,
where items of `that` not present in `this` are masked out
if `missing` is 'ignore', all elements of `that` are assumed to be present in `this`,
and output is undefined otherwise
if missing is an integer, this is used as a fill-value
Returns
-------
indices : ndarray, [that.size], int
indices such that this[indices] == that
Notes
-----
May be regarded as a vectorized numpy equivalent of list.index |
12,933 | def get(self, entry):
try:
list = self.cache[entry.key]
return list[list.index(entry)]
except:
return None | Gets an entry by key. Will return None if there is no
matching entry. |
12,934 | def to_unicode(s):
if not isinstance(s, six.string_types):
raise ValueError("{} must be str or unicode.".format(s))
if not isinstance(s, six.text_type):
s = six.text_type(s, 'utf-8')
return s | Return the object as unicode (only matters for Python 2.x).
If s is already Unicode, return s as is.
Otherwise, assume that s is UTF-8 encoded, and convert to Unicode.
:param (basestring) s: a str, unicode or other basestring object
:return (unicode): the object as unicode |
12,935 | def enable_caching(self):
"Enable the cache of this object."
self.caching_enabled = True
for c in self.values():
c.enable_cacher() | Enable the cache of this object. |
12,936 | def handle_profile_delete(self, sender, instance, **kwargs):
try:
self.handle_save(instance.user.__class__, instance.user)
except (get_profile_model().DoesNotExist):
pass | Custom handler for user profile delete |
12,937 | def add(repo_path, dest_path):
mkcfgdir()
try:
repo = getrepohandler(repo_path)
except NotARepo as err:
echo("ERROR: {}: {}".format(ERR_NOT_A_REPO, err.repo_path))
sys.exit(1)
if repo.isremote:
localrepo, needpull = addfromremote(repo, dest_path)
elif dest_path:
raise UsageError("DEST_PATH is only for repos hosted online")
else:
try:
repoid = repo.getrepoid()
except RepoHasNoCommitsError as err:
echo("ERROR: {}".format(ERR_NO_COMMITS))
sys.exit(1)
localrepo = RepoInfo(repo, repoid, None)
needpull = False
if not localrepo:
return
with saveconfig(RepoListConfig()) as cfg:
cfg.add_repo(localrepo)
success = run_update([localrepo], pullfirst=needpull, cancleanup=True)
if not success:
sys.exit(1) | Registers a git repository with homely so that it will run its `HOMELY.py`
script on each invocation of `homely update`. `homely add` also immediately
executes a `homely update` so that the dotfiles are installed straight
away. If the git repository is hosted online, a local clone will be created
first.
REPO_PATH
A path to a local git repository, or the URL for a git repository
hosted online. If REPO_PATH is a URL, then it should be in a format
accepted by `git clone`. If REPO_PATH is a URL, you may also specify
DEST_PATH.
DEST_PATH
If REPO_PATH is a URL, then the local clone will be created at
DEST_PATH. If DEST_PATH is omitted then the path to the local clone
will be automatically derived from REPO_PATH. |
12,938 | def _generate_autoscaling_metadata(self, cls, args):
assert isinstance(args, Mapping)
init_config = self._create_instance(
cloudformation.InitConfig,
args[][])
init = self._create_instance(
cloudformation.Init, {: init_config})
auth = None
if in args:
auth_blocks = {}
for k in args[]:
auth_blocks[k] = self._create_instance(
cloudformation.AuthenticationBlock,
args[][k],
k)
auth = self._create_instance(
cloudformation.Authentication, auth_blocks)
return cls(init, auth) | Provides special handling for the autoscaling.Metadata object |
12,939 | def steal_docstring_from(obj):
def deco(fn):
docs = [obj.__doc__]
if fn.__doc__:
docs.append(fn.__doc__)
fn.__doc__ = .join(docs)
return fn
return deco | Decorator that lets you steal a docstring from another object
Example
-------
::
@steal_docstring_from(superclass.meth)
def meth(self, arg):
"Extra subclass documentation"
pass
In this case the docstring of the new 'meth' will be copied from superclass.meth, and
if an additional dosctring was defined for meth it will be appended to the superclass
docstring with a two newlines inbetween. |
12,940 | def add_entity(self, rdf_type, superclass, label, definition=None):
: ilx_id,
: [{
: superclass_data[],
: superclass_data[]}],
: rdf_type}
data = superclasses_bug_fix(data)
if definition:
data.update({:definition})
return self.post(url, data) | Adds entity as long as it doesn't exist and has a usable
superclass ILX ID and rdf:type |
12,941 | def ensure_directory(directory):
directory = os.path.expanduser(directory)
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise e | Create the directories along the provided directory path that do not exist. |
12,942 | def write_options_to_JSON(self, filename):
fd = open(filename, "w")
fd.write(json.dumps(_options_to_dict(self.gc), indent=2,
separators=(',', ': ')))
fd.close() | Writes the options in JSON format to a file.
:param str filename: Target file to write the options. |
12,943 | def size(self, destination):
if not destination in self.queue_metadata:
return 0
else:
return len(self.queue_metadata[destination]['frames']) | Size of the queue for specified destination.
@param destination: The queue destination (e.g. /queue/foo)
@type destination: C{str}
@return: The number of frames in specified queue.
@rtype: C{int} |
12,944 | def json_serializer(pid, data, *args):
if data is not None:
response = Response(
json.dumps(data.dumps()),
mimetype='application/json'
)
else:
response = Response(mimetype='application/json')
return response | Build a JSON Flask response using the given data.
:param pid: The `invenio_pidstore.models.PersistentIdentifier` of the
record.
:param data: The record metadata.
:returns: A Flask response with JSON data.
:rtype: :py:class:`flask.Response`. |
12,945 | def as_proto(self):
if self._dims is None:
return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)
else:
return tensor_shape_pb2.TensorShapeProto(
dim=[
tensor_shape_pb2.TensorShapeProto.Dim(
size=-1 if d.value is None else d.value
)
for d in self._dims
]
) | Returns this shape as a `TensorShapeProto`. |
12,946 | def normalize(x:TensorImage, mean:FloatTensor,std:FloatTensor)->TensorImage:
"Normalize `x` with `mean` and `std`."
return (x-mean[...,None,None]) / std[...,None,None] | Normalize `x` with `mean` and `std`. |
12,947 | def sim(self, args):
if not self._started:
raise ApplicationNotStarted("BACnet stack not running - use startApp()")
args = args.split()
addr, obj_type, obj_inst, prop_id, value = args[:5]
if self.read("{} {} {} outOfService".format(addr, obj_type, obj_inst)):
self.write(
"{} {} {} {} {}".format(addr, obj_type, obj_inst, prop_id, value)
)
else:
try:
self.write(
"{} {} {} outOfService True".format(addr, obj_type, obj_inst)
)
except NoResponseFromController:
pass
try:
if self.read("{} {} {} outOfService".format(addr, obj_type, obj_inst)):
self.write(
"{} {} {} {} {}".format(
addr, obj_type, obj_inst, prop_id, value
)
)
else:
raise OutOfServiceNotSet()
except NoResponseFromController:
pass | Simulate I/O points by setting the Out_Of_Service property, then doing a
WriteProperty to the point's Present_Value.
:param args: String with <addr> <type> <inst> <prop> <value> [ <indx> ] [ <priority> ] |
12,948 | def Hk(self, k, m_pred, P_pred):
return self.H[:, :, int(self.index[self.H_time_var_index, k])] | function (k, m, P) return Jacobian of measurement function, it is
passed into p_h.
k (iteration number), starts at 0
m: point where Jacobian is evaluated
P: parameter for Jacobian, usually covariance matrix. |
12,949 | def wald_wolfowitz(sequence):
R = n_runs = sum(1 for s in groupby(sequence, lambda a: a))
n = float(sum(1 for s in sequence if s == sequence[0]))
m = float(sum(1 for s in sequence if s != sequence[0]))
ER = ((2 * n * m ) / (n + m)) + 1
VR = (2 * n * m * (2 * n * m - n - m )) / ((n + m)**2 * (n + m - 1))
O = (ER - 1) * (ER - 2) / (n + m - 1.)
assert VR - O < 0.001, (VR, O)
SD = math.sqrt(VR)
Z = (R - ER) / SD
return {: Z, : ER, : SD, : zprob(Z), : R} | implements the wald-wolfowitz runs test:
http://en.wikipedia.org/wiki/Wald-Wolfowitz_runs_test
http://support.sas.com/kb/33/092.html
:param sequence: any iterable with at most 2 values. e.g.
'1001001'
[1, 0, 1, 0, 1]
'abaaabbba'
:rtype: a dict with keys of
`n_runs`: the number of runs in the sequence
`p`: the support to reject the null-hypothesis that the number of runs
supports a random sequence
`z`: the z-score, used to calculate the p-value
`sd`, `mean`: the expected standard deviation, mean the number of runs,
given the ratio of numbers of 1's/0's in the sequence
>>> r = wald_wolfowitz('1000001')
>>> r['n_runs'] # should be 3, because 1, 0, 1
3
>>> r['p'] < 0.05 # not < 0.05 evidence to reject Ho of random sequence
False
# this should show significance for non-randomness
>>> li = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
>>> wald_wolfowitz(li)['p'] < 0.05
True |
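The arithmetic in the doctest above, written out for the sequence `'1000001'` (the p-value step via `zprob` is omitted):

```python
import math

seq = "1000001"
n = float(seq.count(seq[0]))   # 2 occurrences of the first symbol ('1')
m = float(len(seq) - n)        # 5 occurrences of the other symbol ('0')
R = 3                          # runs: '1', '00000', '1'

ER = (2 * n * m) / (n + m) + 1                                          # ~3.857 expected runs
VR = (2 * n * m * (2 * n * m - n - m)) / ((n + m) ** 2 * (n + m - 1))   # ~0.884 variance
Z = (R - ER) / math.sqrt(VR)   # ~-0.91, well inside +/-1.96, so no evidence of non-randomness
```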
12,950 | def getSizeFromPage(rh, page):
rh.printSysLog("Enter generalUtils.getSizeFromPage")
bSize = float(page) * 4096
mSize = cvtToMag(rh, bSize)
rh.printSysLog("Exit generalUtils.getSizeFromPage, magSize: " + mSize)
return mSize | Convert a size value from page to a number with a magnitude appended.
Input:
Request Handle
Size in page
Output:
Converted value with a magnitude |
12,951 | def key56_to_key64(key):
if len(key) != 7:
raise ValueError("DES 7-byte key is not 7 bytes in length, "
"actual: %d" % len(key))
new_key = b""
for i in range(0, 8):
if i == 0:
new_value = struct.unpack("B", key[i:i+1])[0]
elif i == 7:
new_value = struct.unpack("B", key[6:7])[0]
new_value = (new_value << 1) & 0xFF
else:
new_value = struct.unpack("B", key[i - 1:i])[0]
next_value = struct.unpack("B", key[i:i + 1])[0]
new_value = ((new_value << (8 - i)) & 0xFF) | next_value >> i
new_value = new_value & ~(1 << 0)
new_value = new_value | int(not DES.bit_count(new_value) & 0x1)
new_key += struct.pack("B", new_value)
return new_key | This takes in an a bytes string of 7 bytes and converts it to a bytes
string of 8 bytes with the odd parity bit being set to every 8 bits,
For example
b"\x01\x02\x03\x04\x05\x06\x07"
00000001 00000010 00000011 00000100 00000101 00000110 00000111
is converted to
b"\x01\x80\x80\x61\x40\x29\x19\x0E"
00000001 10000000 10000000 01100001 01000000 00101001 00011001 00001110
https://crypto.stackexchange.com/questions/15799/des-with-actual-7-byte-key
:param key: 7-byte string sized key
:return: 8-byte string with the parity bits set from the 7-byte string |
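A quick check of the parity property on the expanded key shown in the docstring example above:

```python
expanded = b"\x01\x80\x80\x61\x40\x29\x19\x0E"

# each output byte carries 7 key bits plus a parity bit chosen so the byte has odd parity
assert all(bin(byte).count("1") % 2 == 1 for byte in expanded)
```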
12,952 | def getContactUIDForUser(self):
membership_tool = api.get_tool("portal_membership")
member = membership_tool.getAuthenticatedMember()
username = member.getUserName()
r = self.portal_catalog(
portal_type="Contact",
getUsername=username
)
if len(r) == 1:
return r[0].UID | Get the UID of the user associated with the authenticated user |
12,953 | def get_value_tuple(self):
retval = tuple()
for val in self.VALUES:
retval += (getattr(self, val),)
return retval | Returns a tuple of the color's values (in order). For example,
an LabColor object will return (lab_l, lab_a, lab_b), where each
member of the tuple is the float value for said variable. |
12,954 | def blk_1d(blk, shape):
maxpix, rem = blk_coverage_1d(blk, shape)
for i in range(0, maxpix, blk):
yield slice(i, i + blk)
if rem != 0:
yield slice(maxpix, shape) | Iterate through the slices that recover a line.
This function is used by :func:`blk_nd` as a base 1d case.
The last slice is returned even if is lesser than blk.
:param blk: the size of the block
:param shape: the size of the array
:return: a generator that yields the slices |
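A self-contained sketch of the same slicing, with the helper `blk_coverage_1d` (not shown above) replaced by an inline computation:

```python
def blk_1d_sketch(blk, shape):
    maxpix = (shape // blk) * blk      # length covered by whole blocks
    rem = shape - maxpix               # leftover samples at the end
    for i in range(0, maxpix, blk):
        yield slice(i, i + blk)
    if rem != 0:
        yield slice(maxpix, shape)     # final, shorter slice

print(list(blk_1d_sketch(4, 10)))
# [slice(0, 4, None), slice(4, 8, None), slice(8, 10, None)]
```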
12,955 | def unpack(self, buff, offset=0):
super().unpack(buff, offset)
self.version = self._version_ihl.value >> 4
self.ihl = self._version_ihl.value & 15
self.dscp = self._dscp_ecn.value >> 2
self.ecn = self._dscp_ecn.value & 3
self.length = self.length.value
self.identification = self.identification.value
self.flags = self._flags_offset.value >> 13
self.offset = self._flags_offset.value & 8191
self.ttl = self.ttl.value
self.protocol = self.protocol.value
self.checksum = self.checksum.value
self.source = self.source.value
self.destination = self.destination.value
if self.ihl > 5:
options_size = (self.ihl - 5) * 4
self.data = self.options.value[options_size:]
self.options = self.options.value[:options_size]
else:
self.data = self.options.value
self.options = b'' | Unpack a binary struct into this object's attributes.
Return the values instead of the lib's basic types.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails. |
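The version/IHL and DSCP/ECN fields above are packed bit fields; a tiny illustration of the shifts and masks on a typical first header byte:

```python
version_ihl = 0x45            # common first byte of an IPv4 header
version = version_ihl >> 4    # 4  (IPv4)
ihl = version_ihl & 15        # 5  -> header length is 5 * 4 = 20 bytes, so no options
print(version, ihl)
```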
12,956 | def time2slurm(timeval, unit="s"):
d, h, m, s = 24*3600, 3600, 60, 1
timeval = Time(timeval, unit).to("s")
days, hours = divmod(timeval, d)
hours, minutes = divmod(hours, h)
minutes, secs = divmod(minutes, m)
return "%d-%d:%d:%d" % (days, hours, minutes, secs) | Convert a number representing a time value in the given unit (Default: seconds)
to a string following the slurm convention: "days-hours:minutes:seconds".
>>> assert time2slurm(61) == '0-0:1:1' and time2slurm(60*60+1) == '0-1:0:1'
>>> assert time2slurm(0.5, unit="h") == '0-0:30:0' |
12,957 | def visit_RETURN(self, node):
if len(node.children) == 2:
node.children[1] = (yield ToVisit(node.children[1]))
yield node | Visits only children[1], since children[0] points to
the current function being returned from (if any), and
might cause infinite recursion. |
12,958 | def add(self, opener):
index = len(self.openers)
self.openers[index] = opener
for name in opener.names:
self.registry[name] = index | Adds an opener to the registry
:param opener: Opener object
:type opener: Opener inherited object |
12,959 | def copy(self):
return _TimeAnchor(self.reading_id, self.uptime, self.utc, self.is_break, self.exact) | Return a copy of this _TimeAnchor. |
12,960 | def mask_unphysical(self, data):
if not self.valid_range:
return data
else:
return np.ma.masked_outside(data, np.min(self.valid_range),
np.max(self.valid_range)) | Mask data array where values are outside physically valid range. |
12,961 | def make_defaults_and_annotations(make_function_instr, builders):
n_defaults, n_kwonlydefaults, n_annotations = unpack_make_function_arg(
make_function_instr.arg
)
if n_annotations:
load_annotation_names = builders.pop()
annotations = dict(zip(
reversed(load_annotation_names.arg),
(make_expr(builders) for _ in range(n_annotations - 1))
))
else:
annotations = {}
kwonlys = {}
while n_kwonlydefaults:
default_expr = make_expr(builders)
key_instr = builders.pop()
if not isinstance(key_instr, instrs.LOAD_CONST):
raise DecompilationError(
"kwonlydefault key is not a LOAD_CONST: %s" % key_instr
)
if not isinstance(key_instr.arg, str):
raise DecompilationError(
"kwonlydefault key builder is not a "
"'LOAD_CONST of a string: %s" % key_instr
)
kwonlys[key_instr.arg] = default_expr
n_kwonlydefaults -= 1
defaults = make_exprs(builders, n_defaults)
return defaults, kwonlys, annotations | Get the AST expressions corresponding to the defaults, kwonly defaults, and
annotations for a function created by `make_function_instr`. |
12,962 | def show_top(queue=False, **kwargs):
if 'env' in kwargs:
kwargs.pop('env')
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.HighState(opts,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError('Pillar failed to render', info=errors)
errors = []
top_ = st_.get_top()
errors += st_.verify_tops(top_)
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
matches = st_.top_matches(top_)
return matches | Return the top data that the minion will use for a highstate
CLI Example:
.. code-block:: bash
salt '*' state.show_top |
12,963 | def getHTML(self):
root = self.getRoot()
if root is None:
raise ValueError()
if self.doctype:
doctypeStr = '<!%s>\n' % (self.doctype)
else:
doctypeStr = ''
rootNode = self.getRoot()
if rootNode.tagName == INVISIBLE_ROOT_TAG:
return doctypeStr + rootNode.innerHTML
else:
return doctypeStr + rootNode.outerHTML | getHTML - Get the full HTML as contained within this tree.
If parsed from a document, this will contain the original whitespacing.
@returns - <str> of html
@see getFormattedHTML
@see getMiniHTML |
12,964 | def getMaximinScores(profile):
elecType = profile.getElecType()
if elecType != "soc" and elecType != "toc":
print("ERROR: unsupported election type")
exit()
wmgMap = profile.getWmg()
maximinscores = {}
for cand in wmgMap.keys():
maximinscores[cand] = float("inf")
for cand1, cand2 in itertools.combinations(wmgMap.keys(), 2):
if cand2 in wmgMap[cand1].keys():
maximinscores[cand1] = min(maximinscores[cand1], wmgMap[cand1][cand2])
maximinscores[cand2] = min(maximinscores[cand2], wmgMap[cand2][cand1])
return maximinscores | Returns a dictionary that associates integer representations of each candidate with their
Copeland score.
:ivar Profile profile: A Profile object that represents an election profile. |
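A sketch of the maximin computation on a hand-written weighted majority graph (plain dicts instead of a `Profile` object):

```python
# wmg[a][b] = margin by which candidate a beats candidate b in pairwise comparisons
wmg = {
    1: {2: 3, 3: -1},
    2: {1: -3, 3: 1},
    3: {1: 1, 2: -1},
}

maximin = {cand: min(margins.values()) for cand, margins in wmg.items()}
print(maximin)  # {1: -1, 2: -3, 3: -1} -> candidates 1 and 3 have the best worst-case margin
```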
12,965 | def postprocess(x, n_bits_x=8):
x = tf.where(tf.is_finite(x), x, tf.ones_like(x))
x = tf.clip_by_value(x, -0.5, 0.5)
x += 0.5
x = x * 2**n_bits_x
return tf.cast(tf.clip_by_value(x, 0, 255), dtype=tf.uint8) | Converts x from [-0.5, 0.5], to [0, 255].
Args:
x: 3-D or 4-D Tensor normalized between [-0.5, 0.5]
n_bits_x: Number of bits representing each pixel of the output.
Defaults to 8, to default to 256 possible values.
Returns:
x: 3-D or 4-D Tensor representing images or videos. |
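A NumPy rendering of the same clip-and-scale arithmetic (the row above uses TensorFlow ops):

```python
import numpy as np

x = np.array([-0.6, -0.5, 0.0, 0.49, 2.0])
x = np.clip(x, -0.5, 0.5) + 0.5                   # map into [0, 1]
x = np.clip(x * 2 ** 8, 0, 255).astype(np.uint8)  # scale to 8-bit pixel values
print(x)  # [  0   0 128 253 255]
```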
12,966 | def seek(self, offset, whence=os.SEEK_SET):
if not self._is_open:
raise IOError()
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if whence == os.SEEK_CUR:
offset += self._current_offset
elif whence == os.SEEK_END:
if self._decrypted_stream_size is None:
self._decrypted_stream_size = self._GetDecryptedStreamSize()
if self._decrypted_stream_size is None:
raise IOError()
offset += self._decrypted_stream_size
elif whence != os.SEEK_SET:
raise IOError()
if offset < 0:
raise IOError()
if offset != self._current_offset:
self._current_offset = offset
self._realign_offset = True | Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek.
whence (Optional[int]): value that indicates whether offset is an
absolute or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed. |
12,967 | def commit_analyzer(commits, label_pattern, label_position="footer"):
pattern_string = re.escape(label_pattern)
action_cgp = r"(?P<type>\w+)"
scope_cgp = r"(?P<scope>\w*)"
subject_cgp = r"(?P<subject>.+)"
pattern_string = pattern_string.replace(r"\{type\}", action_cgp).replace(
r"\{scope\}", scope_cgp
)
if label_position == "header":
pattern_string = pattern_string.replace(r"\{subject\}", subject_cgp)
regexp_pattern = re.compile(pattern_string)
semantic_commits = []
for commit in commits:
text = commit.header if label_position == "header" else commit.footer
match = regexp_pattern.search(text)
if not match:
continue
metadata = match.groupdict()
if label_position == "footer":
metadata["subject"] = commit.header
sc = SemanticCommit(
subject=metadata["subject"].strip(),
type=metadata["type"],
scope=metadata.get("scope") or None,
message=commit.message,
)
semantic_commits.append(sc)
return semantic_commits | Analyzes a list of :class:`~braulio.git.Commit` objects searching for
messages that match a given message convention and extract metadata from
them.
A message convention is determined by ``label_pattern``, which is not a
regular expression pattern. Instead it must be a string literals with
placeholders that indicates metadata information in a given position of
the commit message. The possible placeholders are ``{type}``, ``{scope}``
and ``{subject}``.
The ``label_position`` argument dictates where (header|footer) to look in
the commit message for the pattern passed in ```label_pattern``.
``{subject}`` must be included in ``label_pattern`` just if the metadata is
in the header, otherwise must be omitted.
Examples.
If ``label_position`` is equal to **header**, in order to match the next
commit message::
fix(cli): Ensure --help option doesn't hang
The pattern must be ``{type}({scope}): {subject}``, where the metadata
information extracted will be::
{
'type': 'fix',
'scope': 'cli',
'subject': 'Ensure --help option doesn't hang'
} |
12,968 | def create_database(self, name):
statement = "CREATE DATABASE {0} DEFAULT CHARACTER SET latin1 COLLATE latin1_swedish_ci".format(wrap(name))
return self.execute(statement) | Create a new database. |
12,969 | def update_task(self, differences):
self.log(.format(differences))
object_name = .format(self.app_label, self.instance.master._meta.verbose_name)
lang = self.instance.language_code
object_pk = self.instance.master.pk
for field in differences:
value = getattr(self.instance, field)
if value is None or value == '':
continue
try:
TransTask.objects.filter(
language__code=lang, object_field=field, object_name=object_name, object_pk=object_pk
).update(done=True, date_modification=datetime.now(), object_field_value_translation=value)
self.log()
except TransTask.DoesNotExist:
self.log(.format(lang, field, object_name, object_pk)) | Updates a task as done if we have a new value for this alternative language
:param differences:
:return: |
12,970 | def add_default_plugins(self, except_global=[], except_local=[]):
for spec in plugins:
ptype = spec.get(, )
if ptype == and spec.module not in except_global:
self.add_plugin_spec(spec)
if ptype == and spec.module not in except_local:
self.add_plugin_spec(spec) | Add the ginga-distributed default set of plugins to the
reference viewer. |
12,971 | def locked_get(self):
credential = self._backend.locked_get(self._key)
if credential is not None:
credential.set_store(self)
return credential | Retrieves the current credentials from the store.
Returns:
An instance of :class:`oauth2client.client.Credentials` or `None`. |
12,972 | def get_value_for_expr(self, expr, target):
if expr in LOGICAL_OPERATORS.values():
return None
rvalue = expr[]
if rvalue == HISTORICAL:
history = self.history[target]
if len(history) < self.history_size:
return None
rvalue = sum(history) / float(len(history))
rvalue = expr[](rvalue)
return rvalue | I have no idea. |
12,973 | def get_message_handler(self, message_handlers):
encoder = self.options.encoder
try:
return message_handlers[encoder]
except KeyError:
raise NotImplementedError( % encoder) | Create a MessageHandler for the configured Encoder
:param message_handlers: a dictionart of MessageHandler keyed by encoder
:return: a MessageHandler |
12,974 | def jam_pack(jam, **kwargs):
if not hasattr(jam.sandbox, 'muda'):
jam.sandbox.muda = jams.Sandbox(**jam.sandbox.muda)
jam.sandbox.muda.update(**kwargs)
return jam | Pack data into a jams sandbox.
If not already present, this creates a `muda` field within `jam.sandbox`,
along with `history`, `state`, and version arrays which are populated by
deformation objects.
Any additional fields can be added to the `muda` sandbox by supplying
keyword arguments.
Parameters
----------
jam : jams.JAMS
A JAMS object
Returns
-------
jam : jams.JAMS
The updated JAMS object
Examples
--------
>>> jam = jams.JAMS()
>>> muda.jam_pack(jam, my_data=dict(foo=5, bar=None))
>>> jam.sandbox
<Sandbox: muda>
>>> jam.sandbox.muda
<Sandbox: state, version, my_data, history>
>>> jam.sandbox.muda.my_data
{'foo': 5, 'bar': None} |
12,975 | def get_observation_fields(search_query: str="", page: int=1) -> List[Dict[str, Any]]:
payload = {
'q': search_query,
'page': page
}
response = requests.get("{base_url}/observation_fields.json".format(base_url=INAT_BASE_URL), params=payload)
return response.json() | Search the (globally available) observation
:param search_query:
:param page:
:return: |
12,976 | def _learn_init_params(self, n_calib_beats=8):
if self.verbose:
print()
last_qrs_ind = -self.rr_max
qrs_inds = []
qrs_amps = []
noise_amps = []
ricker_wavelet = signal.ricker(self.qrs_radius * 2, 4).reshape(-1,1)
peak_inds_f = find_local_peaks(self.sig_f, self.qrs_radius)
peak_nums_r = np.where(peak_inds_f > self.qrs_width)[0]
peak_nums_l = np.where(peak_inds_f <= self.sig_len - self.qrs_width)[0]
if (not peak_inds_f.size or not peak_nums_r.size
or not peak_nums_l.size):
if self.verbose:
print(
% n_calib_beats)
self._set_default_init_params()
return
for peak_num in range(peak_nums_r[0], peak_nums_l[-1]):
i = peak_inds_f[peak_num]
sig_segment = normalize((self.sig_f[i - self.qrs_radius:
i + self.qrs_radius]).reshape(-1, 1), axis=0)
xcorr = np.correlate(sig_segment[:, 0], ricker_wavelet[:,0])
if xcorr > 0.6 and i-last_qrs_ind > self.rr_min:
last_qrs_ind = i
qrs_inds.append(i)
qrs_amps.append(self.sig_i[i])
else:
noise_amps.append(self.sig_i[i])
if len(qrs_inds) == n_calib_beats:
break
if len(qrs_inds) == n_calib_beats:
if self.verbose:
print( % n_calib_beats
+ )
qrs_amp = np.mean(qrs_amps)
if noise_amps:
noise_amp = np.mean(noise_amps)
else:
noise_amp = qrs_amp / 10
rr_intervals = np.diff(qrs_inds)
rr_intervals = rr_intervals[rr_intervals < self.rr_max]
if rr_intervals.any():
rr_recent = np.mean(rr_intervals)
else:
rr_recent = self.rr_init
last_qrs_ind = min(0, qrs_inds[0] - self.rr_min - 1)
self._set_init_params(qrs_amp_recent=qrs_amp,
noise_amp_recent=noise_amp,
rr_recent=rr_recent,
last_qrs_ind=last_qrs_ind)
self.learned_init_params = True
else:
if self.verbose:
print(
% n_calib_beats)
self._set_default_init_params() | Find a number of consecutive beats and use them to initialize:
- recent qrs amplitude
- recent noise amplitude
- recent rr interval
- qrs detection threshold
The learning works as follows:
- Find all local maxima (largest sample within `qrs_radius`
samples) of the filtered signal.
- Inspect the local maxima until `n_calib_beats` beats are
found:
- Calculate the cross-correlation between a ricker wavelet of
length `qrs_width`, and the filtered signal segment centered
around the local maximum.
- If the cross-correlation exceeds 0.6, classify it as a beat.
- Use the beats to initialize the previously described
parameters.
- If the system fails to find enough beats, the default
parameters will be used instead. See the docstring of
`XQRS._set_default_init_params` for detauls.
Parameters
----------
n_calib_beats : int, optional
Number of calibration beats to detect for learning |
12,977 | def _convert_default_value(self, default):
if default is None:
return None
if isinstance(default, str):
if self.special_type == 'string':
return default.encode() + b'\x00'
raise DataError("You can only pass a unicode string if you are declaring a string type config variable", default=default)
if isinstance(default, (bytes, bytearray)):
if self.special_type == 'string' and isinstance(default, bytes):
default += b'\x00'
return default
if isinstance(default, int):
default = [default]
format_string = "<" + (self.base_type*len(default))
return struct.pack(format_string, *default) | Convert the passed default value to binary.
The default value (if passed) may be specified as either a `bytes`
object or a python int or list of ints. If an int or list of ints is
passed, it is converted to binary. Otherwise, the raw binary data is
used.
If you pass a bytes object with python_type as True, do not null terminate
it, an additional null termination will be added.
Passing a unicode string is only allowed if as_string is True and it
will be encoded as utf-8 and null terminated for use as a default value. |
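A small illustration of the int-list branch described above, assuming a hypothetical `base_type` format character of `'B'` (unsigned byte):

```python
import struct

default = [1, 2, 3]
base_type = "B"                                   # hypothetical single-byte base type
packed = struct.pack("<" + base_type * len(default), *default)
print(packed)  # b'\x01\x02\x03'
```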
12,978 | def panels(self):
ax1 = self.fig.add_subplot(211)
ax2 = self.fig.add_subplot(212, sharex=ax1)
return (ax2, self.gene_panel), (ax1, self.signal_panel) | Add 2 panels to the figure, top for signal and bottom for gene models |
12,979 | def add_document(self, question, answer):
question = question.strip()
answer = answer.strip()
session = self.Session()
if session.query(Document) \
.filter_by(text=question, answer=answer).count():
logger.info(.format(question, answer))
return
logger.info(.format(question, answer))
grams = self._get_grams(session, question, make=True)
doc = Document(question, answer)
doc.grams = list(grams)
self._recalc_idfs(session, grams)
session.add(doc)
session.commit() | Add question answer set to DB.
:param question: A question to an answer
:type question: :class:`str`
:param answer: An answer to a question
:type answer: :class:`str` |
12,980 | def should_execute(self, workload):
if not self._suspended.is_set():
return True
workload = unwrap_workload(workload)
return hasattr(workload, ) and getattr(workload, ) | If we have been suspended by i3bar, only execute those modules that set the keep_alive flag to a truthy
value. See the docs on the suspend_signal_handler method of the io module for more information. |
12,981 | def _GetMemberDataTypeMaps(self, data_type_definition, data_type_map_cache):
if not data_type_definition:
raise errors.FormatError('missing data type definition')
members = getattr(data_type_definition, 'members', None)
if not members:
raise errors.FormatError('invalid data type definition missing members')
data_type_maps = []
members_data_size = 0
for member_definition in members:
if isinstance(member_definition, data_types.MemberDataTypeDefinition):
member_definition = member_definition.member_data_type_definition
if (data_type_definition.byte_order != definitions.BYTE_ORDER_NATIVE and
member_definition.byte_order == definitions.BYTE_ORDER_NATIVE):
member_definition = copy.copy(member_definition)
member_definition.name = '_{0:s}_{1:s}'.format(
data_type_definition.name, member_definition.name)
member_definition.byte_order = data_type_definition.byte_order
if member_definition.name not in data_type_map_cache:
data_type_map = DataTypeMapFactory.CreateDataTypeMapByType(
member_definition)
data_type_map_cache[member_definition.name] = data_type_map
data_type_map = data_type_map_cache[member_definition.name]
if members_data_size is not None:
if not isinstance(member_definition, data_types.PaddingDefinition):
byte_size = member_definition.GetByteSize()
else:
_, byte_size = divmod(
members_data_size, member_definition.alignment_size)
if byte_size > 0:
byte_size = member_definition.alignment_size - byte_size
data_type_map.byte_size = byte_size
if byte_size is None:
members_data_size = None
else:
members_data_size += byte_size
data_type_maps.append(data_type_map)
return data_type_maps | Retrieves the member data type maps.
Args:
data_type_definition (DataTypeDefinition): data type definition.
data_type_map_cache (dict[str, DataTypeMap]): cached data type maps.
Returns:
list[DataTypeMap]: member data type maps.
Raises:
FormatError: if the data type maps cannot be determined from the data
type definition. |
12,982 | def make_rw(obj: Any):
if isinstance(obj, RoDict):
return {k: make_rw(v) for k, v in obj.items()}
elif isinstance(obj, RoList):
return [make_rw(x) for x in obj]
else:
return obj | Copy a RO object into a RW structure made with standard Python classes.
WARNING there is no protection against recursion. |
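Usage is a single call; assuming `ro_cfg` is an `RoDict` (possibly with nested `RoDict`/`RoList` values):

rw_cfg = make_rw(ro_cfg)   # plain dicts and lists all the way down
rw_cfg["retries"] = 5      # the copy is mutable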
12,983 | def get_policy_config(platform,
filters=None,
prepend=True,
pillar_key='acl',
pillarenv=None,
saltenv=None,
merge_pillar=True,
only_lower_merge=False,
revision_id=None,
revision_no=None,
revision_date=True,
revision_date_format='%Y/%m/%d'):
if not filters:
filters = []
if merge_pillar and not only_lower_merge:
policy_pillar_cfg = _get_pillar_cfg(pillar_key,
saltenv=saltenv,
pillarenv=pillarenv)
filters = _merge_list_of_dict(filters, policy_pillar_cfg, prepend=prepend)
policy_object = _get_policy_object(platform,
filters=filters,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv,
merge_pillar=merge_pillar)
policy_text = six.text_type(policy_object)
return _revision_tag(policy_text,
revision_id=revision_id,
revision_no=revision_no,
revision_date=revision_date,
revision_date_format=revision_date_format) | Return the configuration of the whole policy.
platform
The name of the Capirca platform.
filters
List of filters for this policy.
If not specified or empty, will try to load the configuration from the pillar,
unless ``merge_pillar`` is set as ``False``.
prepend: ``True``
When ``merge_pillar`` is set as ``True``, the final list of filters generated by merging
the filters from ``filters`` with those defined in the pillar (if any): new filters are prepended
at the beginning, while existing ones will preserve the position. To add the new filters
at the end of the list, set this argument to ``False``.
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
only_lower_merge: ``False``
Specify if it should merge only the filters and terms fields. Otherwise it will try
to merge everything at the policy level. Default: ``False``.
revision_id
Add a comment in the policy config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the policy configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
CLI Example:
.. code-block:: bash
salt '*' capirca.get_policy_config juniper pillar_key=netacl
Output Example:
.. code-block:: text
firewall {
family inet {
replace:
/*
** $Id:$
** $Date:$
** $Revision:$
**
*/
filter my-filter {
term my-term {
from {
source-port [ 1234 1235 ];
}
then {
reject;
}
}
term my-other-term {
from {
protocol tcp;
source-port 5678-5680;
}
then accept;
}
}
}
}
firewall {
family inet {
replace:
/*
** $Id:$
** $Date:$
** $Revision:$
**
*/
filter my-other-filter {
interface-specific;
term dummy-term {
from {
protocol [ tcp udp ];
}
then {
reject;
}
}
}
}
}
The policy configuration has been loaded from the pillar, having the following structure:
.. code-block:: yaml
netacl:
- my-filter:
options:
- not-interface-specific
terms:
- my-term:
source_port: [1234, 1235]
action: reject
- my-other-term:
source_port:
- [5678, 5680]
protocol: tcp
action: accept
- my-other-filter:
terms:
- dummy-term:
protocol:
- tcp
- udp
action: reject |
12,984 | def td_sp(points, speed_threshold):
if len(points) <= 2:
return points
else:
max_speed_threshold = 0
found_index = 0
for i in range(1, len(points)-1):
dt1 = time_dist(points[i], points[i-1])
if dt1 == 0:
dt1 = 0.000000001
vim = loc_dist(points[i], points[i-1]) / dt1
dt2 = time_dist(points[i+1], points[i])
if dt2 == 0:
dt2 = 0.000000001
vi_ = loc_dist(points[i+1], points[i]) / dt2
if abs(vi_ - vim) > max_speed_threshold:
max_speed_threshold = abs(vi_ - vim)
found_index = i
if max_speed_threshold > speed_threshold:
one = td_sp(points[:found_index], speed_threshold)
two = td_sp(points[found_index:], speed_threshold)
one.extend(two)
return one
else:
return [points[0], points[-1]] | Top-Down Speed-Based Trajectory Compression Algorithm
Detailed in https://www.itc.nl/library/Papers_2003/peer_ref_conf/meratnia_new.pdf
Args:
points (:obj:`list` of :obj:`Point`): trajectory or part of it
speed_threshold (float): max speed error, in km/h
Returns:
:obj:`list` of :obj:`Point`, compressed trajectory |
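A usage sketch, assuming a list of `Point` objects compatible with `loc_dist` and `time_dist` (the `track` variable is illustrative):

compressed = td_sp(track.points, speed_threshold=5.0)   # tolerate up to 5 km/h of speed error
print(len(track.points), "points reduced to", len(compressed))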
12,985 | def delete_bucket():
args = parser.parse_args()
s3_bucket(args.aws_access_key_id, args.aws_secret_access_key, args.bucket_name)().delete() | Delete S3 Bucket |
12,986 | def construct(cls, name: str, declared_fields: typing.List[tuple]):
@classmethod
def from_dict(cls, adict):
invalid_req = InvalidRequestObject()
values = {}
for item in fields(cls):
value = None
if item.metadata and 'required' in item.metadata and item.metadata['required']:
if item.name not in adict or adict.get(item.name) is None:
invalid_req.add_error(item.name, 'is required')
else:
value = adict[item.name]
elif item.name in adict:
value = adict[item.name]
elif item.default:
value = item.default
try:
if item.type not in [typing.Any, ] and value is not None:
if item.type in [int, float, str, bool, list, dict, tuple,
datetime.date, datetime.datetime]:
value = item.type(value)
else:
if not (isinstance(value, item.type) or issubclass(value, item.type)):
invalid_req.add_error(
item.name,
'{} should be of type {}'.format(item.name, item.type))
except Exception:
invalid_req.add_error(
item.name,
'{} is an invalid value for field {}'.format(value, item.name))
values[item.name] = value
if invalid_req.has_errors:
return invalid_req
return cls(**values)
formatted_fields = cls._format_fields(declared_fields)
dc = make_dataclass(name, formatted_fields,
namespace={'from_dict': from_dict, 'is_valid': True})  # dict key names assumed; literals were stripped in the source
return dc | Utility method packaged along with the factory to be able to construct Request Object
classes on the fly.
Example:
.. code-block:: python
UserShowRequestObject = Factory.create_request_object(
'CreateRequestObject',
[('identifier', int, {'required': True}),
('name', str, {'required': True}),
('desc', str, {'default': 'Blah'})])
And then create a request object like so:
.. code-block:: python
request_object = UserShowRequestObject.from_dict(
{'identifier': 112,
'name': 'Jane',
'desc': "Doer is not Doe"})
The third tuple element is a `dict` of the form: {'required': True, 'default': 'John'}
* ``required`` is False by default, so ``{required: False, default: 'John'}`` and \
``{default: 'John'}`` evaluate to the same field definition
* ``default`` is a *concrete* value of the correct type |
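Continuing the docstring's example, passing a dict that omits a required field returns an `InvalidRequestObject` instead of raising:

request_object = UserShowRequestObject.from_dict({'name': 'Jane'})
# 'identifier' is required but missing, so the result carries errors
assert request_object.has_errors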
12,987 | def get_type_properties(self, property_obj, name, additional_prop=False):
property_type, property_format, property_dict = \
super(Schema, self).get_type_properties(property_obj, name, additional_prop=additional_prop)
_schema = self.storage.get(property_type)
if _schema and ('additionalProperties' in property_obj):
_property_type, _property_format, _property_dict = super(Schema, self).get_type_properties(
property_obj['additionalProperties'], '{}_additional'.format(name), additional_prop=True)  # derived name assumed
if _property_type not in PRIMITIVE_TYPES:
SchemaMapWrapper.wrap(self.storage.get(_property_type))
_schema.nested_schemas.add(_property_type)
else:
_schema.type_format = _property_type
return property_type, property_format, property_dict | Extend parents 'Get internal properties of property'-method |
12,988 | def schemaValidateOneElement(self, elem):
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlSchemaValidateOneElement(self._o, elem__o)
return ret | Validate a branch of a tree, starting with the given @elem. |
12,989 | def _create_emitter(self, event):
if not hasattr(self, event):
setattr(self, event,
lambda *args, **kwargs: self.emit(event, *args, **kwargs)) | Create a method that emits an event of the same name. |
12,990 | def call(self, op, args):
converted = self.convert_list(args)
return self._call(op, converted) | Calls operation `op` on args `args` with this backend.
:return: A backend object representing the result. |
12,991 | def vinet_v(p, v0, k0, k0p, min_strain=0.01):
if isuncertainties([p, v0, k0, k0p]):
f_u = np.vectorize(uct.wrap(vinet_v_single), excluded=[1, 2, 3, 4])
return f_u(p, v0, k0, k0p, min_strain=min_strain)
else:
f_v = np.vectorize(vinet_v_single, excluded=[1, 2, 3, 4])
return f_v(p, v0, k0, k0p, min_strain=min_strain) | find volume at given pressure
:param p: pressure in GPa
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:param min_strain: defining minimum v/v0 value to search volume for
:return: unit cell volume at high pressure in A^3
:note: wrapper function vectorizing vinet_v_single |
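A numeric usage sketch; the parameter values below are placeholders, not reference data:

import numpy as np

p = np.array([0.0, 10.0, 50.0])                 # pressures in GPa
v = vinet_v(p, v0=74.7, k0=160.0, k0p=4.0)      # unit-cell volumes in A^3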
12,992 | def popup(self, title, callfn, initialdir=None, filename=None):
self.cb = callfn
self.filew.set_title(title)
if initialdir:
self.filew.set_current_folder(initialdir)
if filename:
self.filew.set_current_name(filename)
self.filew.show() | Let user select and load file. |
12,993 | def web_services_from_str(
list_splitter_fn=ujson.loads,
):
def class_list_converter(collector_services_str):
if isinstance(collector_services_str, basestring):
all_collector_services = list_splitter_fn(collector_services_str)
else:
raise TypeError()
class InnerClassList(RequiredConfig):
pass  # class body not recovered from the source
return InnerClassList
return class_list_converter | parameters:
list_splitter_fn - a function that will take the json compatible string
representing a list of mappings. |
12,994 | def args_ok(self, options, args):
for i in ['erase', 'execute']:
for j in ['annotate', 'html', 'report', 'xml']:
if (i in options.actions) and (j in options.actions):
self.help_fn("You can't specify the '%s' and '%s' "
"options at the same time." % (i, j))
return False
args_allowed = any(
a in options.actions
for a in ['execute', 'annotate', 'html', 'debug', 'report', 'xml'])
if args and not args_allowed:
self.help_fn("Unexpected arguments: %s" % " ".join(args))
return False
if 'execute' in options.actions and not args:
self.help_fn("Nothing to do.")
return False
return True | Check for conflicts and problems in the options.
Returns True if everything is ok, or False if not. |
12,995 | def from_config(self, k, v):
if k == "setup":
return from_commandline(v, classname=to_commandline(datagen.DataGenerator()))
return super(DataGenerator, self).from_config(k, v) | Hook method that allows converting values from the dictionary.
:param k: the key in the dictionary
:type k: str
:param v: the value
:type v: object
:return: the potentially parsed value
:rtype: object |
12,996 | def run_check200(_):
tstr = ''
idx = 1
for kind in config.router_post.keys():
posts = MPost.query_all(kind=kind, limit=20000)
for post in posts:
the_url0 = '{site_url}/{kind_url}/{uid}'.format(  # URL templates assumed; literals stripped in the source
site_url=config.SITE_CFG['site_url'],
kind_url=config.router_post[post.kind],
uid=post.uid)
the_url = '{site_url}/{kind_url}/_edit/{uid}'.format(
site_url=config.SITE_CFG['site_url'],
kind_url=config.router_post[post.kind],
uid=post.uid)
req = requests.get(the_url0)
if req.status_code == 200:
pass
else:
print(the_url0)
tstr = tstr + DT_STR.format(idx=str(idx).zfill(2), url0=the_url0, code=req.status_code, edit_link=the_url)
idx = idx + 1
time_local = time.localtime(timestamp())
with open('check_200_{d}.html'.format(d=str(time.strftime("%Y_%m_%d", time_local))), 'w') as fileo:  # output file name assumed
fileo.write(HTML_TMPL.format(cnt=tstr))
print() | Running the script. |
12,997 | def pool_full(self, session):
if not self.task.pool:
return False
pool = (
session
.query(Pool)
.filter(Pool.pool == self.task.pool)
.first()
)
if not pool:
return False
open_slots = pool.open_slots(session=session)
return open_slots <= 0 | Returns a boolean as to whether the slot pool has room for this
task to run |
12,998 | def _instantiate_layers(self):
with self._enter_variable_scope(check_same_graph=False):
self._layers = tuple(conv.Conv2D(name="conv_2d_{}".format(i),
output_channels=self._output_channels[i],
kernel_shape=self._kernel_shapes[i],
stride=self._strides[i],
rate=self._rates[i],
padding=self._paddings[i],
use_bias=self._use_bias[i],
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
data_format=self._data_format)
for i in xrange(self._num_layers)) | Instantiates all the convolutional modules used in the network. |
12,999 | def find_global(self, pattern):
pos_s = self.reader.search(pattern)
if len(pos_s) == 0:
return -1
return pos_s[0] | Searches for the pattern in the whole process memory space and returns the first occurrence.
This is exhaustive! |
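Typical use is locating a byte signature in the target process; assuming the scanner object is exposed as `mem`:

offset = mem.find_global(b"\x7fELF")   # first match, or -1 if the pattern is absent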