Unnamed: 0 (int64, 0–389k) | code (string, 26–79.6k chars) | docstring (string, 1–46.9k chars)
---|---|---|
26,700 |
def _getViewerPrivateApplication(self):
ls = self.store.findUnique(userbase.LoginSystem)
substore = ls.accountByAddress(*self.username.split('@')).avatars.open()
from xmantissa.webapp import PrivateApplication
return substore.findUnique(PrivateApplication)
|
Get the L{PrivateApplication} object for the logged-in user who is
viewing this resource, as indicated by its C{username} attribute.
This is highly problematic because it precludes the possibility of
separating the stores of the viewer and the viewee into separate
processes, and it is only here until we can get rid of it. The reason
it remains is that some application code still imports things which
subclass L{PublicAthenaLivePage} and L{PublicPage} and uses them with
usernames specified. See ticket #2702 for progress on this goal.
However, Mantissa itself will no longer set this class's username
attribute to anything other than None, because authenticated users'
pages will be generated using
L{xmantissa.webapp._AuthenticatedWebViewer}. This method is used only
to render content in the shell template, and those classes have a direct
reference to the requisite object.
@rtype: L{PrivateApplication}
|
26,701 |
def execute(command, return_output=True, log_file=None, log_settings=None, error_logfile=None, timeout=None, line_function=None, poll_timing = 0.01, logger=None, working_folder=None, env=None):
tmp_log = False
if log_settings:
log_folder = log_settings.get("log_folder")  # key name assumed; literal lost in extraction
else:
tmp_log = True
log_folder = tempfile.mkdtemp()
if not log_file:
log_file = os.path.join(log_folder, "commands", "execute-command-logfile-%s.log" % UUID.uuid4())
try:
if not os.path.isdir(os.path.join(log_folder, "commands")):
os.makedirs(os.path.join(log_folder, "commands"))
except:
pass
if not logger:
logger = logging.getLogger()
logfile_writer = open(log_file, "w")
header = "%s - Executing command (timeout=%s) :\n\t%s\n\n\n" % (datetime.now().isoformat(), timeout, command)
logfile_writer.write(header)
logfile_writer.flush()
logfile_reader = open(log_file, "r")
logfile_reader.seek(0, os.SEEK_END)
logfile_start_position = logfile_reader.tell()
if error_logfile:
err_logfile_writer = open(error_logfile, "w")
else:
err_logfile_writer = logfile_writer
start = datetime.now()
timeout_string = ""
if timeout:
timeout_string = "(timeout=%s)" % timeout
logger.info(u"Executing command %s :\n\t\t%s" % (timeout_string, command) )
# NOTE: the subprocess creation and polling loop were lost in extraction;
# the following is a best-effort reconstruction of the original behaviour.
process = subprocess.Popen(command, shell=True, stdout=logfile_writer,
stderr=err_logfile_writer, cwd=working_folder, env=env)
while process.poll() is None:
if timeout and (datetime.now() - start).total_seconds() > timeout:
process.kill()
raise Exception("Command timed out (timeout=%s) : %s" % (timeout, command))
o = text_utils.uni(logfile_reader.readline()).rstrip()
if o and line_function:
line_function(o)
time.sleep(poll_timing)
if not return_output:
return process.wait()
logfile_reader.seek(logfile_start_position, os.SEEK_SET)
res = text_utils.uni(logfile_reader.read())
try:
logfile_reader.close()
logfile_writer.close()
err_logfile_writer.close()
if tmp_log:
shutil.rmtree(log_folder, ignore_errors=True)
except:
logger.exception("Error while cleaning after tbx.execute() call.")
return res
|
Execute a program and log its standard output to a file.
:param return_output: return the STDOUT value if True, otherwise return the return code
:param log_file: path where the log file should be written (displayed on STDOUT if not set)
:param error_logfile: path where the error log file should be written (displayed on STDERR if not set)
:param timeout: if set, kill the subprocess once "timeout" seconds are reached, then raise an exception.
:param line_function: a callable invoked each time a new line is written (the line is passed as a parameter).
:param poll_timing: wait time between timeout checks and standard-output checks.
:returns: the standard output of the command, or its return code if return_output=False
|
26,702 |
def get_asset_admin_session_for_repository(self, repository_id=None, *args, **kwargs):
if not repository_id:
raise NullArgument()
if not self.supports_asset_admin():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
try:
session = sessions.AssetAdminSession(repository_id,
proxy=self._proxy,
runtime=self._runtime, **kwargs)
except AttributeError:
raise OperationFailed()
return session
|
Gets an asset administration session for the given repository.
arg: repository_id (osid.id.Id): the Id of the repository
return: (osid.repository.AssetAdminSession) - an
AssetAdminSession
raise: NotFound - repository_id not found
raise: NullArgument - repository_id is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_admin() or
supports_visible_federation() is false
compliance: optional - This method must be implemented if
supports_asset_admin() and
supports_visible_federation() are true.
|
26,703 |
def installed_plugins(only_conda=False):
# NOTE: several string literals in this function were lost in extraction;
# key and path names below are best-effort reconstructions.
available_path = MICRODROP_CONDA_SHARE.joinpath('plugins', 'available')
if not available_path.isdir():
return []
installed_plugins_ = []
for plugin_path_i in available_path.dirs():
if not _islinklike(plugin_path_i):
try:
with plugin_path_i.joinpath('properties.yml').open() as input_:
properties_i = yaml.load(input_.read())
except:
logger.info('Could not read plugin properties: %s',
plugin_path_i.joinpath('properties.yml'),
exc_info=True)
else:
properties_i['path'] = plugin_path_i.realpath()
installed_plugins_.append(properties_i)
if only_conda:
try:
package_names = [plugin_i['package_name']
for plugin_i in installed_plugins_]
conda_package_infos = ch.package_version(package_names,
verbose=False)
except ch.PackageNotFound, exception:
logger.warning(str(exception))
conda_package_infos = exception.available
installed_package_names = set([package_i['name']
for package_i in conda_package_infos])
return [plugin_i for plugin_i in installed_plugins_
if plugin_i['package_name'] in installed_package_names]
else:
return installed_plugins_
|
.. versionadded:: 0.20
Parameters
----------
only_conda : bool, optional
Only consider plugins that are installed **as Conda packages**.
.. versionadded:: 0.22
Returns
-------
list
List of properties corresponding to each available plugin that is
**installed**.
.. versionchanged:: 0.22
If :data:`only_conda` is ``False``, a plugin is assumed to be
*installed* if it is present in the
``share/microdrop/plugins/available`` directory **and** is a
**real** directory (i.e., not a link).
If :data:`only_conda` is ``True``, only properties for plugins that
are installed **as Conda packages** are returned.
|
26,704 |
def pformat(arg, width=79, height=24, compact=True):
if height is None or height < 1:
height = 1024
if width is None or width < 1:
width = 256
npopt = numpy.get_printoptions()
numpy.set_printoptions(threshold=100, linewidth=width)
if isinstance(arg, basestring):
if arg[:5].lower() in ('<?xml', b'<?xml'):
if isinstance(arg, bytes):
arg = bytes2str(arg)
if height == 1:
arg = arg[:4*width]
else:
arg = pformat_xml(arg)
elif isinstance(arg, bytes):
if isprintable(arg):
arg = bytes2str(arg)
arg = clean_whitespace(arg)
else:
numpy.set_printoptions(**npopt)
return hexdump(arg, width=width, height=height, modulo=1)
arg = arg.rstrip()
elif isinstance(arg, numpy.record):
arg = arg.pprint()
else:
import pprint
compact = {} if sys.version_info[0] == 2 else dict(compact=compact)
arg = pprint.pformat(arg, width=width, **compact)
numpy.set_printoptions(**npopt)
if height == 1:
arg = clean_whitespace(arg, compact=True)
return arg[:width]
argl = list(arg.splitlines())
if len(argl) > height:
arg = '\n'.join(argl[:height//2] + ['...'] + argl[-height//2:])
return arg
|
Return pretty formatted representation of object as string.
Whitespace might be altered.
|
26,705 |
def varOr(population, toolbox, lambda_, cxpb, mutpb):
offspring = []
for _ in range(lambda_):
op_choice = np.random.random()
if op_choice < cxpb:
ind1, ind2 = pick_two_individuals_eligible_for_crossover(population)
if ind1 is not None:
ind1, _ = toolbox.mate(ind1, ind2)
del ind1.fitness.values
else:
ind1 = mutate_random_individual(population, toolbox)
offspring.append(ind1)
elif op_choice < cxpb + mutpb:
ind = mutate_random_individual(population, toolbox)
offspring.append(ind)
else:
idx = np.random.randint(0, len(population))
offspring.append(toolbox.clone(population[idx]))
return offspring
|
Part of an evolutionary algorithm applying only the variation part
(crossover, mutation **or** reproduction). The modified individuals have
their fitness invalidated. The individuals are cloned so returned
population is independent of the input population.
:param population: A list of individuals to vary.
:param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
operators.
:param lambda\_: The number of children to produce
:param cxpb: The probability of mating two individuals.
:param mutpb: The probability of mutating an individual.
:returns: The list of varied offspring
The variation goes as follows. On each of the *lambda_* iterations, it
selects one of the three operations; crossover, mutation or reproduction.
In the case of a crossover, two individuals are selected at random from
the parental population :math:`P_\mathrm{p}`, those individuals are cloned
using the :meth:`toolbox.clone` method and then mated using the
:meth:`toolbox.mate` method. Only the first child is appended to the
offspring population :math:`P_\mathrm{o}`, the second child is discarded.
In the case of a mutation, one individual is selected at random from
:math:`P_\mathrm{p}`, it is cloned and then mutated using the
:meth:`toolbox.mutate` method. The resulting mutant is appended to
:math:`P_\mathrm{o}`. In the case of a reproduction, one individual is
selected at random from :math:`P_\mathrm{p}`, cloned and appended to
:math:`P_\mathrm{o}`.
This variation is named *Or* because an offspring will never result from
both operations crossover and mutation. The sum of both probabilities
shall be in :math:`[0, 1]`, the reproduction probability is
1 - *cxpb* - *mutpb*.
|
26,706 |
def reset_time_estimate(self, **kwargs):
path = '%s/%s/reset_time_estimate' % (self.manager.path, self.get_id())
return self.manager.gitlab.http_post(path, **kwargs)
|
Resets estimated time for the object to 0 seconds.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done
|
26,707 |
def get_configuration(basename=, parents=None):
copy_default_config_to_user_directory(basename)
parser = configargparse.ArgParser(
formatter_class=configargparse.ArgumentDefaultsRawHelpFormatter,
add_help=False,
parents=parents or [],
default_config_files=[
resource_filename(
Requirement.parse("scriptabit"),
os.path.join(, basename)),
os.path.join(
os.path.expanduser("~/.config/scriptabit"),
basename),
os.path.join(os.curdir, basename)])
parser.add(
,
,
required=False,
default=,
metavar=,
env_var=,
help=)
parser.add(
,
,
required=False,
default=,
help=)
parser.add(
,
,
required=False,
default=,
help=)
parser.add(
,
,
required=False,
help=)
parser.add(
,
,
required=False,
action=,
help=)
parser.add(
,
,
required=False,
action=,
help=)
parser.add(
,
,
required=False,
action=,
help=)
parser.add(
,
,
required=False,
type=int,
default=0,
help=)
parser.add(
,
,
required=False,
type=int,
default=-1,
help=)
parser.add(
,
,
required=False,
action=,
help=)
return parser.parse_known_args()[0], parser.print_help
|
Parses and returns the program configuration options,
taken from a combination of ini-style config file, and
command line arguments.
Args:
basename (str): The base filename.
parents (list): A list of ArgumentParser objects whose arguments
should also be included in the configuration parsing. These
ArgumentParser instances **must** be instantiated with the
`add_help` argument set to `False`, otherwise the main
ArgumentParser instance will raise an exception due to duplicate
help arguments.
Returns:
The options object, and a function that can be called to print the help
text.
|
26,708 |
def compute(self, motor_pct: float, tm_diff: float) -> float:
appliedVoltage = self._nominalVoltage * motor_pct
appliedVoltage = math.copysign(
max(abs(appliedVoltage) - self._vintercept, 0), appliedVoltage
)
a0 = self.acceleration
v0 = self.velocity
v1 = v0 + a0 * tm_diff
a1 = (appliedVoltage - self._kv * v1) / self._ka
v1 = v0 + (a0 + a1) * 0.5 * tm_diff
a1 = (appliedVoltage - self._kv * v1) / self._ka
self.position += (v0 + v1) * 0.5 * tm_diff
self.velocity = v1
self.acceleration = a1
return self.velocity
|
:param motor_pct: Percentage of power for motor in range [-1..1]
:param tm_diff: Time elapsed since this function was last called
:returns: velocity
|
26,709 |
def subset_sum(x, R):
k = len(x) // 2
Y = [v for v in part_sum(x[:k])]
Z = [R - v for v in part_sum(x[k:])]
Y.sort()
Z.sort()
i = 0
j = 0
while i < len(Y) and j < len(Z):
if Y[i] == Z[j]:
return True
elif Y[i] < Z[j]:
i += 1
else:
j += 1
return False
|
Subsetsum by splitting
:param x: table of values
:param R: target value
:returns bool: True if there is a subsequence of x with total sum R
:complexity: :math:`O(2^{\\lceil n/2 \\rceil})`
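A minimal usage sketch (part_sum, used above, is assumed to enumerate all subset sums of its input):
>>> subset_sum([3, 5, 8, 9], 12)   # 3 + 9 == 12
True
>>> subset_sum([3, 5, 8, 9], 6)
False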
|
26,710 |
def make_rpc_call(self, rpc_command):
if not self.is_alive():
self.close()
self.open()
result = self._execute_rpc(rpc_command)
return ET.tostring(result)
|
Allow a user to query a device directly using XML-requests.
:param rpc_command: (str) rpc command such as:
<Get><Operational><LLDP><NodeTable></NodeTable></LLDP></Operational></Get>
|
26,711 |
def get_form(self, step=None, data=None, files=None):
if step is None:
step = self.steps.current
kwargs = self.get_form_kwargs(step)
kwargs.update({
'data': data,
'files': files,
'prefix': self.get_form_prefix(step, self.form_list[step]),
'initial': self.get_form_initial(step),
})
if issubclass(self.form_list[step], forms.ModelForm):
kwargs.update({'instance': self.get_form_instance(step)})
elif issubclass(self.form_list[step], forms.models.BaseModelFormSet):
kwargs.update({'queryset': self.get_form_instance(step)})
return self.form_list[step](**kwargs)
|
Constructs the form for a given `step`. If no `step` is defined, the
current step will be determined automatically.
The form will be initialized using the `data` argument to prefill the
new form. If needed, instance or queryset (for `ModelForm` or
`ModelFormSet`) will be added too.
|
26,712 |
def tracks(self):
if self._tracks is None:
self._tracks = TrackList(self.version, self.id)
return self._tracks
|
Tracks list context
:return: Tracks list context
|
26,713 |
def to_html(self, codebase):
body = ''
for section in (, , ):
val = getattr(self, section)
if val:
body += % (
printable(section), section,
.join(param.to_html() for param in val))
body += codebase.build_see_html(self.see, , self)
return ( +
) % (self.name, self.name,
htmlize_paragraphs(codebase.translate_links(self.doc, self)), body)
|
Convert this `FunctionDoc` to HTML.
|
26,714 |
def keep_alive(self):
self._lock.acquire()
try:
return len(self._deferred)
finally:
self._lock.release()
|
Prevent immediate Broker shutdown while deferred functions remain.
|
26,715 |
def dilworth(graph):
n = len(graph)
match = max_bipartite_matching(graph)
part = [None] * n
nb_chains = 0
for v in range(n - 1, -1, -1):
if part[v] is None:
u = v
while u is not None:
part[u] = nb_chains
u = match[u]
nb_chains += 1
return part
|
Decompose a DAG into a minimum number of chains by Dilworth's theorem
:param graph: directed graph in listlist or listdict format
:assumes: graph is acyclic
:returns: table giving for each vertex the index of its chain
:complexity: same as matching
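A small usage sketch (max_bipartite_matching comes from the same library):
>>> dilworth([[1], [2], []])   # the path 0 -> 1 -> 2 forms a single chain
[0, 0, 0]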
|
26,716 |
def parse_authorization_code_response(uri, state=None):
if not is_secure_transport(uri):
raise InsecureTransportError()
query = urlparse.urlparse(uri).query
params = dict(urlparse.parse_qsl(query))
if 'code' not in params:
raise MissingCodeError("Missing code parameter in response.")
if state and params.get('state', None) != state:
raise MismatchingStateError()
return params
|
Parse authorization grant response URI into a dict.
If the resource owner grants the access request, the authorization
server issues an authorization code and delivers it to the client by
adding the following parameters to the query component of the
redirection URI using the ``application/x-www-form-urlencoded`` format:
**code**
REQUIRED. The authorization code generated by the
authorization server. The authorization code MUST expire
shortly after it is issued to mitigate the risk of leaks. A
maximum authorization code lifetime of 10 minutes is
RECOMMENDED. The client MUST NOT use the authorization code
more than once. If an authorization code is used more than
once, the authorization server MUST deny the request and SHOULD
revoke (when possible) all tokens previously issued based on
that authorization code. The authorization code is bound to
the client identifier and redirection URI.
**state**
REQUIRED if the "state" parameter was present in the client
authorization request. The exact value received from the
client.
:param uri: The full redirect URL back to the client.
:param state: The state parameter from the authorization request.
For example, the authorization server redirects the user-agent by
sending the following HTTP response:
.. code-block:: http
HTTP/1.1 302 Found
Location: https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA
&state=xyz
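A usage sketch with the redirect URI from the example above:
>>> parse_authorization_code_response(
...     'https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA&state=xyz',
...     state='xyz')
{'code': 'SplxlOBeZQQYbYS6WxSbIA', 'state': 'xyz'}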
|
26,717 |
def consume(self, char):
if self.state == "stream":
self._stream(char)
elif self.state == "escape":
self._escape_sequence(char)
elif self.state == "escape-lb":
self._escape_parameters(char)
elif self.state == "mode":
self._mode(char)
elif self.state == "charset-g0":
self._charset_g0(char)
elif self.state == "charset-g1":
self._charset_g1(char)
|
Consume a single character and advance the state as necessary.
|
26,718 |
def get_content_item_inlines(plugins=None, base=BaseContentItemInline):
COPY_FIELDS = (
, , , ,
, , , ,
)
if plugins is None:
plugins = extensions.plugin_pool.get_plugins()
inlines = []
for plugin in plugins:
if not isinstance(plugin, extensions.ContentPlugin):
raise TypeError("get_content_item_inlines() expects to receive ContentPlugin instances, not {0}".format(plugin))
ContentItemType = plugin.model
class_name = % ContentItemType.__name__
attrs = {
: plugin.__class__.__module__,
: ContentItemType,
: plugin.verbose_name,
: plugin,
: plugin.type_name,
: plugin.fieldsets,
: plugin.admin_form_template,
: plugin.admin_init_template,
}
for name in COPY_FIELDS:
if getattr(plugin, name):
attrs[name] = getattr(plugin, name)
inlines.append(type(class_name, (base,), attrs))
inlines.sort(key=lambda inline: inline.name.lower())
return inlines
|
Dynamically generate genuine django inlines for all registered content item types.
When the `plugins` parameter is ``None``, all plugin inlines are returned.
|
26,719 |
def _authenticate_gssapi(credentials, sock_info):
if not HAVE_KERBEROS:
raise ConfigurationError(
'The "kerberos" module must be installed to use GSSAPI authentication.')  # message reconstructed
try:
username = credentials.username
password = credentials.password
props = credentials.mechanism_properties
host = sock_info.address[0]
if props.canonicalize_host_name:
host = socket.getfqdn(host)
service = props.service_name + '@' + host
if props.service_realm is not None:
service = service + '@' + props.service_realm
if password is not None:
if _USE_PRINCIPAL:
principal = ":".join((quote(username), quote(password)))
result, ctx = kerberos.authGSSClientInit(
service, principal, gssflags=kerberos.GSS_C_MUTUAL_FLAG)
else:
if '@' in username:
user, domain = username.split('@', 1)
else:
user, domain = username, None
result, ctx = kerberos.authGSSClientInit(
service, gssflags=kerberos.GSS_C_MUTUAL_FLAG,
user=user, domain=domain, password=password)
else:
result, ctx = kerberos.authGSSClientInit(
service, gssflags=kerberos.GSS_C_MUTUAL_FLAG)
if result != kerberos.AUTH_GSS_COMPLETE:
raise OperationFailure('Kerberos context failed to initialize.')  # message reconstructed
try:
if kerberos.authGSSClientStep(ctx, '') != 0:
raise OperationFailure(
'Unknown kerberos failure in step function.')  # message reconstructed
finally:
kerberos.authGSSClientClean(ctx)
except kerberos.KrbError as exc:
raise OperationFailure(str(exc))
|
Authenticate using GSSAPI.
|
26,720 |
def _to_dict(self):
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'user') and self.user is not None:
_dict['user'] = self.user
return _dict
|
Return a json dictionary representing this model.
|
26,721 |
def get_dilated_1d_attention_mask(
num_heads, block_size,
num_blocks, memory_size, gap_size,
name="dilated_mask"):
mask = np.ones((num_heads, block_size, 2*block_size), np.bool)
for i in range(block_size):
visible = 2*block_size - (block_size-i)
mask[:, i, -(block_size - i)] = 0
for j in range(num_blocks):
for k in range(memory_size):
index = ((gap_size + memory_size)*j) + k
if index >= visible:
break
mask[:, i, -(index + block_size - i + 1)] = 0
mask = np.expand_dims(mask, axis=1)
return tf.constant(mask, dtype=tf.int32, name=name)
|
Dilated attention with a masking strategy.
|
26,722 |
def info(*messages):
sys.stderr.write("%s.%s: " % get_caller_info())
sys.stderr.write(' '.join(map(str, messages)))
sys.stderr.write('\n')
|
Prints the current GloTK module and a `message`.
Taken from biolite
|
26,723 |
def undo(self):
args = self._undo_stack.back()
if args is None:
return
self._data = deepcopy(self._data_base)
for clusters, field, value, up, undo_state in self._undo_stack:
if clusters is not None:
self.set(field, clusters, value, add_to_stack=False)
up, undo_state = args[-2:]
up.history = 'undo'  # value reconstructed
up.undo_state = undo_state
self.emit(, up)
return up
|
Undo the last metadata change.
Returns
-------
up : UpdateInfo instance
|
26,724 |
def get_alternative_nested_val(key_tuple, dict_obj):
top_keys = key_tuple[0] if isinstance(key_tuple[0], (list, tuple)) else [
key_tuple[0]]
for key in top_keys:
try:
if len(key_tuple) < 2:
return dict_obj[key]
return get_alternative_nested_val(key_tuple[1:], dict_obj[key])
except (KeyError, TypeError, IndexError):
pass
raise KeyError
|
Return a value from nested dicts by any path in the given keys tuple.
Parameters
----------
key_tuple : tuple
Describe all possible paths for extraction.
dict_obj : dict
The outer-most dict to extract from.
Returns
-------
value : object
The extracted value, if it exists. Otherwise, raises KeyError.
Example
-------
>>> dict_obj = {'a': {'b': 7}}
>>> get_alternative_nested_val(('a', ('b', 'c')), dict_obj)
7
|
26,725 |
def save(self):
is_changed = False
for prop in self._instance.CONFIG_PARAMS:
if getattr(self, prop) != getattr(self._instance, prop):
is_changed = True
setattr(self._instance, prop, getattr(self, prop))
if is_changed:
self._instance.save_configuration()
|
Method that saves configuration parameter changes from instance of SHConfig class to global config class and
to `config.json` file.
Example of use case
``my_config = SHConfig()`` \n
``my_config.instance_id = '<new instance id>'`` \n
``my_config.save()``
|
26,726 |
def f_lock_derived_parameters(self):
for par in self._derived_parameters.values():
if not par.f_is_empty():
par.f_lock()
|
Locks all non-empty derived parameters
|
26,727 |
def unregister_editorstack(self, editorstack):
self.remove_last_focus_editorstack(editorstack)
if len(self.editorstacks) > 1:
index = self.editorstacks.index(editorstack)
self.editorstacks.pop(index)
return True
else:
return False
|
Remove the editorstack only if it's not the last one remaining.
|
26,728 |
def _prep_sample_and_config(ldetail_group, fastq_dir, fastq_final_dir):
files = []
print("->", ldetail_group[0]["name"], len(ldetail_group))
for read in ["R1", "R2"]:
fastq_inputs = sorted(list(set(reduce(operator.add,
(_get_fastq_files(x, read, fastq_dir) for x in ldetail_group)))))
if len(fastq_inputs) > 0:
files.append(_concat_bgzip_fastq(fastq_inputs, fastq_final_dir, read, ldetail_group[0]))
if len(files) > 0:
if _non_empty(files[0]):
out = ldetail_group[0]
out["files"] = files
return out
|
Prepare output fastq file and configuration for a single sample.
Only passes non-empty files through for processing.
|
26,729 |
def send_rpc_sync(self, conn_id, address, rpc_id, payload, timeout):
done = threading.Event()
result = {}
def send_rpc_done(conn_id, adapter_id, status, reason, rpc_status, resp_payload):
result['success'] = status
result['failure_reason'] = reason
result['status'] = rpc_status
result['payload'] = resp_payload
done.set()
self.send_rpc_async(conn_id, address, rpc_id, payload, timeout, send_rpc_done)
done.wait()
return result
|
Synchronously send an RPC to this IOTile device
Args:
conn_id (int): A unique identifier that will refer to this connection
address (int): the address of the tile that we wish to send the RPC to
rpc_id (int): the 16-bit id of the RPC we want to call
payload (bytearray): the payload of the command
timeout (float): the number of seconds to wait for the RPC to execute
Returns:
dict: A dictionary with four elements
'success': a bool indicating whether we received a response to our attempted RPC
'failure_reason': a string with the reason for the failure if success == False
'status': the one byte status code returned for the RPC if success == True else None
'payload': a bytearray with the payload returned by RPC if success == True else None
|
26,730 |
def observed(cls, _func):
def wrapper(*args, **kwargs):
self = args[0]
assert(isinstance(self, Observable))
self._notify_method_before(self, _func.__name__, args, kwargs)
res = _func(*args, **kwargs)
self._notify_method_after(self, _func.__name__, res, args, kwargs)
return res
return wrapper
|
Decorate methods to be observable. If they are called on an instance
stored in a property, the model will emit before and after
notifications.
|
26,731 |
def load(self, limit=9999):
subItemList = self.api.list(.format(self.parentObjName,
self.parentKey,
self.objName,
),
limit=limit)
if self.objName == :
subItemList = list(map(lambda x: {: x}, subItemList))
if self.objName == :
sil_tmp = subItemList.values()
subItemList = []
for i in sil_tmp:
subItemList.extend(i)
return {x[self.index]: self.objType(self.api, x[],
self.parentObjName,
self.parentPayloadObj,
self.parentKey,
x)
for x in subItemList}
|
Function load
Get the list of all sub-items
@param limit: The limit of items to return
@return RETURN: A dict of ForemanItem, indexed by key
|
26,732 |
def contourf(self, *args, **kwargs):
args, kwargs = self._parse_plot_args(*args, **kwargs, plot_type="contourf")
kwargs["antialiased"] = False
kwargs["extend"] = "both"
contours = super().contourf(*args, **kwargs)
zorder = contours.collections[0].zorder - 0.1
levels = (contours.levels[1:] + contours.levels[:-1]) / 2
matplotlib.axes.Axes.contour(
self, *args[:3], levels=levels, cmap=contours.cmap, zorder=zorder
)
self.set_facecolor([0.75] * 3)
for c in contours.collections:
pass
return contours
|
Plot contours.
If a 3D or higher Data object is passed, a lower dimensional
channel can be plotted, provided the ``squeeze`` of the channel
has ``ndim==2`` and the first two axes do not span dimensions
other than those spanned by that channel.
Parameters
----------
data : 2D WrightTools.data.Data object
Data to plot.
channel : int or string (optional)
Channel index or name. Default is 0.
dynamic_range : boolean (optional)
Force plotting of all contours, overloading for major extent. Only applies to signed
data. Default is False.
autolabel : {'none', 'both', 'x', 'y'} (optional)
Parameterize application of labels directly from data object. Default is none.
xlabel : string (optional)
xlabel. Default is None.
ylabel : string (optional)
ylabel. Default is None.
**kwargs
matplotlib.axes.Axes.contourf__ optional keyword arguments.
__ https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.contourf.html
Returns
-------
matplotlib.contour.QuadContourSet
|
26,733 |
def ntp_authentication_key_md5(self, **kwargs):
config = ET.Element("config")
ntp = ET.SubElement(config, "ntp", xmlns="urn:brocade.com:mgmt:brocade-ntp")
authentication_key = ET.SubElement(ntp, "authentication-key")
keyid_key = ET.SubElement(authentication_key, "keyid")
keyid_key.text = kwargs.pop('keyid')
md5 = ET.SubElement(authentication_key, "md5")
md5.text = kwargs.pop('md5')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
26,734 |
def directory(self):
if self._directory is None:
self._directory = self.api._load_directory(self.cid)
return self._directory
|
Directory that holds this file
|
26,735 |
def split_writable_text(encoder, text, encoding):
if not encoding:
return None, text
for idx, char in enumerate(text):
if encoder.can_encode(encoding, char):
continue
return text[:idx], text[idx:]
return text, None
|
Splits off as many characters from the beginning of text as
are writable with "encoding". Returns a 2-tuple (writable, rest).
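A small sketch (`ascii_encoder` is a hypothetical encoder whose can_encode() accepts only ASCII):
>>> split_writable_text(ascii_encoder, 'abcé', 'ascii')
('abc', 'é')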
|
26,736 |
def _flattenMergedProteins(proteins):
proteinSet = set()
for protein in proteins:
if isinstance(protein, tuple):
proteinSet.update(protein)
else:
proteinSet.add(protein)
return proteinSet
|
Return a set where merged protein entries in proteins are flattened.
:param proteins: an iterable of proteins, can contain merged protein entries
in the form of tuple([protein1, protein2]).
:returns: a set of protein entries, where all entries are strings
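Example:
>>> _flattenMergedProteins(['P1', ('P2', 'P3')]) == {'P1', 'P2', 'P3'}
True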
|
26,737 |
def detail_get(self, session, fields=[], **kwargs):
request = TOPRequest()
if not fields:
shipping = Shipping()
fields = shipping.fields
request['fields'] = fields
for k, v in kwargs.iteritems():
if k not in (, , , , , , , , , , ) and v==None: continue
request[k] = v
self.create(self.execute(request, session))
return self.shippings
|
taobao.logistics.orders.detail.get — batch-query logistics orders and return detailed information.
Queries the detailed information of logistics orders, which involves user-privacy fields. (Note: this API is mainly for sellers querying their logistics orders; buyers should use taobao.logistics.trace.search instead.)
|
26,738 |
def _execute_and_process_stdout(self, args, shell, handler):
proc = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=shell,
bufsize=1)
out = '[elided, processed via handler]'
try:
while True:
line = proc.stdout.readline()
if line:
handler(line)
else:
break
finally:
(unexpected_out, err) = proc.communicate()
if unexpected_out:
out = '[unexpected stdout] %s' % unexpected_out
for line in unexpected_out.splitlines():
handler(line)
ret = proc.returncode
logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s',
utils.cli_cmd_to_string(args), out, err, ret)
if ret == 0:
return err
else:
raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret)
|
Executes adb commands and processes the stdout with a handler.
Args:
args: string or list of strings, program arguments.
See subprocess.Popen() documentation.
shell: bool, True to run this command through the system shell,
False to invoke it directly. See subprocess.Popen() docs.
handler: func, a function to handle adb stdout line by line.
Returns:
The stderr of the adb command run if exit code is 0.
Raises:
AdbError: The adb command exit code is not 0.
|
26,739 |
def csv(ctx, force, threads, mapping, data):
lines = 0
for line in DictReader(data):
lines += 1
data.seek(0)
mapping = yaml.load(mapping)
mapping_loader = MappingLoader(ctx.obj[], mapping)
def process_row(row):
try:
mapping_loader.load(row)
except GranoException, ge:
msg = % ge.message
click.secho(msg, fg=, bold=True)
if not force:
os._exit(1)
except RowException, re:
if not force:
msg = % (row[], re.message)
click.secho(msg, fg=, bold=True)
os._exit(1)
def generate():
with click.progressbar(DictReader(data),
label=data.name,
length=lines) as bar:
for i, row in enumerate(bar):
row[] = i
yield row
threaded(generate(), process_row, num_threads=threads,
max_queue=1)
|
Load CSV data into a grano instance using a mapping specification.
|
26,740 |
def collate(binder, ruleset=None, includes=None):
html_formatter = SingleHTMLFormatter(binder, includes)
raw_html = io.BytesIO(bytes(html_formatter))
collated_html = io.BytesIO()
if ruleset is None:
return binder
easybake(ruleset, raw_html, collated_html)
collated_html.seek(0)
collated_binder = reconstitute(collated_html)
return collated_binder
|
Given a ``Binder`` as ``binder``, collate the content into a new set
of models.
Returns the collated binder.
|
26,741 |
def _tls12_SHA256PRF(secret, label, seed, req_len):
return _tls_P_SHA256(secret, label + seed, req_len)
|
Provides the implementation of TLS 1.2 PRF function as
defined in section 5 of RFC 5246:
PRF(secret, label, seed) = P_SHA256(secret, label + seed)
Parameters are:
- secret: the secret used by the HMAC in the 2 expansion
functions (S1 and S2 are the halves of this secret).
- label: specific label as defined in various sections of the RFC
depending on the use of the generated PRF keystream
- seed: the seed used by the expansion functions.
- req_len: amount of keystream to be generated
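A usage sketch of the RFC 5246 key-block derivation (variable names are illustrative):
>>> key_block = _tls12_SHA256PRF(master_secret, b"key expansion",
...                              server_random + client_random, 104)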
|
26,742 |
def build_swagger12_handler(schema):
if schema:
return SwaggerHandler(
op_for_request=schema.validators_for_request,
handle_request=handle_request,
handle_response=validate_response,
)
|
Builds a swagger12 handler or returns None if no schema is present.
:type schema: :class:`pyramid_swagger.model.SwaggerSchema`
:rtype: :class:`SwaggerHandler` or None
|
26,743 |
def limitReal(x, max_denominator=1000000):
f = Fraction(x).limit_denominator(max_denominator)
return Real((f.numerator, f.denominator))
|
Creates a pysmt Real constant from x.
Args:
x (number): A number to be cast to a pysmt constant.
max_denominator (int, optional): The maximum size of the denominator.
Default 1000000.
Returns:
A Real constant with the given value and the denominator limited.
|
26,744 |
def _energy_coeffs(m1, m2, chi1, chi2):
mtot = m1 + m2
eta = m1*m2 / (mtot*mtot)
chi = (m1*chi1 + m2*chi2) / mtot
chisym = (chi1 + chi2) / 2.
beta = (113.*chi - 76.*eta*chisym)/12.
sigma12 = 79.*eta*chi1*chi2/8.
sigmaqm = 81.*m1*m1*chi1*chi1/(16.*mtot*mtot) \
+ 81.*m2*m2*chi2*chi2/(16.*mtot*mtot)
energy0 = -0.5*eta
energy2 = -0.75 - eta/12.
energy3 = 0.
energy4 = -3.375 + (19*eta)/8. - pow(eta,2)/24.
energy5 = 0.
energy6 = -10.546875 - (155*pow(eta,2))/96. - (35*pow(eta,3))/5184. \
+ eta*(59.80034722222222 - (205*pow(lal.PI,2))/96.)
energy3 += (32*beta)/113. + (52*chisym*eta)/113.
energy4 += (-16*sigma12)/79. - (16*sigmaqm)/81.
energy5 += (96*beta)/113. + ((-124*beta)/339. - (522*chisym)/113.)*eta \
- (710*chisym*pow(eta,2))/339.
return (energy0, energy2, energy3, energy4, energy5, energy6)
|
Return the center-of-mass energy coefficients up to 3.0pN (2.5pN spin)
|
26,745 |
def _getnode(self, curie):
if re.match(r'^_:', curie):  # blank-node pattern reconstructed
if self.are_bnodes_skized is True:
node = self.skolemizeBlankNode(curie)
else:
node = curie
elif re.match(r'^http', curie):  # full-IRI pattern reconstructed
node = curie
elif len(curie.split(':')) == 2:
node = StreamedGraph.curie_util.get_uri(curie)
else:
raise TypeError("Cannot process curie {}".format(curie))
return node
|
Returns IRI, or blank node curie/iri depending on
self.skolemize_blank_node setting
:param curie: str id as curie or iri
:return:
|
26,746 |
def stop(self):
if self.is_alive():
self._can_run = False
self._stop_event.set()
self._profiler.total_time += time() - self._start_time
self._start_time = None
|
Request thread to stop.
Does not wait for actual termination (use join() method).
|
26,747 |
def construct_nucmer_cmdline(
fname1,
fname2,
outdir=".",
nucmer_exe=pyani_config.NUCMER_DEFAULT,
filter_exe=pyani_config.FILTER_DEFAULT,
maxmatch=False,
):
outsubdir = os.path.join(outdir, pyani_config.ALIGNDIR["ANIm"])
outprefix = os.path.join(
outsubdir,
"%s_vs_%s"
% (
os.path.splitext(os.path.split(fname1)[-1])[0],
os.path.splitext(os.path.split(fname2)[-1])[0],
),
)
if maxmatch:
mode = "--maxmatch"
else:
mode = "--mum"
nucmercmd = "{0} {1} -p {2} {3} {4}".format(
nucmer_exe, mode, outprefix, fname1, fname2
)
filtercmd = "delta_filter_wrapper.py " + "{0} -1 {1} {2}".format(
filter_exe, outprefix + ".delta", outprefix + ".filter"
)
return (nucmercmd, filtercmd)
|
Returns a tuple of NUCmer and delta-filter commands
The split into a tuple was made necessary by changes to SGE/OGE. The
delta-filter command must now be run as a dependency of the NUCmer
command, and be wrapped in a Python script to capture STDOUT.
NOTE: This command-line writes output data to a subdirectory of the passed
outdir, called "nucmer_output".
- fname1 - query FASTA filepath
- fname2 - subject FASTA filepath
- outdir - path to output directory
- maxmatch - Boolean flag indicating whether to use NUCmer's --maxmatch
option. If not, the --mum option is used instead
|
26,748 |
def abs_energy(self, x):
_energy = feature_calculators.abs_energy(x)
logging.debug("abs energy by tsfresh calculated")
return _energy
|
As in tsfresh `abs_energy <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\
feature_calculators.py#L390>`_ \
Returns the absolute energy of the time series which is the sum over the squared values\
.. math::
E=\\sum_{i=1,\ldots, n}x_i^2
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
:rtype: float
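A short sketch (assuming `fc` is an instance of the enclosing feature class):
>>> import pandas as pd
>>> fc.abs_energy(pd.Series([1, 2, 3]))   # 1 + 4 + 9
14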
|
26,749 |
def triangle_area(e1, e2, e3):
e1_length = numpy.sqrt(numpy.sum(e1 * e1, axis=-1))
e2_length = numpy.sqrt(numpy.sum(e2 * e2, axis=-1))
e3_length = numpy.sqrt(numpy.sum(e3 * e3, axis=-1))
s = (e1_length + e2_length + e3_length) / 2.0
return numpy.sqrt(s * (s - e1_length) * (s - e2_length) * (s - e3_length))
|
Get the area of triangle formed by three vectors.
Parameters are three three-dimensional numpy arrays representing
vectors of triangle's edges in Cartesian space.
:returns:
Float number, the area of the triangle in squared units of coordinates,
or numpy array of shape of edges with one dimension less.
Uses Heron formula, see http://mathworld.wolfram.com/HeronsFormula.html.
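Worked example with a 3-4-5 right triangle (s = 6, area = sqrt(6*3*2*1) = 6):
>>> import numpy
>>> triangle_area(numpy.array([3., 0., 0.]), numpy.array([0., 4., 0.]),
...               numpy.array([3., -4., 0.]))
6.0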
|
26,750 |
def hide(input_image: Union[str, IO[bytes]], message: str):
message_length = len(message)
assert message_length != 0, "message message_length is zero"
assert message_length < 255, "message is too long"
img = tools.open_image(input_image)
encoded = img.copy()
width, height = img.size
index = 0
for row in range(height):
for col in range(width):
(r, g, b) = img.getpixel((col, row))
if row == 0 and col == 0 and index < message_length:
asc = message_length
elif index <= message_length:
c = message[index - 1]
asc = ord(c)
else:
asc = r
encoded.putpixel((col, row), (asc, g, b))
index += 1
img.close()
return encoded
|
Hide a message (string) in an image.
Use the red portion of a pixel (r, g, b) tuple to
hide the message string characters as ASCII values.
The red value of the first pixel is used for message_length of the string.
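Usage sketch (file names are illustrative):
>>> secret = hide('cover.png', 'meet at noon')
>>> secret.save('stego.png')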
|
26,751 |
def _determine_resource_pool(session, vm_):
resource_pool = ''
if 'resource_pool' in vm_.keys():  # key name reconstructed
resource_pool = _get_pool(vm_['resource_pool'], session)
else:
pool = session.xenapi.pool.get_all()
if not pool:
resource_pool = None
else:
first_pool = session.xenapi.pool.get_all()[0]
resource_pool = first_pool
pool_record = session.xenapi.pool.get_record(resource_pool)
log.debug('Resource pool: %s', pool_record['name_label'])  # message and key reconstructed
return resource_pool
|
Called by create() used to determine resource pool
|
26,752 |
def cmd_annotate(self, argv, help):
parser = argparse.ArgumentParser(
prog="%s annotate" % self.progname,
description=help,
)
parser.parse_args(argv)
list(self.instances.values())
for global_section in sorted(self.config):
for sectionname in sorted(self.config[global_section]):
print("[%s:%s]" % (global_section, sectionname))
section = self.config[global_section][sectionname]
for option, value in sorted(section._dict.items()):
print("%s = %s" % (option, value.value))
print(" %s" % value.src)
print()
|
Prints annotated config
|
26,753 |
def milestones(self, extra_params=None):
params = {
'per_page': settings.MAX_PER_PAGE,  # key name assumed
}
if extra_params:
params.update(extra_params)
return self.api._get_json(
Milestone,
space=self,
rel_path=self._build_rel_path(),
extra_params=params,
get_all=True,
)
|
All Milestones in this Space
|
26,754 |
def setReturnParameter(self, name, type, namespace=None, element_type=0):
parameter = ParameterInfo(name, type, namespace, element_type)
self.retval = parameter
return parameter
|
Set the return parameter description for the call info.
|
26,755 |
def _get_adj_list_directional(self, umis, counts):
adj_list = {umi: [] for umi in umis}
if self.fuzzy_match:
for umi1 in umis:
comp_regex_err = regex.compile("(%s){e<=1}" % str(umi1))
comp_regex_del = regex.compile("(%s){i<=1}" % str(umi1)[::-1])
for umi2 in umis:
if umi1 == umi2:
continue
if counts[umi1] >= (counts[umi2]*self.dir_threshold):
if (max(len(umi1), len(umi2)) -
min(len(umi1), len(umi2))) > 1:
continue
if (comp_regex_err.match(str(umi2)) or
comp_regex_del.match(str(umi2))):
adj_list[umi1].append(umi2)
else:
for umi1, umi2 in itertools.combinations(umis, 2):
if edit_distance(umi1, umi2) <= 1:
if counts[umi1] >= (counts[umi2]*2)-1:
adj_list[umi1].append(umi2)
if counts[umi2] >= (counts[umi1]*2)-1:
adj_list[umi2].append(umi1)
return adj_list
|
Identify all UMIs within the hamming distance threshold
and where the count of the first UMI is >= (2 * the second UMI's count) - 1
|
26,756 |
def fix_microsoft (foo):
i = 0
bar = []
while i < len(foo):
text, lemma, pos, tag = foo[i]
if (text == "
prev_tok = bar[-1]
prev_tok[0] += "
prev_tok[1] += "
bar[-1] = prev_tok
else:
bar.append(foo[i])
i += 1
return bar
|
fix special case for `c#`, `f#`, etc.; thanks Microsoft
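Example (tokens are [text, lemma, pos, tag] lists):
>>> fix_microsoft([["c", "c", "NOUN", "NN"], ["#", "#", "SYM", "$"]])
[['c#', 'c#', 'NOUN', 'NN']]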
|
26,757 |
def get_source_scanner(self, node):
scanner = None
try:
scanner = self.builder.source_scanner
except AttributeError:
pass
if not scanner:
scanner = self.get_env_scanner(self.get_build_env())
if scanner:
scanner = scanner.select(node)
return scanner
|
Fetch the source scanner for the specified node
NOTE: "self" is the target being built, "node" is
the source file for which we want to fetch the scanner.
Implies self.has_builder() is true; again, expect to only be
called from locations where this is already verified.
This function may be called very often; it attempts to cache
the scanner found to improve performance.
|
26,758 |
def cmd_rc(self, args):
if len(args) != 2:
print("Usage: rc <channel|all> <pwmvalue>")
return
value = int(args[1])
if value > 65535 or value < -1:
raise ValueError("PWM value must be a positive integer between 0 and 65535")
if value == -1:
value = 65535
channels = self.override
if args[0] == 'all':
for i in range(16):
channels[i] = value
else:
channel = int(args[0])
if channel < 1 or channel > 16:
print("Channel must be between 1 and 8 or ")
return
channels[channel - 1] = value
self.set_override(channels)
|
handle RC value override
|
26,759 |
def get_target_hash(target_filepath):
securesystemslib.formats.RELPATH_SCHEMA.check_match(target_filepath)
digest_object = securesystemslib.hash.digest(HASH_FUNCTION)
encoded_target_filepath = target_filepath.encode()
digest_object.update(encoded_target_filepath)
target_filepath_hash = digest_object.hexdigest()
return target_filepath_hash
|
<Purpose>
Compute the hash of 'target_filepath'. This is useful in conjunction with
the "path_hash_prefixes" attribute in a delegated targets role, which tells
us which paths it is implicitly responsible for.
The repository may optionally organize targets into hashed bins to ease
target delegations and role metadata management. The use of consistent
hashing allows for a uniform distribution of targets into bins.
<Arguments>
target_filepath:
The path to the target file on the repository. This will be relative to
the 'targets' (or equivalent) directory on a given mirror.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
The hash of 'target_filepath'.
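A small sketch (assuming HASH_FUNCTION is 'sha256'):
>>> import hashlib
>>> get_target_hash('packages/file.txt') == hashlib.sha256(b'packages/file.txt').hexdigest()
True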
|
26,760 |
def update_director(self, service_id, version_number, name_key, **kwargs):
body = self._formdata(kwargs, FastlyDirector.FIELDS)
content = self._fetch("/service/%s/version/%d/director/%s" % (service_id, version_number, name_key), method="PUT", body=body)
return FastlyDirector(self, content)
|
Update the director for a particular service and version.
|
26,761 |
def _substitute_default(s, new_value):
if new_value is None:
return s
# Reconstruction (substitution body lost in extraction; needs `import re`):
# replace the value after '=' inside the trailing parentheses.
return re.sub(r'(=\s*).*(?=\))', lambda m: m.group(1) + str(new_value), s)
|
Replaces the default value in a parameter docstring by a new value.
The docstring must conform to the numpydoc style and have the form
"something (keyname=<value-to-replace>)"
If no matching pattern is found or ``new_value`` is None, return
the input untouched.
Examples
--------
>>> _substitute_default('int (default=128)', 256)
'int (default=256)'
>>> _substitute_default('nonlin (default = ReLU())', nn.Hardtanh(1, 2))
'nonlin (default = Hardtanh(min_val=1, max_val=2))'
|
26,762 |
def halt(self):
self.explorer.halt()
self.protocoler.halt()
self.bs_calibrator.halt()
self.tone_calibrator.halt()
self.charter.halt()
self.mphone_calibrator.halt()
|
Halts any/all running operations
|
26,763 |
def build(self, tokenlist):
self._tokens = tokenlist
self._tokens.reverse()
self._push()
while self._tokens:
node = self._handle_token(self._tokens.pop())
self._write(node)
return self._pop()
|
Build a Wikicode object from a list of tokens and return it.
|
26,764 |
def get_returner_options(virtualname=None,
ret=None,
attrs=None,
**kwargs):
ret_config = _fetch_ret_config(ret)
attrs = attrs or {}
profile_attr = kwargs.get('profile_attr', None)
profile_attrs = kwargs.get('profile_attrs', None)
defaults = kwargs.get('defaults', None)
__salt__ = kwargs.get('__salt__', {})
__opts__ = kwargs.get('__opts__', {})
cfg = __salt__.get('config.option', __opts__)
_options = dict(
_options_browser(
cfg,
ret_config,
defaults,
virtualname,
attrs,
)
)
_options.update(
_fetch_profile_opts(
cfg,
virtualname,
__salt__,
_options,
profile_attr,
profile_attrs
)
)
if ret and 'ret_kwargs' in ret:  # key name reconstructed
_options.update(ret['ret_kwargs'])
return _options
|
Get the returner options from salt.
:param str virtualname: The returner virtualname (as returned
by __virtual__()
:param ret: result of the module that ran. dict-like object
May contain a `ret_config` key pointing to a string
If a `ret_config` is specified, config options are read from::
<ret_config>.<virtualname>.<option>
If not, config options are read from::
<virtualname>.<option>
:param attrs: options the returner wants to read
:param __opts__: Optional dict-like object that contains a fallback config
in case the param `__salt__` is not supplied.
Defaults to empty dict.
:param __salt__: Optional dict-like object that exposes the salt API.
Defaults to empty dict.
a) if __salt__ contains a 'config.option' configuration options,
we infer the returner is being called from a state or module run ->
config is a copy of the `config.option` function
b) if __salt__ was not available, we infer that the returner is being
called from the Salt scheduler, so we look for the
configuration options in the param `__opts__`
-> cfg is a copy for the __opts__ dictionary
:param str profile_attr: Optional.
If supplied, an overriding config profile is read from
the corresponding key of `__salt__`.
:param dict profile_attrs: Optional
.. fixme:: only keys are read
For each key in profile_attrs, the corresponding value is used to
fetch the value pointed to by 'virtualname.%key' in the dict found
via the param `profile_attr`.
|
26,765 |
def console_print_frame(
con: tcod.console.Console,
x: int,
y: int,
w: int,
h: int,
clear: bool = True,
flag: int = BKGND_DEFAULT,
fmt: str = "",
) -> None:
fmt = _fmt(fmt) if fmt else ffi.NULL
lib.TCOD_console_printf_frame(_console(con), x, y, w, h, clear, flag, fmt)
|
Draw a framed rectangle with optional text.
This uses the default background color and blend mode to fill the
rectangle and the default foreground to draw the outline.
`fmt` will be printed on the inside of the rectangle, word-wrapped.
If `fmt` is empty then no title will be drawn.
.. versionchanged:: 8.2
Now supports Unicode strings.
.. deprecated:: 8.5
Use :any:`Console.print_frame` instead.
|
26,766 |
def _grab_version(self):
original_version = self.vcs.version
logger.debug("Extracted version: %s", original_version)
if original_version is None:
logger.critical('No version found.')  # message reconstructed
sys.exit(1)
suggestion = utils.cleanup_version(original_version)
new_version = utils.ask_version("Enter version", default=suggestion)
if not new_version:
new_version = suggestion
self.data['original_version'] = original_version
self.data['new_version'] = new_version
|
Set the version to a non-development version.
|
26,767 |
def _cfgs_to_read(self):
cfg = Config.DEFAULT_CONFIG_FILE_NAME
filenames = [
self.default_config_file,
cfg,
os.path.join(os.path.expanduser('~' + os.path.sep), cfg),
,
]
if self.cfg_dir:
from glob import glob
filenames.extend(glob(self.cfg_dir + os.path.sep + "*.cfg"))
return filenames
|
reads config files from various locations to build final config.
|
26,768 |
def gen_find_method(ele_type, multiple=True, extra_maps=None):
_ = 's' if multiple else ''
d = {
# original mapping entry lost in extraction
}
if isinstance(extra_maps, dict):
d = dict(d, **extra_maps)
return 'find_element{}_by_{}'.format(_, d.get(ele_type, ele_type))
|
Convert ele_type to the corresponding element-lookup method name.
e.g::
gen_find_method(ele_type='name', multiple=False) => find_element_by_name
gen_find_method(ele_type='name', multiple=True) => find_elements_by_name
:param ele_type: element locator type
:type ele_type: str
:param multiple: whether to look up multiple elements
:type multiple: bool
:param extra_maps: extra ele_type-to-suffix mappings
:type extra_maps: dict
:return: the lookup method name
|
26,769 |
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc.with_traceback(traceback)
|
Raise exception with existing traceback.
If traceback is not passed, uses sys.exc_info() to get traceback.
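Example:
>>> try:
...     1 / 0
... except ZeroDivisionError:
...     raise_with_traceback(ValueError('wrapped'))   # re-raised with the original traceback
Traceback (most recent call last):
    ...
ValueError: wrapped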
|
26,770 |
def read_json(filepath, intkeys=True, intvalues=True):
d = json.load(ensure_open(find_filepath(filepath), mode='rt'))  # mode reconstructed
d = update_dict_types(d, update_keys=intkeys, update_values=intvalues)
return d
|
read text from filepath (`open(find_filepath(expand_filepath(fp)))`) then json.loads()
>>> read_json('HTTP_1.1 Status Code Definitions.html.json')
{'100': 'Continue',
'101': 'Switching Protocols',...
|
26,771 |
def encode_quopri(msg):
orig = msg.get_payload()
encdata = _qencode(orig)
msg.set_payload(encdata)
msg['Content-Transfer-Encoding'] = 'quoted-printable'
|
Encode the message's payload in quoted-printable.
Also, add an appropriate Content-Transfer-Encoding header.
|
26,772 |
def fold(table, key, f, value=None, presorted=False, buffersize=None,
tempdir=None, cache=True):
return FoldView(table, key, f, value=value, presorted=presorted,
buffersize=buffersize, tempdir=tempdir, cache=cache)
|
Reduce rows recursively via the Python standard :func:`reduce` function.
E.g.::
>>> import petl as etl
>>> table1 = [['id', 'count'],
... [1, 3],
... [1, 5],
... [2, 4],
... [2, 8]]
>>> import operator
>>> table2 = etl.fold(table1, 'id', operator.add, 'count',
... presorted=True)
>>> table2
+-----+-------+
| key | value |
+=====+=======+
| 1 | 8 |
+-----+-------+
| 2 | 12 |
+-----+-------+
See also :func:`petl.transform.reductions.aggregate`,
:func:`petl.transform.reductions.rowreduce`.
|
26,773 |
def _value_formatter(self, float_format=None, threshold=None):
if float_format is None:
float_format = self.float_format
if float_format:
def base_formatter(v):
return float_format(value=v) if notna(v) else self.na_rep
else:
def base_formatter(v):
return str(v) if notna(v) else self.na_rep
if self.decimal != '.':
def decimal_formatter(v):
return base_formatter(v).replace('.', self.decimal, 1)
else:
decimal_formatter = base_formatter
if threshold is None:
return decimal_formatter
def formatter(value):
if notna(value):
if abs(value) > threshold:
return decimal_formatter(value)
else:
return decimal_formatter(0.0)
else:
return self.na_rep
return formatter
|
Returns a function to be applied on each value to format it
|
26,774 |
def _load_fits(self, h5file):
fits = {}
for key in []:
fits[key] = self._load_scalar_fit(fit_key=key, h5file=h5file)
for key in [, ]:
fits[key] = self._load_vector_fit(key, h5file)
return fits
|
Loads fits from h5file and returns a dictionary of fits.
|
26,775 |
def setbit(self, name, offset, value):
value = value and 1 or 0
return self.execute_command('SETBIT', name, offset, value)
|
Flag the ``offset`` in ``name`` as ``value``. Returns a boolean
indicating the previous value of ``offset``.
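Usage sketch (assuming `r` is a connected Redis client):
>>> r.setbit('mykey', 7, 1)   # previous value of bit 7
0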
|
26,776 |
def set_status(self, status: Status, increment_try_count: bool=True,
filename: str=None):
url = self.url_record.url
assert not self._try_count_incremented, (url, status)
if increment_try_count:
self._try_count_incremented = True
_logger.debug(__('Marking URL {0} status {1}.', url, status))  # message reconstructed
url_result = URLResult()
url_result.filename = filename
self.app_session.factory['URLTable'].check_in(
url,
status,
increment_try_count=increment_try_count,
url_result=url_result,
)
self._processed = True
|
Mark the item with the given status.
Args:
status: a value from :class:`Status`.
increment_try_count: if True, increment the ``try_count``
value
|
26,777 |
def chain(first_converter, second_converter, strict: bool):
if isinstance(first_converter, ConversionChain):
if isinstance(second_converter, ConversionChain):
if (first_converter.strict == strict) and (second_converter.strict == strict):
return first_converter.add_conversion_steps(second_converter._converters_list)
else:
if not strict:
return ConversionChain(initial_converters=first_converter._converters_list,
strict_chaining=False) \
.add_conversion_steps(second_converter._converters_list)
else:
raise ValueError('Cannot chain a strict chain with a non-strict chain')  # message reconstructed
else:
if strict == first_converter.strict:
return first_converter.add_conversion_step(second_converter)
else:
if not strict:
return ConversionChain(initial_converters=[second_converter], strict_chaining=False) \
.insert_conversion_steps_at_beginning(first_converter._converters_list)
else:
raise ValueError(
'Cannot chain a strict converter with a non-strict chain')  # message reconstructed
else:
if isinstance(second_converter, ConversionChain):
if strict == second_converter.strict:
return second_converter.insert_conversion_step_at_beginning(first_converter)
else:
if not strict:
return ConversionChain(initial_converters=[first_converter], strict_chaining=False) \
.add_conversion_steps(second_converter._converters_list)
else:
raise ValueError(
'Cannot chain a non-strict chain with a strict converter')  # message reconstructed
else:
return ConversionChain([first_converter, second_converter], strict)
|
Utility method to chain two converters. If any of them is already a ConversionChain, this method "unpacks" it
first. Note: the created conversion chain is created with the provided 'strict' flag, that may be different
from the ones of the converters (if compliant). For example you may chain a 'strict' chain with a 'non-strict'
chain, to produce a 'non-strict' chain.
:param first_converter:
:param second_converter:
:param strict:
:return:
|
26,778 |
def log_future_exceptions(logger, f, ignore=()):
def log_cb(f):
try:
f.result()
except ignore:
pass
except Exception:
logger.exception('Unhandled exception in future')  # message reconstructed
f.add_done_callback(log_cb)
|
Log any exceptions set to a future
Parameters
----------
logger : logging.Logger instance
logger.exception(...) is called if the future resolves with an exception
f : Future object
Future to be monitored for exceptions
ignore : Exception or tuple of Exception
Expected exception(s) to ignore, i.e. they will not be logged.
Notes
-----
This is useful when an async task is started for its side effects without waiting for
the result. The problem is that if the future's resolution is not checked for
exceptions, unhandled exceptions in the async task will be silently ignored.
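Usage sketch with a concurrent.futures executor (names are illustrative):
>>> from concurrent.futures import ThreadPoolExecutor
>>> import logging
>>> pool = ThreadPoolExecutor(1)
>>> f = pool.submit(lambda: 1 / 0)
>>> log_future_exceptions(logging.getLogger(__name__), f)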
|
26,779 |
async def recv(self):
if self.readyState != 'live':
raise MediaStreamError
frame = await self._queue.get()
if frame is None:
self.stop()
raise MediaStreamError
return frame
|
Receive the next frame.
|
26,780 |
async def update(
self,
service_id: str,
version: str,
*,
image: str = None,
rollback: bool = False
) -> bool:
if image is None and rollback is False:
raise ValueError("You need to specify an image.")
inspect_service = await self.inspect(service_id)
spec = inspect_service["Spec"]
if image is not None:
spec["TaskTemplate"]["ContainerSpec"]["Image"] = image
params = {"version": version}
if rollback is True:
params["rollback"] = "previous"
data = json.dumps(clean_map(spec))
await self.docker._query_json(
"services/{service_id}/update".format(service_id=service_id),
method="POST",
data=data,
params=params,
)
return True
|
Update a service.
If rollback is True image will be ignored.
Args:
service_id: ID or name of the service.
version: Version of the service that you want to update.
image: Image to use for the service (ignored if rollback is True).
rollback: Rollback the service to the previous service spec.
Returns:
True if successful.
|
26,781 |
def sbo_version_source(self, slackbuilds):
sbo_versions, sources = [], []
for sbo in slackbuilds:
status(0.02)
sbo_ver = "{0}-{1}".format(sbo, SBoGrep(sbo).version())
sbo_versions.append(sbo_ver)
sources.append(SBoGrep(sbo).source())
return [sbo_versions, sources]
|
Create sbo name with version
|
26,782 |
def p_content(self, content):
content[0] = self.doctype(content[1], content[3], content[4], content[5])
if self.toc:
self.toc.set_articles([a for a in content[0].sections if isinstance(a, Article)])
|
content : TITLE opttexts VERSION opttexts sections
| TITLE STATESTAG VERSION opttexts states_sections
|
26,783 |
def readMyEC2Tag(tagName, connection=None):
assert isinstance(tagName, basestring), ("tagName must be a string but is %r" % tagName)
if not connection:
connection = boto.ec2.connect_to_region(myRegion())
return readInstanceTag(connection=connection,
instanceID=myInstanceID(),
tagName=tagName)
|
Load an EC2 tag for the running instance and return it.
:param str tagName: Name of the tag to read
:param connection: Optional boto connection
|
26,784 |
def exists(self):
try:
key = self.key
except DoesNotExist:
return False
else:
return self.connection.exists(key)
|
Call the exists command to check if the redis key exists for the current
field
|
26,785 |
def from_file(self, filename):
mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream"
headers = {
"Content-Type": mimetype,
"Content-Length": str(os.path.getsize(filename)),
}
file_data = self._pump.request(
"/api/user/{0}/uploads".format(self._pump.client.nickname),
method="POST",
data=open(filename, "rb").read(),
headers=headers,
)
data = {
"verb": "post",
"object": file_data,
}
data.update(self.serialize())
if not self.content and not self.display_name and not self.license:
self._post_activity(data)
else:
self._post_activity(data, unserialize=False)
if self.content:
file_data["content"] = self.content
if self.display_name:
file_data["displayName"] = self.display_name
if self.license:
file_data["license"] = self.license
data = {
"verb": "update",
"object": file_data,
}
self._post_activity(data)
return self
|
Uploads a file from a filename on your system.
:param filename: Path to file on your system.
Example:
>>> myimage.from_file('/path/to/dinner.png')
|
26,786 |
def export(results_dir, filename, do_not_try_parsing, parameters):
_, extension = os.path.splitext(filename)
campaign = sem.CampaignManager.load(results_dir)
[params, defaults] = zip(*get_params_and_defaults(campaign.db.get_params(),
campaign.db))
if do_not_try_parsing:
parsing_function = None
else:
parsing_function = sem.utils.automatic_parser
if not parameters:
string_defaults = list()
for idx, d in enumerate(defaults):
string_defaults.append(str(d))
parameter_query = query_parameters(params, string_defaults)
else:
parameter_query = import_parameters_from_file(parameters)
if extension == ".mat":
campaign.save_to_mat_file(parameter_query, parsing_function, filename,
runs=click.prompt("Runs", type=int))
elif extension == ".npy":
campaign.save_to_npy_file(parameter_query, parsing_function, filename,
runs=click.prompt("Runs", type=int))
elif extension == "":
campaign.save_to_folders(parameter_query, filename,
runs=click.prompt("Runs", type=int))
else:
raise ValueError("Format not recognized")
|
Export results to file.
An extension in filename is required to deduce the file type. If no
extension is specified, a directory tree export will be used. Note that
this command automatically tries to parse the simulation output unless
do_not_try_parsing is set.
Supported extensions:
.mat (Matlab file),
.npy (Numpy file),
no extension (Directory tree)
|
26,787 |
def _add(self, codeobj):
assert isinstance(codeobj, (CodeStatement, CodeExpression))
self.body._add(codeobj)
|
Add a child (statement) to this object.
|
26,788 |
def _contains_cftime_datetimes(array) -> bool:
try:
from cftime import datetime as cftime_datetime
except ImportError:
return False
else:
if array.dtype == np.dtype("O") and array.size > 0:
sample = array.ravel()[0]
if isinstance(sample, dask_array_type):
sample = sample.compute()
if isinstance(sample, np.ndarray):
sample = sample.item()
return isinstance(sample, cftime_datetime)
else:
return False
|
Check if an array contains cftime.datetime objects
|
26,789 |
def removeFriend(self, user):
user = self.user(user)
url = self.FRIENDUPDATE if user.friend else self.REMOVEINVITE
url = url.format(userId=user.id)
return self.query(url, self._session.delete)
|
Remove the specified user from all sharing.
Parameters:
user (MyPlexUser or str): MyPlexUser object, username, or email of the user to be removed.
|
26,790 |
def returns(schema):
validate = parse(schema).validate
@decorator
def validating(func, *args, **kwargs):
ret = func(*args, **kwargs)
validate(ret, adapt=False)
return ret
return validating
|
Create a decorator for validating function return value.
Example::
@accepts(a=int, b=int)
@returns(int)
def f(a, b):
return a + b
:param schema: The schema used to validate the return value.
|
26,791 |
def check_url (aggregate):
while True:
try:
aggregate.urlqueue.join(timeout=30)
break
except urlqueue.Timeout:
aggregate.remove_stopped_threads()
if not any(aggregate.get_check_threads()):
break
|
Helper function that waits for the URL queue to drain.
|
26,792 |
def attachment_both(self, files, parentid=None):
orig = self._attachment_template("imported_file")
to_add = [orig.copy() for f in files]
for idx, tmplt in enumerate(to_add):
tmplt["title"] = files[idx][0]
tmplt["filename"] = files[idx][1]
if parentid:
return self._attachment(to_add, parentid)
else:
return self._attachment(to_add)
|
Add child attachments using title and filename.
Arguments:
One or more lists or tuples containing (title, file path) pairs
An optional parent Item ID; if given, the files are attached as children of that item
|
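A hedged usage sketch; zot stands for a client instance exposing the method above, and the paths and item ID are illustrative:

files = [
    ("Annotated PDF", "/path/to/paper.pdf"),
    ("Supplement", "/path/to/supplement.pdf"),
]
# With a parent ID the uploads become child attachments of that item.
resp = zot.attachment_both(files, parentid="ABCD1234")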
26,793 |
def getZoom(self, resolution):
"Return the zoom level for a given resolution"
assert resolution in self.RESOLUTIONS
return self.RESOLUTIONS.index(resolution)
|
Return the zoom level for a given resolution
|
26,794 |
def start_workflow(name, config, *, queue=DefaultJobQueueName.Workflow,
clear_data_store=True, store_args=None):
try:
wf = Workflow.from_name(name,
queue=queue,
clear_data_store=clear_data_store,
arguments=store_args)
except DirectedAcyclicGraphInvalid as e:
raise WorkflowDefinitionError(workflow_name=name,
graph_name=e.graph_name)
celery_app = create_app(config)
result = celery_app.send_task(JobExecPath.Workflow,
args=(wf,), queue=queue, routing_key=queue)
return result.id
|
Start a single workflow by sending it to the workflow queue.
Args:
name (str): The name of the workflow that should be started. Refers to the
name of the workflow file without the .py extension.
config (Config): Reference to the configuration object from which the
settings for the workflow are retrieved.
queue (str): Name of the queue the workflow should be scheduled to.
clear_data_store (bool): Remove any documents created during the workflow
run in the data store after the run.
store_args (dict): Dictionary of additional arguments that are ingested into the
data store prior to the execution of the workflow.
Returns:
str: The ID of the workflow job.
Raises:
WorkflowArgumentError: If the workflow requires arguments to be set in store_args
that were not supplied to the workflow.
WorkflowImportError: If the import of the workflow fails.
|
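A hedged invocation sketch; config is assumed to be an already constructed Config object, and the workflow name and store_args values are illustrative:

# "data_pipeline" refers to a discoverable data_pipeline.py workflow file.
job_id = start_workflow(
    "data_pipeline",
    config,
    queue="workflow",
    store_args={"input_file": "run42.h5"},  # hypothetical workflow argument
)
print("Workflow job id:", job_id)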
26,795 |
def head(self, path=None, url_kwargs=None, **kwargs):
return self._session.head(self._url(path, url_kwargs), **kwargs)
|
Sends a HEAD request.
:param path:
The HTTP path (either absolute or relative).
:param url_kwargs:
Parameters to override in the generated URL. See `~hyperlink.URL`.
:param **kwargs:
Optional arguments that ``request`` takes.
:return: response object
|
26,796 |
def make_request(self, url, method='get', headers=None, data=None,
callback=None, errors=STRICT, verify=False, timeout=None, **params):
error_modes = (STRICT, GRACEFUL, IGNORE)
error_mode = errors or GRACEFUL
if error_mode.lower() not in error_modes:
raise ValueError(
'Possible values for errors argument are: %s' % ', '.join(error_modes))
if callback is None:
callback = self._default_resp_callback
request = getattr(requests, method.lower())
log.debug('* Request URL: %s' % url)
log.debug('* Request method: %s' % method)
log.debug('* Request query params: %s' % params)
log.debug('* Request headers: %s' % headers)
log.debug('* Request timeout: %s' % timeout)
r = request(
url, headers=headers, data=data, verify=verify, timeout=timeout, params=params)
log.debug('* Response URL: %s' % r.url)
try:
r.raise_for_status()
return callback(r)
except Exception as e:
return self._with_error_handling(r, e,
error_mode, self.response_format)
|
Reusable method for performing requests.
:param url - URL to request
:param method - request method, default is 'get'
:param headers - request headers
:param data - post data
:param callback - callback to be applied to response,
default callback will parse response as json object.
:param errors - specifies communication errors handling mode, possible
    values are:
    * strict (default) - throw an error as soon as one occurred
    * graceful - ignore certain errors, e.g. EmptyResponse
    * ignore - ignore all errors and return a result in any case.
      NOTE that this does not mean no exceptions can be raised from
      this method; it mostly ignores communication-related errors.
    * None or empty string equals to default
:param verify - whether or not to verify SSL cert, default to False
:param timeout - the timeout of the request in second, default to None
:param params - additional query parameters for request
|
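A hedged call sketch; client stands for an instance of the class hosting the method, and the URL and query parameter are illustrative:

result = client.make_request(
    "https://api.example.com/v1/items",
    method="get",
    headers={"Accept": "application/json"},
    errors="graceful",
    timeout=10,
    q="search-term",  # forwarded via **params as a query parameter
)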
26,797 |
def find_local_boundary(tri, triangles):
edges = []
for triangle in triangles:
for i in range(3):
pt1 = tri.simplices[triangle][i]
pt2 = tri.simplices[triangle][(i + 1) % 3]
if (pt1, pt2) in edges:
edges.remove((pt1, pt2))
elif (pt2, pt1) in edges:
edges.remove((pt2, pt1))
else:
edges.append((pt1, pt2))
return edges
|
r"""Find and return the outside edges of a collection of natural neighbor triangles.
There is no guarantee that this boundary is convex, so ConvexHull is not
sufficient in some situations.
Parameters
----------
tri: Object
A Delaunay Triangulation
triangles: (N, ) array
List of natural neighbor triangles.
Returns
-------
edges: (2, N) ndarray
List of vertex codes that form outer edges of
a group of natural neighbor triangles.
|
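A runnable sketch of feeding the function, under the assumption that the triangulation comes from scipy.spatial.Delaunay; the point set is arbitrary:

import numpy as np
from scipy.spatial import Delaunay

points = np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0.5, 0.5]])
tri = Delaunay(points)
# Boundary of the first two triangles only; returns (vertex, vertex) pairs.
boundary_edges = find_local_boundary(tri, [0, 1])
print(boundary_edges)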
26,798 |
def _initialize_part_map(self):
# NOTE: the original mapping keys were lost in extraction; "assessmentParts"
# and "questions" are plausible placeholders, not confirmed names.
self._my_map["assessmentParts"] = []
self._my_map["questions"] = []
item_ids = self._assessment_part.get_item_ids()
if item_ids.available():
self._load_simple_section_questions(item_ids)
else:
pass
|
Sets up assessmentPartMap with as much information as is initially available.
|
26,799 |
def noaa_prompt_1():
print("Enter the project information below. We'll use this to create the WDS URL")
print("What is the project name?")
_project = input(">")
print("What is the project version?")
_version = input(">")
return _project, _version
|
For converting LiPD files to NOAA, we need a couple more pieces of information to create the WDS links
:return str _project: Project name
:return str _version: Version number (returned as the entered string)
|