Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
23,100 | def format_citations(zid, url=, hits=10, tag_prefix=):
url = (
.format(id=zid, url=url, hits=hits))
metadata = requests.get(url).json()
lines = []
for i, hit in enumerate(metadata[][]):
version = hit[][][len(tag_prefix):]
lines.append( * len(version))
lines.append(version)
lines.append( * len(version))
lines.append()
lines.append(
.format(**hit[]))
if i < hits - 1:
lines.append()
return .join(lines) | Query and format a citations page from Zenodo entries
Parameters
----------
zid : `int`, `str`
the Zenodo ID of the target record
url : `str`, optional
the base URL of the Zenodo host, defaults to ``https://zenodo.org``
hits : `int`, optional
the maximum number of hits to show, default: ``10``
tag_prefix : `str`, optional
the prefix for git tags. This is removed to generate the section
headers in the output RST
Returns
-------
rst : `str`
an RST-formatted string of DOI badges with URLs |
23,101 | def convert_dotted(params):
if not isinstance(params, dictset):
params = dictset(params)
dotted_items = {k: v for k, v in params.items() if '.' in k}
if dotted_items:
dicts = [str2dict(key, val) for key, val in dotted_items.items()]
dotted = six.functools.reduce(merge_dicts, dicts)
params = params.subset(['-' + k for k in dotted_items.keys()])
params.update(dict(dotted))
return params | Convert dotted keys in :params: dictset to a nested dictset.
E.g. {'settings.foo': 'bar'} -> {'settings': {'foo': 'bar'}} |
23,102 | def reconstruct_from_shape(self, shape, optimize=False):
axes_lengths = list(self.elementary_axes_lengths)
if self.ellipsis_positions != (math.inf, math.inf):
if len(shape) < len(self.input_composite_axes) - 1:
raise EinopsError('Expected at least {} dimensions, got {}'.format(
len(self.input_composite_axes) - 1, len(shape)))
else:
if len(shape) != len(self.input_composite_axes):
raise EinopsError('Expected {} dimensions, got {}'.format(len(self.input_composite_axes), len(shape)))
for input_axis, (known_axes, unknown_axes) in enumerate(self.input_composite_axes):
before_ellipsis = input_axis
after_ellipsis = input_axis + len(shape) - len(self.input_composite_axes)
if input_axis == self.ellipsis_positions[0]:
assert len(known_axes) == 0 and len(unknown_axes) == 1
unknown_axis, = unknown_axes
ellipsis_shape = shape[before_ellipsis:after_ellipsis + 1]
if any(d is None for d in ellipsis_shape):
raise EinopsError("CouldnShape mismatch, {} != {}t divide axis of length {} in chunks of {}".format(
length, known_product))
unknown_axis, = unknown_axes
axes_lengths[unknown_axis] = length // known_product
init_shapes = axes_lengths
reduced_axes_lengths = [dim for i, dim in enumerate(axes_lengths) if i not in self.reduced_elementary_axes]
final_shapes = []
for output_axis, grouping in enumerate(self.output_composite_axes):
if output_axis == self.ellipsis_positions[1]:
final_shapes.extend(ellipsis_shape)
else:
lengths = [reduced_axes_lengths[elementary_axis] for elementary_axis in grouping]
if any(l is None for l in lengths):
final_shapes.append(None)
else:
final_shapes.append(_product(lengths))
reduced_axes = self.reduced_elementary_axes
axes_reordering = self.final_axes_grouping_flat
if optimize:
return _optimize_transformation(init_shapes, reduced_axes, axes_reordering, final_shapes)
else:
return init_shapes, reduced_axes, axes_reordering, final_shapes | Shape is a tuple that may contain integers, shape symbols (tf, keras, theano) and UnknownSize (keras, mxnet)
known axes can be integers or symbols, but not Nones |
23,103 | def execute(self, *args, **kwargs):
with self:
self._cursor.execute(*args, **kwargs)
return self | Analogous to :any:`sqlite3.Cursor.execute`
:returns: self |
23,104 | def _general_error_handler(http_error):
message = str(http_error)
if http_error.respbody is not None:
message += + http_error.respbody.decode()
raise AzureHttpError(message, http_error.status) | Simple error handler for azure. |
23,105 | def selected_exercise(func):
@wraps(func)
def inner(*args, **kwargs):
exercise = Exercise.get_selected()
return func(exercise, *args, **kwargs)
return inner | Passes the selected exercise as the first argument to func. |
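A minimal usage sketch for the decorator in row 23,105; the `show` command and its body below are hypothetical, and only `selected_exercise` and the injected `Exercise` come from the row:

```python
# Hypothetical command using the decorator above. The decorator resolves the
# currently selected Exercise and passes it in as the first argument, so the
# caller never supplies it explicitly.
@selected_exercise
def show(exercise, verbose=False):
    print(exercise)

show()  # equivalent to calling the wrapped function with Exercise.get_selected()
```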
23,106 | def fill_blind_pores(im):
r
im = sp.copy(im)
holes = find_disconnected_voxels(im)
im[holes] = False
return im | r"""
Fills all pores that are not connected to the edges of the image.
Parameters
----------
im : ND-array
The image of the porous material
Returns
-------
image : ND-array
A version of ``im`` but with all the disconnected pores removed.
See Also
--------
find_disconnected_voxels |
23,107 | def optimize(self, objective_fct, iterations=None, min_iterations=1,
args=(), verb_disp=None, logger=None, call_back=None):
assert iterations is None or min_iterations <= iterations
if not hasattr(self, 'logger'):
self.logger = logger
logger = self.logger = logger or self.logger
if not isinstance(call_back, list):
call_back = [call_back]
citer = 0
while not self.stop() or citer < min_iterations:
if iterations is not None and citer >= iterations:
return self.result()
citer += 1
X = self.ask()
fitvals = [objective_fct(x, *args) for x in X]
self.tell(X, fitvals)
self.disp(verb_disp)
for f in call_back:
f is None or f(self)
logger.add(self) if logger else None
try:
logger.add(self, modulo=bool(logger.modulo)) if logger else None
except TypeError:
print( +
+
)
except AttributeError:
print( +
+
)
if verb_disp:
self.disp(1)
if verb_disp in (1, True):
print('termination by', self.stop())
print('best f-value =', self.result()[1])
print('solution =', self.result()[0])
return self | find minimizer of `objective_fct`.
CAVEAT: the return value for `optimize` has changed to ``self``.
Arguments
---------
`objective_fct`
function to be minimized
`iterations`
number of (maximal) iterations, while ``not self.stop()``
`min_iterations`
minimal number of iterations, even if ``not self.stop()``
`args`
arguments passed to `objective_fct`
`verb_disp`
print to screen every `verb_disp` iteration, if ``None``
the value from ``self.logger`` is "inherited", if
available.
``logger``
a `BaseDataLogger` instance, which must be compatible
with the type of ``self``.
``call_back``
call back function called like ``call_back(self)`` or
a list of call back functions.
``return self``, that is, the `OOOptimizer` instance.
Example
-------
>>> import cma
>>> es = cma.CMAEvolutionStrategy(7 * [0.1], 0.5
... ).optimize(cma.fcts.rosen, verb_disp=100)
(4_w,9)-CMA-ES (mu_w=2.8,w_1=49%) in dimension 7 (seed=630721393)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 9 3.163954777181882e+01 1.0e+00 4.12e-01 4e-01 4e-01 0:0.0
2 18 3.299006223906629e+01 1.0e+00 3.60e-01 3e-01 4e-01 0:0.0
3 27 1.389129389866704e+01 1.1e+00 3.18e-01 3e-01 3e-01 0:0.0
100 900 2.494847340045985e+00 8.6e+00 5.03e-02 2e-02 5e-02 0:0.3
200 1800 3.428234862999135e-01 1.7e+01 3.77e-02 6e-03 3e-02 0:0.5
300 2700 3.216640032470860e-04 5.6e+01 6.62e-03 4e-04 9e-03 0:0.8
400 3600 6.155215286199821e-12 6.6e+01 7.44e-06 1e-07 4e-06 0:1.1
438 3942 1.187372505161762e-14 6.0e+01 3.27e-07 4e-09 9e-08 0:1.2
438 3942 1.187372505161762e-14 6.0e+01 3.27e-07 4e-09 9e-08 0:1.2
('termination by', {'tolfun': 1e-11})
('best f-value =', 1.1189867885201275e-14)
('solution =', array([ 1. , 1. , 1. , 0.99999999, 0.99999998,
0.99999996, 0.99999992]))
>>> print(es.result()[0])
array([ 1. 1. 1. 0.99999999 0.99999998 0.99999996
0.99999992]) |
23,108 | def exprvar(name, index=None):
r
bvar = boolfunc.var(name, index)
try:
var = _LITS[bvar.uniqid]
except KeyError:
var = _LITS[bvar.uniqid] = Variable(bvar)
return var | r"""Return a unique Expression variable.
A Boolean *variable* is an abstract numerical quantity that may assume any
value in the set :math:`B = \{0, 1\}`.
The ``exprvar`` function returns a unique Boolean variable instance
represented by a logic expression.
Variable instances may be used to symbolically construct larger expressions.
A variable is defined by one or more *names*,
and zero or more *indices*.
Multiple names establish hierarchical namespaces,
and multiple indices group several related variables.
If the ``name`` parameter is a single ``str``,
it will be converted to ``(name, )``.
The ``index`` parameter is optional;
when empty, it will be converted to an empty tuple ``()``.
If the ``index`` parameter is a single ``int``,
it will be converted to ``(index, )``.
Given identical names and indices, the ``exprvar`` function will always
return the same variable:
>>> exprvar('a', 0) is exprvar('a', 0)
True
To create several single-letter variables:
>>> a, b, c, d = map(exprvar, 'abcd')
To create variables with multiple names (inner-most first):
>>> fifo_push = exprvar(('push', 'fifo'))
>>> fifo_pop = exprvar(('pop', 'fifo'))
.. seealso::
For creating arrays of variables with incremental indices,
use the :func:`pyeda.boolalg.bfarray.exprvars` function. |
23,109 | def position_fingerprint(
word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG, bits_per_letter=3
):
return Position().fingerprint(word, n_bits, most_common, bits_per_letter) | Return the position fingerprint.
This is a wrapper for :py:meth:`Position.fingerprint`.
Parameters
----------
word : str
The word to fingerprint
n_bits : int
Number of bits in the fingerprint returned
most_common : list
The most common tokens in the target language, ordered by frequency
bits_per_letter : int
The bits to assign for letter position
Returns
-------
int
The position fingerprint
Examples
--------
>>> bin(position_fingerprint('hat'))
'0b1110100011111111'
>>> bin(position_fingerprint('niall'))
'0b1111110101110010'
>>> bin(position_fingerprint('colin'))
'0b1111111110010111'
>>> bin(position_fingerprint('atcg'))
'0b1110010001111111'
>>> bin(position_fingerprint('entreatment'))
'0b101011111111' |
23,110 | def getByteStatistic(self, wanInterfaceId=1, timeout=1):
namespace = Wan.getServiceType("getByteStatistic") + str(wanInterfaceId)
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "GetTotalBytesSent", timeout=timeout)
results2 = self.execute(uri, namespace, "GetTotalBytesReceived", timeout=timeout)
return [int(results["NewTotalBytesSent"]),
int(results2["NewTotalBytesReceived"])] | Execute GetTotalBytesSent&GetTotalBytesReceived actions to get WAN statistics.
:param int wanInterfaceId: the id of the WAN device
:param float timeout: the timeout to wait for the action to be executed
:return: a list of two values, total bytes sent and total bytes received
:rtype: list[int] |
23,111 | def run_scratch(self, path_to_scratch, num_cores=1, outname=None, outdir=None, force_rerun=False):
if not outname:
outname = self.project_name
if not outdir:
outdir =
outname = op.join(outdir, outname)
self.out_sspro = .format(outname)
self.out_sspro8 = .format(outname)
self.out_accpro = .format(outname)
self.out_accpro20 = .format(outname)
ssbio.utils.command_runner(
shell_command=.format(path_to_scratch, self.seq_file, outname, num_cores),
force_rerun_flag=force_rerun, outfile_checker=.format(outname)) | Run SCRATCH on the sequence_file that was loaded into the class.
Args:
path_to_scratch: Path to the SCRATCH executable, run_SCRATCH-1D_predictors.sh
outname: Prefix to name the output files
outdir: Directory to store the output files
force_rerun: Flag to force rerunning of SCRATCH even if the output files exist
Returns: |
23,112 | def from_df(cls, df):
return cls(df[], df[], df[],
ecc=df[], mean_anomaly=df[],
obsx=df[], obsy=df[], obsz=df[]) | Creates an OrbitPopulation from a DataFrame.
:param df:
:class:`pandas.DataFrame` object. Must contain the following
columns: ``['M1','M2','P','ecc','mean_anomaly','obsx','obsy','obsz']``,
i.e., as what is accessed via :attr:`OrbitPopulation.dataframe`.
:return:
:class:`OrbitPopulation`. |
23,113 | def _htmlify_text(self, s):
colored = self._handle_ansi_color_codes(html.escape(s))
return linkify(self._buildroot, colored, self._linkify_memo).replace(, ) | Make text HTML-friendly. |
23,114 | def flush(self):
if os.path.isdir(self._directory):
for root, dirs, files in os.walk(self._directory, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name)) | Remove all items from the cache. |
23,115 | def sru(x,
num_layers=2,
activation=None,
initial_state=None,
name=None,
reuse=None):
if num_layers < 1:
raise ValueError("Number of layers must be positive: %d" % num_layers)
if is_xla_compiled():
return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
try:
from tensorflow.contrib.recurrent.python.ops import functional_rnn
except ImportError:
tf.logging.info("functional_rnn not found, using sru_with_scan instead")
return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse):
x_shape = shape_list(x)
x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]])
initial_state = initial_state or tf.zeros([x_shape[0], x_shape[-1]])
cell = CumsumprodCell(initial_state)
for i in range(num_layers):
x_orig = x
x, f, r = tf.split(
layers().Dense(3 * x_shape[-1], name="kernel_%d" % i)(x), 3, axis=-1)
f, r = tf.sigmoid(f), tf.sigmoid(r)
x_times_one_minus_f = x * (1.0 - f)
concat = tf.concat([x_times_one_minus_f, f], axis=-1)
c_states, _ = functional_rnn.functional_rnn(
cell, concat, time_major=False)
if activation is not None:
c_states = activation(c_states)
h = c_states * r + (1.0 - r) * x_orig
x = h
return tf.reshape(x, x_shape) | SRU cell as in https://arxiv.org/abs/1709.02755.
As defined in the paper:
(1) x'_t = W x_t
(2) f_t = sigmoid(Wf x_t + bf)
(3) r_t = sigmoid(Wr x_t + br)
(4) c_t = f_t * c_{t-1} + (1 - f_t) * x'_t
(5) h_t = r_t * activation(c_t) + (1 - r_t) * x_t
This version uses functional ops to be faster on GPUs with TF-1.9+.
Args:
x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
num_layers: How many SRU layers; default is 2 as results for 1 disappoint.
activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
initial_state: Optional initial c-state, set to zeros if None.
name: Optional name, "sru" by default.
reuse: Optional reuse.
Returns:
A tensor of the same shape as x.
Raises:
ValueError: if num_layers is not positive. |
23,116 | def as_dict(self, cache=None, fetch=True):
if not self._fetched and fetch:
info = self.fetch(cache)
elif self._use_cache(cache):
info = self._attrs.copy()
else:
info = {}
info.update(url=self.url)
return info | Return torrent properties as a dictionary.
Set the cache flag to False to disable the cache. On the other hand,
set the fetch flag to False to avoid fetching data if it's not cached. |
23,117 | def get_site_model(oqparam):
req_site_params = get_gsim_lt(oqparam).req_site_params
arrays = []
for fname in oqparam.inputs[]:
if isinstance(fname, str) and fname.endswith():
sm = read_csv(fname)
if in sm.dtype.names:
raise InvalidFile(
% fname)
z = numpy.zeros(len(sm), sorted(sm.dtype.descr))
for name in z.dtype.names:
z[name] = sm[name]
arrays.append(z)
continue
nodes = nrml.read(fname).siteModel
params = [valid.site_param(node.attrib) for node in nodes]
missing = req_site_params - set(params[0])
if in missing:
missing -= {}
for param in params:
param[] = False
if in missing:
missing -= {}
for param in params:
param[] = False
if missing:
raise InvalidFile( %
(oqparam.inputs[],
.join(missing)))
site_model_dt = numpy.dtype([(p, site.site_param_dt[p])
for p in sorted(params[0])])
sm = numpy.array([tuple(param[name] for name in site_model_dt.names)
for param in params], site_model_dt)
arrays.append(sm)
return numpy.concatenate(arrays) | Convert the NRML file into an array of site parameters.
:param oqparam:
an :class:`openquake.commonlib.oqvalidation.OqParam` instance
:returns:
an array with fields lon, lat, vs30, ... |
23,118 | def rpc(self, request, args):
if request.method != 'POST':
return self.error(405, request)
payload = request.get_data(as_text=True) or '{}'
request_method = request.args.get('method')
if not request_method:
return self.error(
400, request,
message="A query string parameter method= is missing."
)
name_map = self.service.__nirum_method_names__
try:
method_facial_name = name_map.behind_names[request_method]
except KeyError:
return self.error(
400,
request,
message="Service doesn{}{}{}{}_return_v{0}{1}{2}'.".format(
typing._type_repr(result.__class__),
request_method,
typing._type_repr(return_type)
)
)
else:
return self._raw_response(200, serialize_meta(result)) | RPC
:param request:
:args ???: |
23,119 | def validate_fields_only_with_permissions(self, val, caller_permissions):
self.validate_fields_only(val)
for extra_permission in caller_permissions.permissions:
all_field_names = '_all_{}_field_names_'.format(extra_permission)
for field_name in getattr(self.definition, all_field_names, set()):
if not hasattr(val, field_name):
raise ValidationError("missing required field '%s'" % field_name) | To pass field validation, no required field should be missing.
This method assumes that the contents of each field have already been
validated on assignment, so it's merely a presence check.
Should only be called for callers with extra permissions. |
23,120 | def from_pyfile(self, filename: str, silent: bool=False) -> None:
file_path = self.root_path / filename
try:
spec = importlib.util.spec_from_file_location("module.name", file_path)
if spec is None:
parser = ConfigParser()
parser.optionxform = str
with open(file_path) as file_:
config_str = '[section]\n' + file_.read()
parser.read_string(config_str)
self.from_mapping(parser['section'])
else:
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
self.from_object(module)
except (FileNotFoundError, IsADirectoryError):
if not silent:
raise | Load the configuration from a Python cfg or py file.
See Python's ConfigParser docs for details on the cfg format.
It is a common practice to load the defaults from the source
using the :meth:`from_object` and then override with a cfg or
py file, for example
.. code-block:: python
app.config.from_object('config_module')
app.config.from_pyfile('production.cfg')
Arguments:
filename: The filename which when appended to
:attr:`root_path` gives the path to the file |
23,121 | def enroll(self, uuid, organization, from_date=MIN_PERIOD_DATE, to_date=MAX_PERIOD_DATE,
merge=False):
if not uuid or not organization:
return CMD_SUCCESS
try:
api.add_enrollment(self.db, uuid, organization, from_date, to_date)
code = CMD_SUCCESS
except (NotFoundError, InvalidValueError) as e:
self.error(str(e))
code = e.code
except AlreadyExistsError as e:
if not merge:
msg_data = {
'uuid': uuid,
'org': organization,
'from_dt': str(from_date),
'to_dt': str(to_date)
}
msg = "enrollment for '%(uuid)s' at '%(org)s' (from: %(from_dt)s, to: %(to_dt)s) already exists in the registry"
msg = msg % msg_data
self.error(msg)
code = e.code
if not merge:
return code
try:
api.merge_enrollments(self.db, uuid, organization)
except (NotFoundError, InvalidValueError) as e:
raise RuntimeError(str(e))
return CMD_SUCCESS | Enroll a unique identity in an organization.
This method adds a new relationship between the unique identity,
identified by <uuid>, and <organization>. Both entities must exist
on the registry before creating the new enrollment.
The period of the enrollment can be given with the parameters <from_date>
and <to_date>, where "from_date <= to_date". Default values for these
dates are '1900-01-01' and '2100-01-01'.
When "merge" parameter is set to True, those overlapped enrollments related
to <uuid> and <organization> found on the registry will be merged. The given
enrollment will be also merged.
:param uuid: unique identifier
:param organization: name of the organization
:param from_date: date when the enrollment starts
:param to_date: date when the enrollment ends
:param merge: merge overlapped enrollments; by default, it is set to False |
23,122 | def _copy_listed(self: T, names) -> T:
variables = OrderedDict()
coord_names = set()
indexes = OrderedDict()
for name in names:
try:
variables[name] = self._variables[name]
except KeyError:
ref_name, var_name, var = _get_virtual_variable(
self._variables, name, self._level_coords, self.dims)
variables[var_name] = var
if ref_name in self._coord_names or ref_name in self.dims:
coord_names.add(var_name)
if (var_name,) == var.dims:
indexes[var_name] = var.to_index()
needed_dims = set()
for v in variables.values():
needed_dims.update(v.dims)
dims = dict((k, self.dims[k]) for k in needed_dims)
for k in self._coord_names:
if set(self.variables[k].dims) <= needed_dims:
variables[k] = self._variables[k]
coord_names.add(k)
if k in self.indexes:
indexes[k] = self.indexes[k]
return self._replace(variables, coord_names, dims, indexes=indexes) | Create a new Dataset with the listed variables from this dataset and
the all relevant coordinates. Skips all validation. |
23,123 | def get_group_policy(self, group_name, policy_name):
params = { : group_name,
: policy_name}
return self.get_response(, params, verb=) | Retrieves the specified policy document for the specified group.
:type group_name: string
:param group_name: The name of the group the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to get. |
23,124 | def lharmonicmean (inlist):
sum = 0
for item in inlist:
sum = sum + 1.0/item
return len(inlist) / sum | Calculates the harmonic mean of the values in the passed list.
That is: n / (1/x1 + 1/x2 + ... + 1/xn). Assumes a '1D' list.
Usage: lharmonicmean(inlist) |
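A self-contained copy of the harmonic-mean routine from row 23,124 with a worked example (the accumulator is renamed so it no longer shadows the built-in `sum`):

```python
def lharmonicmean(inlist):
    # n / (1/x1 + 1/x2 + ... + 1/xn)
    total = 0.0
    for item in inlist:
        total += 1.0 / item
    return len(inlist) / total

print(lharmonicmean([1, 2, 4]))  # 3 / (1 + 0.5 + 0.25) = 3 / 1.75 ≈ 1.7143
```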
23,125 | def printf(format, *args):
sys.stdout.write(str(format) % args)
return if_(args, lambda: args[-1], lambda: format) | Format args with the first argument as format string, and write.
Return the last arg, or format itself if there are no args. |
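A runnable sketch of the `printf` helper from row 23,125; the `if_` helper is not shown in the row, so the stand-in below only assumes it lazily evaluates one of two thunks:

```python
import sys

def if_(test, then_thunk, else_thunk):
    # Assumed behaviour of the missing `if_` helper: pick one branch lazily.
    return then_thunk() if test else else_thunk()

def printf(format, *args):
    sys.stdout.write(str(format) % args)
    return if_(args, lambda: args[-1], lambda: format)

last = printf("%s scored %d points\n", "Ada", 42)
print(last)  # 42 -- the last argument is returned
```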
23,126 | def get_setter(cls, prop_name,
user_setter=None, setter_takes_name=False,
user_getter=None, getter_takes_name=False):
if user_setter:
if setter_takes_name:
def _setter(self, val):
return user_setter(self, prop_name, val)
else: _setter = user_setter
return _setter
def _setter(self, val):
setattr(self, PROP_NAME % {'prop_name': prop_name}, val)
return
return _setter | Similar to get_getter, but for setting property
values. If user_getter is specified, that it may be used to
get the old value of the property before setting it (this
is the case in some derived classes' implementation). if
getter_takes_name is True and user_getter is not None, than
the property name is passed to the given getter to retrieve
the property value. |
23,127 | def load(fin, dtype=np.float32, max_vocab=None):
vocab = {}
arr = None
i = 0
for line in fin:
if max_vocab is not None and i >= max_vocab:
break
try:
token, v = _parse_line(line, dtype)
except (ValueError, IndexError):
raise ParseError(b'Parsing error: ' + line)
if token in vocab:
parse_warn(b'Duplicate token: ' + token)
continue
if arr is None:
arr = np.array(v, dtype=dtype).reshape(1, -1)
else:
if arr.shape[1] != len(v):
raise ParseError(b'Vector size mismatch: ' + line)
arr = np.append(arr, [v], axis=0)
vocab[token] = i
i += 1
return arr, vocab | Load word embedding file.
Args:
fin (File): File object to read. File should be open for reading ascii.
dtype (numpy.dtype): Element data type to use for the array.
max_vocab (int): Number of vocabulary to read.
Returns:
numpy.ndarray: Word embedding representation vectors
dict: Mapping from words to vector indices. |
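A hypothetical call to the loader in row 23,127; the filename is a placeholder, and the byte-string error messages in the row suggest the file should be opened in binary mode:

```python
import numpy as np

# "vectors.txt" is a placeholder for a whitespace-separated embedding file.
with open("vectors.txt", "rb") as fin:
    arr, vocab = load(fin, dtype=np.float32, max_vocab=50000)

print(arr.shape)   # (number_of_words, embedding_dimension)
print(len(vocab))  # number_of_words
```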
23,128 | def make_article_info_correspondences(self, article_info_div):
corresps = self.article.root.xpath()
if corresps:
corresp_div = etree.SubElement(article_info_div,
,
{: })
for corresp in corresps:
sub_div = etree.SubElement(corresp_div,
,
{: corresp.attrib[]})
append_all_below(sub_div, corresp) | Articles generally provide a first contact, typically an email address
for one of the authors. This will supply that content. |
23,129 | def _expand_subsystems(self, scope_infos):
def subsys_deps(subsystem_client_cls):
for dep in subsystem_client_cls.subsystem_dependencies_iter():
if dep.scope != GLOBAL_SCOPE:
yield self._scope_to_info[dep.options_scope]
for x in subsys_deps(dep.subsystem_cls):
yield x
for scope_info in scope_infos:
yield scope_info
if scope_info.optionable_cls is not None:
if issubclass(scope_info.optionable_cls, GlobalOptionsRegistrar):
for scope, info in self._scope_to_info.items():
if info.category == ScopeInfo.SUBSYSTEM and enclosing_scope(scope) == GLOBAL_SCOPE:
yield info
for subsys_dep in subsys_deps(info.optionable_cls):
yield subsys_dep
elif issubclass(scope_info.optionable_cls, SubsystemClientMixin):
for subsys_dep in subsys_deps(scope_info.optionable_cls):
yield subsys_dep | Add all subsystems tied to a scope, right after that scope. |
23,130 | def build_instance_name(inst, obj=None):
if obj is None:
for _ in inst.properties.values():
inst.path.keybindings.__setitem__(_.name, _.value)
return inst.path
if not isinstance(obj, list):
return build_instance_name(inst, get_keys_from_class(obj))
keys = {}
for _ in obj:
if _ not in inst.properties:
raise pywbem.CIMError(pywbem.CIM_ERR_FAILED,
"Instance of %s is missing key property %s" \
%(inst.classname, _))
keys[_] = inst[_]
inst.path = pywbem.CIMInstanceName(classname=inst.classname,
keybindings=keys,
namespace=inst.path.namespace,
host=inst.path.host)
return inst.path | Return an instance name from an instance, and set instance.path |
23,131 | def close_socket(sock):
if sock:
try:
sock.shutdown(socket.SHUT_RDWR)
except Exception:
pass
try:
sock.close()
except Exception:
pass | Shutdown and close the socket. |
23,132 | def _merge_wf_outputs(new, cur, parallel):
new_ids = set([])
out = []
for v in new:
outv = {}
outv["source"] = v["id"]
outv["id"] = "%s" % get_base_id(v["id"])
outv["type"] = v["type"]
if "secondaryFiles" in v:
outv["secondaryFiles"] = v["secondaryFiles"]
if tz.get_in(["outputBinding", "secondaryFiles"], v):
outv["secondaryFiles"] = tz.get_in(["outputBinding", "secondaryFiles"], v)
new_ids.add(outv["id"])
out.append(outv)
for outv in cur:
if outv["id"] not in new_ids:
out.append(outv)
return out | Merge outputs for a sub-workflow, replacing variables changed in later steps.
ignore_ids are those used internally in a sub-workflow but not exposed to subsequent steps |
23,133 | def run(self, input_func=_stdin_):
self.qcount = 1
for section_name in self.survey:
self.run_section(section_name, input_func) | Run the sections. |
23,134 | def getDigitalID(self,num):
listidx = self.Dn.index(num)
return self.Dch_id[listidx] | Reads the COMTRADE ID of a given channel number.
The number to be given is the same as in the COMTRADE header. |
23,135 | def get_knowledge_category_id(self):
if not bool(self._my_map['knowledgeCategoryId']):
raise errors.IllegalState()
else:
return Id(self._my_map['knowledgeCategoryId']) | Gets the grade ``Id`` associated with the knowledge dimension.
return: (osid.id.Id) - the grade ``Id``
raise: IllegalState - ``has_knowledge_category()`` is ``false``
*compliance: mandatory -- This method must be implemented.* |
23,136 | def _display_big_warning(self, content):
print("")
print(BOLD + WARNING + "--- WARNING ---" + ENDC)
print(WARNING + content + ENDC)
print("") | Displays a BIG warning |
23,137 | def data_contains_key_builder(key: str) -> NodePredicate:
def data_contains_key(_: BELGraph, node: BaseEntity) -> bool:
return key in node
return data_contains_key | Build a filter that passes only on nodes that have the given key in their data dictionary.
:param key: A key for the node's data dictionary |
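A self-contained sketch of the predicate builder in row 23,137; plain dicts stand in for PyBEL `BaseEntity` nodes, and because the graph argument is ignored by the predicate, `None` is enough here:

```python
def data_contains_key_builder(key):
    def data_contains_key(_, node):
        # The graph argument is ignored; only the node's keys matter.
        return key in node
    return data_contains_key

has_label = data_contains_key_builder('label')
nodes = [{'label': 'TP53'}, {'name': 'unnamed'}]
print([has_label(None, n) for n in nodes])  # [True, False]
```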
23,138 | def get_languages_from_item(ct_item, item):
try:
item_lan = TransItemLanguage.objects.filter(content_type__pk=ct_item.id, object_id=item.id).get()
languages = [lang.code for lang in item_lan.languages.all()]
return languages
except TransItemLanguage.DoesNotExist:
return [] | Get the languages configured for the current item
:param ct_item:
:param item:
:return: |
23,139 | def setup_signals(self, ):
prjlvl = self.prjbrws.get_level(0)
prjlvl.new_root.connect(self.update_browsers)
for rb in self._releasetype_button_mapping.values():
rb.toggled.connect(self.releasetype_btn_toggled)
shotdesclvl = self.shotbrws.get_level(3)
shotselcb = partial(self.selection_changed,
source=self.shotbrws,
update=self.shotverbrws,
commentbrowser=self.shotcommentbrws,
mapper=self.shot_info_mapper)
shotdesclvl.new_root.connect(shotselcb)
shotverlvl = self.shotverbrws.get_level(0)
shotverlvl.new_root.connect(self.shot_ver_sel_changed)
shotmappercb = partial(self.set_mapper_index, mapper=self.shot_info_mapper)
shotverlvl.new_root.connect(shotmappercb)
shotverlvl.new_root.connect(partial(self.shotcommentbrws.set_root, 0))
assetdesclvl = self.assetbrws.get_level(3)
assetselcb = partial(self.selection_changed,
source=self.assetbrws,
update=self.assetverbrws,
commentbrowser=self.assetcommentbrws,
mapper=self.asset_info_mapper)
assetdesclvl.new_root.connect(assetselcb)
assetverlvl = self.assetverbrws.get_level(0)
assetverlvl.new_root.connect(self.asset_ver_sel_changed)
assetmappercb = partial(self.set_mapper_index, mapper=self.asset_info_mapper)
assetverlvl.new_root.connect(assetmappercb)
assetverlvl.new_root.connect(partial(self.assetcommentbrws.set_root, 0))
self.current_pb.clicked.connect(self.set_to_current)
self.asset_open_path_tb.clicked.connect(self.open_asset_path)
self.shot_open_path_tb.clicked.connect(self.open_shot_path)
self.refresh_tb.clicked.connect(self.refresh) | Connect the signals with the slots to make the ui functional
:returns: None
:rtype: None
:raises: None |
23,140 | def to_xml(self, xml_declaration=True):
xml = ET.tostring(self.xml()).decode()
return .format(xml) if xml_declaration else xml | Return the contents of this verb as an XML string
:param bool xml_declaration: Include the XML declaration. Defaults to True |
23,141 | def masters_by_queue(self, region, queue):
url, query = LeagueApiV4Urls.master_by_queue(region=region, queue=queue)
return self._raw_request(self.masters_by_queue.__name__, region, url, query) | Get the master league for a given queue.
:param string region: the region to execute this request on
:param string queue: the queue to get the master players for
:returns: LeagueListDTO |
23,142 | def check_email_status(mx_resolver, recipient_address, sender_address, smtp_timeout=10, helo_hostname=None):
domain = recipient_address[recipient_address.find('@') + 1:]
if helo_hostname is None:
helo_hostname = domain
ret = {: 101, : None, : "The server is unable to connect."}
records = []
try:
records = mx_resolver.get_mx_records(helo_hostname)
except socket.gaierror:
ret[] = 512
ret[] = "5.1.2 Domain name address resolution failed in MX lookup."
smtp = smtplib.SMTP(timeout=smtp_timeout)
for mx in records:
try:
connection_status, connection_message = smtp.connect(mx.exchange)
if connection_status == 220:
smtp.helo(domain)
smtp.mail(sender_address)
status, message = smtp.rcpt(recipient_address)
ret[] = status
pattern = re.compile()
matches = re.match(pattern, message)
if matches:
ret[] = matches.group(1)
ret[] = message
smtp.quit()
break
except smtplib.SMTPConnectError:
ret[] = 111
ret[] = "Connection refused or unable to open an SMTP stream."
except smtplib.SMTPServerDisconnected:
ret[] = 111
ret[] = "SMTP Server disconnected"
except socket.gaierror:
ret[] = 512
ret[] = "5.1.2 Domain name address resolution failed."
return ret | Checks if an email might be valid by getting the status from the SMTP server.
:param mx_resolver: MXResolver
:param recipient_address: string
:param sender_address: string
:param smtp_timeout: integer
:param helo_hostname: string
:return: dict |
23,143 | def get(self, key: Any, default: Any = None) -> Any:
if key in self:
return self[key].value
return default | Get the value for ``key`` from the cookies, or ``default`` if it is absent. |
23,144 | def _iter_candidate_groups(self, init_match, edges0, edges1):
sources = {}
for start_vertex0, end_vertex0 in edges0:
l = sources.setdefault(start_vertex0, [])
l.append(end_vertex0)
dests = {}
for start_vertex1, end_vertex1 in edges1:
start_vertex0 = init_match.reverse[start_vertex1]
l = dests.setdefault(start_vertex0, [])
l.append(end_vertex1)
for start_vertex0, end_vertices0 in sources.items():
end_vertices1 = dests.get(start_vertex0, [])
yield end_vertices0, end_vertices1 | Divide the edges into groups |
23,145 | def AddBlob(self, blob_id, length):
if self.finalized and length > 0:
raise IOError("Can't add blobs to finalized BlobImage")
self.content_dirty = True
self.index.seek(0, 2)
self.index.write(blob_id.AsBytes())
self.size += length
if length < self.chunksize:
self.finalized = True | Add another blob to this image using its hash.
Once a blob is added that is smaller than the chunksize we finalize the
file, since handling adding more blobs makes the code much more complex.
Args:
blob_id: rdf_objects.BlobID object.
length: int length of blob
Raises:
IOError: if blob has been finalized. |
23,146 | def disconnect(self, chassis_list):
self._check_session()
if not isinstance(chassis_list, (list, tuple, set, dict, frozenset)):
chassis_list = (chassis_list,)
if len(chassis_list) == 1:
self._rest.delete_request(, chassis_list[0])
else:
params = {chassis: True for chassis in chassis_list}
params[] =
self._rest.post_request(, None, params) | Remove connection with one or more chassis.
Arguments:
chassis_list -- List of chassis (IP addresses or DNS names) |
23,147 | def cli(ctx, dname, site):
assert isinstance(ctx, Context)
dname = domain_parse(dname).hostname
domain = Session.query(Domain).filter(Domain.name == dname).first()
if not domain:
click.secho(.format(dn=dname), fg=, bold=True, err=True)
return
site_name = site
site = Site.get(domain, site_name)
if not site:
click.secho(.format(site=site_name), fg=, bold=True, err=True)
return
p = Echo()
site.enable()
p.done()
p = Echo()
FNULL = open(os.devnull, )
subprocess.check_call([, , ], stdout=FNULL, stderr=subprocess.STDOUT)
p.done() | Enable the <site> under the specified <domain> |
23,148 | def unzoom(self, event=None, set_bounds=True):
lims = None
if len(self.conf.zoom_lims) > 1:
lims = self.conf.zoom_lims.pop()
ax = self.axes
if lims is None:
self.conf.zoom_lims = [None]
xmin, xmax, ymin, ymax = self.data_range
lims = {self.axes: [xmin, xmax, ymin, ymax]}
self.set_viewlimits()
self.canvas.draw() | zoom out 1 level, or to full data range |
23,149 | def item(self, current_item):
return {
'value': text(getattr(current_item, self.get_field_name())),
'label': self.label(current_item)
} | Return the current item.
@param current_item: Current item
@type param: django.models
@return: Value and label of the current item
@rtype : dict |
23,150 | def all(self):
partitions = []
for partition in self.index.searcher().documents():
partitions.append(
PartitionSearchResult(dataset_vid=partition['dataset_vid'], vid=partition['vid'], score=1))
return partitions | Returns list with all indexed partitions. |
23,151 | def auth_required(*auth_methods):
login_mechanisms = {
'token': lambda: _check_token(),
'basic': lambda: _check_http_auth(),
'session': lambda: current_user.is_authenticated
}
def wrapper(fn):
@wraps(fn)
def decorated_view(*args, **kwargs):
h = {}
mechanisms = [(method, login_mechanisms.get(method))
for method in auth_methods]
for method, mechanism in mechanisms:
if mechanism and mechanism():
return fn(*args, **kwargs)
elif method == 'basic':
r = _security.default_http_auth_realm
h['WWW-Authenticate'] = 'Basic realm="%s"' % r
if _security._unauthorized_callback:
return _security._unauthorized_callback()
else:
return _get_unauthorized_response(headers=h)
return decorated_view
return wrapper | Decorator that protects endpoints through multiple mechanisms
Example::
@app.route('/dashboard')
@auth_required('token', 'session')
def dashboard():
return 'Dashboard'
:param auth_methods: Specified mechanisms. |
23,152 | def gen_passwd(self):
post_data = self.get_post_data()
userinfo = MUser.get_by_name(post_data[])
sub_timestamp = int(post_data[])
cur_timestamp = tools.timestamp()
if cur_timestamp - sub_timestamp < 600 and cur_timestamp > sub_timestamp:
pass
else:
kwd = {
: ,
: ,
}
self.set_status(400)
self.render(,
kwd=kwd,
userinfo=self.userinfo)
hash_str = tools.md5(userinfo.user_name + post_data[] + userinfo.user_pass)
if hash_str == post_data[]:
pass
else:
kwd = {
: ,
: ,
}
self.set_status(400)
self.render(,
kwd=kwd,
userinfo=self.userinfo, )
new_passwd = tools.get_uu8d()
MUser.update_pass(userinfo.uid, new_passwd)
kwd = {
: userinfo.user_name,
: new_passwd,
}
self.render(,
cfg=config.CMS_CFG,
kwd=kwd,
userinfo=self.userinfo, ) | Reset the user's password. |
23,153 | def add_source(self, name, src_dict, free=None, init_source=True,
save_source_maps=True, use_pylike=True,
use_single_psf=False, **kwargs):
if self.roi.has_source(name):
msg = % name
self.logger.error(msg)
raise Exception(msg)
loglevel = kwargs.pop('loglevel', self.loglevel)
self.logger.log(loglevel, + name)
src = self.roi.create_source(name, src_dict, rescale=True)
self.make_template(src)
for c in self.components:
c.add_source(name, src_dict, free=free,
save_source_maps=save_source_maps,
use_pylike=use_pylike,
use_single_psf=use_single_psf)
if self._like is None:
return
if self.config[][] and src.name not in \
self.config[][]:
self.set_edisp_flag(src.name, True)
self.like.syncSrcParams(str(name))
self.like.model = self.like.components[0].model
if init_source:
self._init_source(name)
self._update_roi()
if self._fitcache is not None:
self._fitcache.update_source(name) | Add a source to the ROI model. This function may be called
either before or after `~fermipy.gtanalysis.GTAnalysis.setup`.
Parameters
----------
name : str
Source name.
src_dict : dict or `~fermipy.roi_model.Source` object
Dictionary or source object defining the source properties
(coordinates, spectral parameters, etc.).
free : bool
Initialize the source with a free normalization parameter.
use_pylike : bool
Create source maps with pyLikelihood.
use_single_psf : bool
Use the PSF model calculated for the ROI center. If false
then a new model will be generated using the position of
the source. |
23,154 | def from_datetime(self, dt):
global _last_timestamp
epoch = datetime(1970, 1, 1, tzinfo=dt.tzinfo)
offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0
timestamp = (dt - epoch).total_seconds() - offset
node = None
clock_seq = None
nanoseconds = int(timestamp * 1e9)
timestamp = int(nanoseconds // 100) + 0x01b21dd213814000
if clock_seq is None:
import random
clock_seq = random.randrange(1 << 14)
time_low = timestamp & 0xffffffff
time_mid = (timestamp >> 32) & 0xffff
time_hi_version = (timestamp >> 48) & 0x0fff
clock_seq_low = clock_seq & 0xff
clock_seq_hi_variant = (clock_seq >> 8) & 0x3f
if node is None:
node = getnode()
return pyUUID(fields=(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node), version=1) | generates a UUID for a given datetime
:param dt: datetime
:type dt: datetime
:return: |
23,155 | def cancel(self):
if not self.id:
raise WorkflowError()
if self.batch_values:
self.workflow.batch_workflow_cancel(self.id)
else:
self.workflow.cancel(self.id) | Cancel a running workflow.
Args:
None
Returns:
None |
23,156 | def diagnose_embedding(emb, source, target):
if not hasattr(source, 'edges'):
source = nx.Graph(source)
if not hasattr(target, 'edges'):
target = nx.Graph(target)
label = {}
embedded = set()
for x in source:
try:
embx = emb[x]
missing_chain = len(embx) == 0
except KeyError:
missing_chain = True
if missing_chain:
yield MissingChainError, x
continue
all_present = True
for q in embx:
if label.get(q, x) != x:
yield ChainOverlapError, q, x, label[q]
elif q not in target:
all_present = False
yield InvalidNodeError, x, q
else:
label[q] = x
if all_present:
embedded.add(x)
if not nx.is_connected(target.subgraph(embx)):
yield DisconnectedChainError, x
yielded = nx.Graph()
for p, q in target.subgraph(label).edges():
yielded.add_edge(label[p], label[q])
for x, y in source.edges():
if x == y:
continue
if x in embedded and y in embedded and not yielded.has_edge(x, y):
yield MissingEdgeError, x, y | A detailed diagnostic for minor embeddings.
This diagnostic produces a generator, which lists all issues with `emb`. The errors
are yielded in the form
ExceptionClass, arg1, arg2,...
where the arguments following the class are used to construct the exception object.
User-friendly variants of this function are :func:`is_valid_embedding`, which returns a
bool, and :func:`verify_embedding` which raises the first observed error. All exceptions
are subclasses of :exc:`.EmbeddingError`.
Args:
emb (dict):
Dictionary mapping source nodes to arrays of target nodes.
source (list/:obj:`networkx.Graph`):
Graph to be embedded as a NetworkX graph or a list of edges.
target (list/:obj:`networkx.Graph`):
Graph being embedded into as a NetworkX graph or a list of edges.
Yields:
One of:
:exc:`.MissingChainError`, snode: a source node label that does not occur as a key of `emb`, or for which emb[snode] is empty
:exc:`.ChainOverlapError`, tnode, snode0, snode1: a target node which occurs in both `emb[snode0]` and `emb[snode1]`
:exc:`.DisconnectedChainError`, snode: a source node label whose chain is not a connected subgraph of `target`
:exc:`.InvalidNodeError`, tnode, snode: a source node label and putative target node label which is not a node of `target`
:exc:`.MissingEdgeError`, snode0, snode1: a pair of source node labels defining an edge which is not present between their chains |
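An illustrative way to consume the generator from row 23,156, assuming the surrounding embedding utilities and `networkx` are importable; the tiny graphs are chosen so exactly one error is reported:

```python
import networkx as nx

source = nx.complete_graph(3)   # a triangle
target = nx.path_graph(4)       # a path: no edge between nodes 0 and 2
emb = {0: [0], 1: [1], 2: [2]}  # singleton chains

for error_class, *args in diagnose_embedding(emb, source, target):
    print(error_class.__name__, args)  # expected: MissingEdgeError [0, 2]
```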
23,157 | def _index_range(self, version, symbol, from_version=None, **kwargs):
from_index = None
if from_version:
from_index = from_version[]
return from_index, None | Tuple describing range to read from the ndarray - closed:open |
23,158 | def run_evaluate(self) -> None:
result = None
self.eval_error = False
if self._needs_evaluation:
result = self._schema.value.evaluate(self._evaluation_context)
self.eval_error = result is None
if self.eval_error:
return
if not self._schema.is_type_of(result):
try:
result = self._schema.type_object(result)
except Exception as err:
logging.debug(.format(
type(err).__name__, result, self._schema.type,
self._schema.fully_qualified_name, err))
self.eval_error = True
return
try:
result = self._schema.sanitize_object(result)
except Exception as err:
logging.debug(.format(
type(err).__name__, result, self._schema.type, self._schema.fully_qualified_name,
err))
self.eval_error = True
return
self.value = result | Overrides the base evaluation to set the value to the evaluation result of the value
expression in the schema |
23,159 | def parse(value, pattern=):
expressions = {
: ,
: ,
: ,
: ,
: ,
:
}
pattern_regex = re.escape(pattern)
for key, expression in expressions.items():
pattern_regex = pattern_regex.replace(
.format(key),
expression
)
pattern_regex = .format(pattern_regex)
match = re.search(pattern_regex, value)
if match is None:
raise ValueError()
groups = match.groupdict()
if in groups and groups[]:
groups[] = int(groups[])
else:
groups[] = 0
collection = Collection(
groups.get(, ),
groups.get(, ),
groups[]
)
if groups.get(, None) is not None:
start, end = map(int, groups[].split())
collection.indexes.update(range(start, end + 1))
if groups.get(, None) is not None:
parts = [part.strip() for part in groups[].split()]
for part in parts:
index_range = list(map(int, part.split(, 2)))
if len(index_range) > 1:
for index in range(index_range[0], index_range[1] + 1):
collection.indexes.add(index)
else:
collection.indexes.add(index_range[0])
if in groups:
parts = [part.strip() for part in groups[].split()]
for part in parts:
index_range = map(int, part.split(, 2))
if len(index_range) > 1:
for index in range(index_range[0], index_range[1] + 1):
collection.indexes.remove(index)
else:
collection.indexes.remove(index_range[0])
return collection | Parse *value* into a :py:class:`~clique.collection.Collection`.
Use *pattern* to extract information from *value*. It may make use of the
following keys:
* *head* - Common leading part of the collection.
* *tail* - Common trailing part of the collection.
* *padding* - Padding value in ``%0d`` format.
* *range* - Total range in the form ``start-end``.
* *ranges* - Comma separated ranges of indexes.
* *holes* - Comma separated ranges of missing indexes.
.. note::
*holes* only makes sense if *range* or *ranges* is also present. |
23,160 | def get_title(self, obj):
search_title = self.get_model_config_value(obj, )
if not search_title:
return super().get_title(obj)
return search_title.format(**obj.__dict__) | Set search entry title for object |
23,161 | def tcp_receive(self):
data = self.conn.recv(self.BUFFER_SIZE)
if type(data) != str:
data = data.decode("utf-8")
return str(data) | Receive data from TCP port. |
23,162 | def compile_tilebus(files, env, outdir=None, header_only=False):
if outdir is None:
dirs = env["ARCH"].build_dirs()
outdir = dirs[]
cmdmap_c_path = os.path.join(outdir, )
cmdmap_h_path = os.path.join(outdir, )
config_c_path = os.path.join(outdir, )
config_h_path = os.path.join(outdir, )
if header_only:
return env.Command([cmdmap_h_path, config_h_path], files,
action=env.Action(tb_h_file_creation, "Creating header files from TileBus definitions"))
else:
env[] = + cmdmap_c_path
return env.Command([cmdmap_c_path, cmdmap_h_path, config_c_path, config_h_path], files,
action=env.Action(tb_c_file_creation, "Compiling TileBus commands and config variables")) | Given a path to a *.cdb file, process it and generate c tables and/or headers containing the information. |
23,163 | def _delete(self, pos, idx):
_maxes, _lists, _keys, _index = self._maxes, self._lists, self._keys, self._index
keys_pos = _keys[pos]
lists_pos = _lists[pos]
del keys_pos[idx]
del lists_pos[idx]
self._len -= 1
len_keys_pos = len(keys_pos)
if len_keys_pos > self._half:
_maxes[pos] = keys_pos[-1]
if len(_index) > 0:
child = self._offset + pos
while child > 0:
_index[child] -= 1
child = (child - 1) >> 1
_index[0] -= 1
elif len(_keys) > 1:
if not pos:
pos += 1
prev = pos - 1
_keys[prev].extend(_keys[pos])
_lists[prev].extend(_lists[pos])
_maxes[prev] = _keys[prev][-1]
del _keys[pos]
del _lists[pos]
del _maxes[pos]
del _index[:]
self._expand(prev)
elif len_keys_pos:
_maxes[pos] = keys_pos[-1]
else:
del _keys[pos]
del _lists[pos]
del _maxes[pos]
del _index[:] | Delete the item at the given (pos, idx).
Combines lists that are less than half the load level.
Updates the index when the sublist length is more than half the load
level. This requires decrementing the nodes in a traversal from the leaf
node to the root. For an example traversal see self._loc. |
23,164 | def image_import(infile, force):
ecode = 0
try:
with open(infile, 'r') as FH:
savelist = json.loads(FH.read())
except Exception as err:
anchore_print_err("could not load input file: " + str(err))
ecode = 1
if ecode == 0:
for record in savelist:
try:
imageId = record[][]
if contexts[].is_image_present(imageId) and not force:
anchore_print("image ("+str(imageId)+") already exists in DB, skipping import.")
else:
imagedata = record[][]
try:
rc = contexts[].save_image_new(imageId, report=imagedata)
if not rc:
contexts[].delete_image(imageId)
raise Exception("save to anchore DB failed")
except Exception as err:
contexts[].delete_image(imageId)
raise err
except Exception as err:
anchore_print_err("could not store image ("+str(imageId)+") from import file: "+ str(err))
ecode = 1
sys.exit(ecode) | Import image anchore data from a JSON file. |
23,165 | def convert_branch(self, old_node, new_node, ids_to_skip, comment_dict=None):
expected_tag =
if old_node.tag != expected_tag:
raise DowngradeError( % expected_tag)
if not comment_dict:
comment_dict = {}
for node in old_node.getchildren():
node_id = node.get()
if node_id in ids_to_skip:
continue
if node.tag == :
negation = node.get()
condition = node.get()
if in negation.lower():
new_condition = condition +
else:
new_condition = condition
document = node.xpath()[0]
search = node.xpath()[0]
content_type = node.xpath()[0]
content = node.findtext()
context_type = node.xpath()[0]
new_ii_node = ioc_api.make_indicatoritem_node(condition=condition,
document=document,
search=search,
content_type=content_type,
content=content,
context_type=context_type,
nid=node_id)
new_ii_node.attrib[] = new_condition
if node_id in comment_dict:
comment = comment_dict[node_id]
comment_node = et.Element()
comment_node.text = comment
new_ii_node.append(comment_node)
del new_ii_node.attrib[]
del new_ii_node.attrib[]
new_node.append(new_ii_node)
elif node.tag == :
operator = node.get()
if operator.upper() not in [, ]:
raise DowngradeError( % (node_id, operator))
new_i_node = ioc_api.make_indicator_node(operator, node_id)
new_node.append(new_i_node)
self.convert_branch(node, new_i_node, ids_to_skip, comment_dict)
else:
raise DowngradeError()
return True | Recursively walk a indicator logic tree, starting from a Indicator node.
Converts OpenIOC 1.1 Indicator/IndicatorItems to Openioc 1.0 and preserves order.
:param old_node: An Indicator node, which we walk down to convert
:param new_node: An Indicator node, which we add new IndicatorItem and Indicator nodes too
:param ids_to_skip: set of node @id values not to convert
:param comment_dict: maps ids to comment values. only applied to IndicatorItem nodes
:return: returns True upon completion.
:raises: DowngradeError if there is a problem during the conversion. |
23,166 | def _from_dict(cls, _dict):
args = {}
if 'input' in _dict:
args['input'] = MessageInput._from_dict(_dict.get('input'))
if 'intents' in _dict:
args['intents'] = [
RuntimeIntent._from_dict(x) for x in (_dict.get('intents'))
]
if 'entities' in _dict:
args['entities'] = [
RuntimeEntity._from_dict(x) for x in (_dict.get('entities'))
]
return cls(**args) | Initialize a DialogSuggestionValue object from a json dictionary. |
23,167 | def main():
p = argparse.ArgumentParser()
p.add_argument(
,
help="Directory to check"
)
p.add_argument(
'-v', '--verbose', action='store_true',
help="increase verbosity"
)
args = p.parse_args()
import sys
_changed = changed(sys.argv[1], args=args)
sys.exit(_changed) | Return exit code of zero iff directory is not changed. |
23,168 | def fix_flags(self, flags):
FlagsError = base_plugin.FlagsError
if flags.version_tb:
pass
elif flags.inspect:
if flags.logdir and flags.event_file:
raise FlagsError(
)
if not (flags.logdir or flags.event_file):
raise FlagsError()
elif not flags.db and not flags.logdir:
raise FlagsError(
)
if flags.path_prefix.endswith('/'):
flags.path_prefix = flags.path_prefix[:-1] | Fixes standard TensorBoard CLI flags to parser. |
23,169 | def get_members_of_group(self, gname):
hostgroup = self.find_by_name(gname)
if hostgroup:
return hostgroup.get_services()
return [] | Get all members of a group which name is given in parameter
:param gname: name of the group
:type gname: str
:return: list of the services in the group
:rtype: list[alignak.objects.service.Service] |
23,170 | def dirWavFeatureExtractionNoAveraging(dirName, mt_win, mt_step, st_win, st_step):
all_mt_feats = numpy.array([])
signal_idx = numpy.array([])
process_times = []
types = (, , , )
wav_file_list = []
for files in types:
wav_file_list.extend(glob.glob(os.path.join(dirName, files)))
wav_file_list = sorted(wav_file_list)
for i, wavFile in enumerate(wav_file_list):
[fs, x] = audioBasicIO.readAudioFile(wavFile)
if isinstance(x, int):
continue
x = audioBasicIO.stereo2mono(x)
[mt_term_feats, _, _] = mtFeatureExtraction(x, fs, round(mt_win * fs),
round(mt_step * fs),
round(fs * st_win),
round(fs * st_step))
mt_term_feats = numpy.transpose(mt_term_feats)
if len(all_mt_feats) == 0:
all_mt_feats = mt_term_feats
signal_idx = numpy.zeros((mt_term_feats.shape[0], ))
else:
all_mt_feats = numpy.vstack((all_mt_feats, mt_term_feats))
signal_idx = numpy.append(signal_idx, i * numpy.ones((mt_term_feats.shape[0], )))
return (all_mt_feats, signal_idx, wav_file_list) | This function extracts the mid-term features of the WAVE
files of a particular folder without averaging each file.
ARGUMENTS:
- dirName: the path of the WAVE directory
- mt_win, mt_step: mid-term window and step (in seconds)
- st_win, st_step: short-term window and step (in seconds)
RETURNS:
- X: A feature matrix
- Y: A matrix of file labels
- filenames: |
23,171 | def partition(predicate, iterable):
t1, t2 = tee(iterable)
return filterfalse(predicate, t1), filter(predicate, t2) | Use a predicate to partition true and false entries.
Reference
---------
Python itertools documentation. |
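A self-contained copy of `partition` from row 23,171 with a usage example; note the return order is (false entries, true entries), exactly as the docstring states:

```python
from itertools import tee, filterfalse

def partition(predicate, iterable):
    t1, t2 = tee(iterable)
    return filterfalse(predicate, t1), filter(predicate, t2)

odds, evens = partition(lambda n: n % 2 == 0, range(10))
print(list(odds))   # [1, 3, 5, 7, 9]  -- predicate False
print(list(evens))  # [0, 2, 4, 6, 8]  -- predicate True
```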
23,172 | def write_version(name=None, path=None):
if name in (None, '__main__'):
path = path or os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"version.json")
contents = {
'version': __version__,
'version_string': __version_string__,
}
with open(path, 'w') as filehandle:
filehandle.write(json.dumps(contents, sort_keys=True, indent=4)) | Write the version info to ../version.json, for setup.py.
Args:
name (Optional[str]): this is for the ``write_version(name=__name__)``
below. That's one way to both follow the
``if __name__ == '__main__':`` convention but also allow for full
coverage without ignoring parts of the file.
path (Optional[str]): the path to write the version json to. Defaults
to ../version.json |
23,173 | def check_voltage(grid, mode):
crit_nodes = {}
if mode == :
mv_max_v_level_lc_diff_normal = float(cfg_ding0.get(,
))
mv_max_v_level_fc_diff_normal = float(cfg_ding0.get(,
))
if (abs(voltage_station[0] - node.voltage_res[0]) > mv_max_v_level_lc_diff_normal) or\
(abs(voltage_station[1] - node.voltage_res[1]) > mv_max_v_level_fc_diff_normal):
crit_nodes[node] = {: node,
: max([abs(v2-v1) for v1, v2 in zip(node.voltage_res, voltage_station)])}
except:
pass
elif mode == :
raise NotImplementedError
if crit_nodes:
logger.info(.format(len(crit_nodes)))
return [_[] for _ in sorted(crit_nodes.values(), key=lambda _: _[], reverse=True)] | Checks for voltage stability issues at all nodes for MV or LV grid
Parameters
----------
grid : GridDing0
Grid identifier.
mode : str
Kind of grid ('MV' or 'LV').
Returns
-------
:any:`list` of :any:`GridDing0`
List of critical nodes, sorted descending by voltage difference.
Notes
-----
The examination is done in two steps, according to [#]_ :
1. It is checked #TODO: what?
2. #TODO: what's next?
References
----------
.. [#] dena VNS |
23,174 | def handler(self):
printtime(, self.start)
self.populate()
printtime(.format(self.analysistype), self.start)
self.profiler()
self.annotatethreads()
self.cdsthreads()
self.cdssequencethreads()
self.allelematchthreads()
printtime(.format(self.analysistype), self.start)
self.sequencetyper()
printtime(.format(self.analysistype), self.start)
self.reporter() | Run the required analyses |
23,175 | def create(context, job_id, name, type, url, data):
result = analytic.create(context, job_id=job_id, name=name, type=type,
url=url, data=data)
utils.format_output(result, context.format) | create(context, job_id, name, type, url, data)
Create an analytic.
>>> dcictl analytic-create [OPTIONS]
:param string job-id: The job on which to attach the analytic
:param string name: Name of the analytic [required]
:param string type: Type of the analytic [required]
:param string url: Url of the bug [optional]
:param string data: JSON data of the analytic |
23,176 | def _append(lst, indices, value):
for i, idx in enumerate(indices):
while len(lst) <= idx:
lst.append([])
lst = lst[idx]
lst.append(value) | Adds `value` to `lst` list indexed by `indices`. Will create sub lists as required. |
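A self-contained copy of `_append` from row 23,176 showing how intermediate sub-lists are created on demand:

```python
def _append(lst, indices, value):
    for idx in indices:
        while len(lst) <= idx:
            lst.append([])
        lst = lst[idx]
    lst.append(value)

data = []
_append(data, [2, 0], 'x')
print(data)  # [[], [], [['x']]]
```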
23,177 | def _imm_merge_class(cls, parent):
if not hasattr(parent, ): return cls
return cls | _imm_merge_class(imm_class, parent) updates the given immutable class imm_class to have the
appropriate attributes of its given parent class. The parents should be passed through this
function in method-resolution order. |
23,178 | def AuthenticateSessionId(self, username, password):
self.__setAuthenticationMethod__()
parameters = {'username': username, 'password': password}
if self.__SenseApiCall__("/login.json", "POST", parameters = parameters):
try:
response = json.loads(self.__response__)
except:
self.__setAuthenticationMethod__()
self.__error__ = "notjson"
return False
try:
self.__session_id__ = response['session_id']
self.__setAuthenticationMethod__()
return True
except:
self.__setAuthenticationMethod__()
self.__error__ = "no session_id"
return False
else:
self.__setAuthenticationMethod__()
self.__error__ = "api call unsuccessful"
return False | Authenticate using a username and password.
The SenseApi object will store the obtained session_id internally until a call to LogoutSessionId is performed.
@param username (string) - CommonSense username
@param password (string) - MD5Hash of CommonSense password
@return (bool) - Boolean indicating whether AuthenticateSessionId was successful |
23,179 | def create_precursor_quant_lookup(quantdb, mzmlfn_feats, quanttype,
rttol, mztol, mztoltype):
featparsermap = {'kronik': kronik_featparser,
'openms': openms_featparser,
}
features = []
mzmlmap = quantdb.get_mzmlfile_map()
for specfn, feat_element in mzmlfn_feats:
feat = featparsermap[quanttype](feat_element)
features.append((mzmlmap[specfn], feat[], feat[],
feat[], feat[])
)
if len(features) == DB_STORE_CHUNK:
quantdb.store_ms1_quants(features)
features = []
quantdb.store_ms1_quants(features)
quantdb.index_precursor_quants()
align_quants_psms(quantdb, rttol, mztol, mztoltype) | Fills quant sqlite with precursor quant from:
features - generator of xml features from openms |
23,180 | def decode(self, input, final=False):
decoder = self._decoder
if decoder is not None:
return decoder(input, final)
input = self._buffer + input
encoding, input = _detect_bom(input)
if encoding is None:
if len(input) < 3 and not final:
self._buffer = input
return
else:
encoding = self._fallback_encoding
decoder = encoding.codec_info.incrementaldecoder(self._errors).decode
self._decoder = decoder
self.encoding = encoding
return decoder(input, final) | Decode one chunk of the input.
:param input: A byte string.
:param final:
Indicate that no more input is available.
Must be :obj:`True` if this is the last call.
:returns: An Unicode string. |
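A short usage sketch of this incremental, BOM-sniffing decoder; it assumes the `webencodings` package layout (a top-level `IncrementalDecoder` taking a fallback encoding label), which the snippet itself does not show:

from webencodings import IncrementalDecoder

decoder = IncrementalDecoder('utf-8')                   # fallback used only if no BOM is found
text = decoder.decode(b'\xef\xbb\xbfhi', final=True)    # the UTF-8 BOM selects the encoding
print(text, decoder.encoding.name)                      # hi utf-8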
23,181 | def get_customer_transitions(self, issue_id_or_key):
url = 'rest/servicedeskapi/request/{}/transition'.format(issue_id_or_key)
return self.get(url, headers=self.experimental_headers) | Returns a list of transitions that customers can perform on the request
:param issue_id_or_key: str
:return: |
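A hedged usage sketch: in the `atlassian-python-api` package this method lives on the `ServiceDesk` client, so a call would look roughly as below (URL, credentials and issue key are placeholders):

from atlassian import ServiceDesk

sd = ServiceDesk(url='https://jira.example.com',
                 username='user', password='secret')
transitions = sd.get_customer_transitions('SD-42')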
23,182 | def processFlat(self):
F = self._preprocess()
F = msaf.utils.normalize(F, norm_type=self.config["bound_norm_feats"])
if self.config["M_gaussian"] % 2 == 1:
self.config["M_gaussian"] += 1
F = median_filter(F, M=self.config["m_median"])
S = compute_ssm(F)
G = compute_gaussian_krnl(self.config["M_gaussian"])
nc = compute_nc(S, G)
est_idxs = pick_peaks(nc, L=self.config["L_peaks"])
est_idxs = np.concatenate(([0], est_idxs, [F.shape[0] - 1]))
est_labels = np.ones(len(est_idxs) - 1) * -1
est_idxs, est_labels = self._postprocess(est_idxs, est_labels)
return est_idxs, est_labels | Main process.
Returns
-------
est_idxs : np.array(N)
Estimated indices of the segment boundaries in frames.
est_labels : np.array(N-1)
Estimated labels for the segments. |
23,183 | def flatten_probas(probas, labels, ignore=None):
B, C, H, W = probas.size()
probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)
labels = labels.view(-1)
if ignore is None:
return probas, labels
valid = (labels != ignore)
vprobas = probas[valid.nonzero().squeeze()]
vlabels = labels[valid]
return vprobas, vlabels | Flattens predictions in the batch |
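A short PyTorch sketch of how the flattener above is typically called, assuming `flatten_probas` is in scope as defined; the tensor shapes and the ignore index are illustrative:

import torch

probas = torch.rand(2, 3, 4, 4)            # B=2, C=3, H=W=4 per-pixel class scores
labels = torch.randint(0, 3, (2, 4, 4))    # per-pixel labels
labels[0, 0, 0] = 255                      # mark one pixel as "ignore"
vprobas, vlabels = flatten_probas(probas, labels, ignore=255)
print(vprobas.shape, vlabels.shape)        # torch.Size([31, 3]) torch.Size([31])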
23,184 | def init_app(state):
app = state.app
app.config.setdefault('SPLIT_ALLOW_MULTIPLE_EXPERIMENTS', False)
app.config.setdefault('SPLIT_DB_FAILOVER', False)
app.config.setdefault('SPLIT_IGNORE_IP_ADDRESSES', [])
# the original default robot regex was stripped from this snippet; the pattern below is a placeholder
app.config.setdefault('SPLIT_ROBOT_REGEX', r'(?i)\b(bot|crawler|spider)\b')
app.jinja_env.globals.update({
'ab_test': ab_test,
'finished': finished
})
@app.template_filter()
def percentage(number):
number *= 100
if abs(number) < 10:
return "%.1f%%" % round(number, 1)
else:
return "%d%%" % round(number) | Prepare the Flask application for Flask-Split.
:param state: :class:`BlueprintSetupState` instance |
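The `percentage` filter registered above keeps one decimal place only for small magnitudes; assuming it is in scope, it behaves like this:

print(percentage(0.057))   # 5.7%
print(percentage(0.25))    # 25%
print(percentage(-0.042))  # -4.2%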
23,185 | def hex_color(value):
r = ((value >> (8 * 2)) & 255) / 255.0
g = ((value >> (8 * 1)) & 255) / 255.0
b = ((value >> (8 * 0)) & 255) / 255.0
return (r, g, b) | Accepts a hexadecimal color `value` in the format ``0xrrggbb`` and
returns an (r, g, b) tuple where 0.0 <= r, g, b <= 1.0. |
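A quick check of the byte extraction above, assuming `hex_color` is in scope as defined (the values follow directly from the arithmetic):

r, g, b = hex_color(0xff8000)
print(round(r, 3), round(g, 3), round(b, 3))   # 1.0 0.502 0.0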
23,186 | def validate_request_method_to_operation(request_method, path_definition):
try:
operation_definition = path_definition[request_method]
except KeyError:
allowed_methods = set(REQUEST_METHODS).intersection(path_definition.keys())
raise ValidationError(
MESSAGES['request']['invalid_method'].format(  # key path reconstructed; the original literals were stripped
request_method, allowed_methods,
),
)
return operation_definition | Given a request method, validate that the request method is valid for the
api path.
If so, return the operation definition related to this request method. |
23,187 | def _append_data(self, value, _file):
_tabs = '\t' * self._tctr
_text = base64.b64encode(value).decode()
# the XML template string was stripped from this snippet; the format string below is a reconstructed placeholder
_labs = '{tabs}<data>\n{tabs}\t{text}\n{tabs}</data>\n'.format(tabs=_tabs, text=_text)
_file.write(_labs) | Call this function to write data contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file |
23,188 | def run(self, job_list):
if self._closed:
raise RuntimeError("master is closed")
return self._manager.add_job_set(job_list) | Runs a job set which consists of the jobs in an iterable job list. |
23,189 | def NewFd(self, fd, URL, encoding, options):
ret = libxml2mod.xmlReaderNewFd(self._o, fd, URL, encoding, options)
return ret | Setup an xmltextReader to parse an XML from a file
descriptor. NOTE that the file descriptor will not be
closed when the reader is closed or reset. The parsing
flags @options are a combination of xmlParserOption. This
reuses the existing @reader xmlTextReader. |
23,190 | def create_generate(kind, project, resource, offset):
generate = eval('Generate' + kind.title())
return generate(project, resource, offset) | A factory for creating `Generate` objects
`kind` can be 'variable', 'function', 'class', 'module' or
'package'. |
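The factory resolves a class named `Generate<Kind>` via `eval`; a self-contained analog of that dispatch (with a dummy class rather than rope's actual generators) looks like this:

class GenerateVariable:
    def __init__(self, project, resource, offset):
        self.args = (project, resource, offset)

kind = 'variable'
generator_cls = eval('Generate' + kind.title())   # resolves to GenerateVariable
print(generator_cls(None, None, 0).args)          # (None, None, 0)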
23,191 | def create(blocks, mode='basic', inplanes=16, divisor=4, num_classes=1000):
# the mode names below were stripped from this snippet and are reconstructed
block_dict = {
'basic': BasicBlock,
'bottleneck': Bottleneck
}
def instantiate(**_):
return ResNetV2(block_dict[mode], blocks, inplanes=inplanes, divisor=divisor, num_classes=num_classes)
return ModelFactory.generic(instantiate) | Vel factory function |
23,192 | def _pybossa_req(method, domain, id=None, payload=None, params={},
headers={'content-type': 'application/json'},
files=None):
# NOTE: the stripped string literals below are reconstructed from pybossa-client conventions
url = _opts['endpoint'] + '/api/' + domain
if id is not None:
url += '/' + str(id)
if 'api_key' in _opts:
params['api_key'] = _opts['api_key']
if method == 'get':
r = requests.get(url, params=params)
elif method == 'post':
if files is None and headers['content-type'] == 'application/json':
r = requests.post(url, params=params, headers=headers,
data=json.dumps(payload))
else:
r = requests.post(url, params=params, files=files, data=payload)
elif method == 'put':
r = requests.put(url, params=params, headers=headers,
data=json.dumps(payload))
elif method == 'delete':
r = requests.delete(url, params=params, headers=headers,
data=json.dumps(payload))
if r.status_code // 100 == 2:
if r.text and r.text != 'null':
return json.loads(r.text)
else:
return True
else:
return json.loads(r.text) | Send a JSON request.
Returns True if everything went well, otherwise it returns the status
code of the response. |
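A hedged configuration sketch: in the `pybossa-client` package this request helper reads the module-level `_opts`, which callers populate through `pbclient.set` before using the higher-level helpers (the endpoint, key and helper call below are placeholders/assumptions):

import pbclient

pbclient.set('endpoint', 'https://pybossa.example.com')
pbclient.set('api_key', 'your-api-key')
projects = pbclient.get_projects()   # higher-level helpers route through _pybossa_req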
23,193 | def _handle_request(self, scheme, netloc, path, headers, body=None, method="GET"):
backend_url = "{}://{}{}".format(scheme, netloc, path)
try:
response = self.http_request.request(backend_url, method=method, body=body, headers=dict(headers))
self._return_response(response)
except Exception as e:
body = "Invalid response from backend: Server might be busy".format(e.message)
logging.debug(body)
self.send_error(httplib.SERVICE_UNAVAILABLE, body) | Run the actual request |
23,194 | def to_xdr_object(self):
bump_sequence_op = Xdr.types.BumpSequenceOp(self.bump_to)
self.body.type = Xdr.const.BUMP_SEQUENCE
self.body.bumpSequenceOp = bump_sequence_op
return super(BumpSequence, self).to_xdr_object() | Creates an XDR Operation object that represents this
:class:`BumpSequence`. |
23,195 | def newChild(self, ns, name, content):
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlNewChild(self._o, ns__o, name, content)
if ret is None:raise treeError('xmlNewChild() failed')
__tmp = xmlNode(_obj=ret)
return __tmp | Creation of a new child element, added at the end of
@parent children list. @ns and @content parameters are
optional (None). If @ns is None, the newly created element
inherits the namespace of @parent. If @content is non None,
a child list containing the TEXTs and ENTITY_REFs node will
be created. NOTE: @content is supposed to be a piece of XML
CDATA, so it allows entity references. XML special chars
must be escaped first by using
xmlEncodeEntitiesReentrant(), or xmlNewTextChild() should
be used. |
23,196 | def update_warning(self):
new_qsequence = self.new_qsequence
new_sequence = self.new_sequence
self.text_new_sequence.setText(
new_qsequence.toString(QKeySequence.NativeText))
conflicts = self.check_conflicts()
if len(self._qsequences) == 0:
warning = SEQUENCE_EMPTY
# NOTE: the UI message strings and icon names in this block were stripped from the snippet; the values below are reconstructed placeholders
tip = ''
icon = QIcon()
elif conflicts:
warning = SEQUENCE_CONFLICT
template = '<i>{0}{1}{2}</i>'
tip_title = _('The new shortcut conflicts with:') + '<br>'
tip_body = ''
for s in conflicts:
tip_body += '- {}: {}<br>'.format(s.context, s.name)
tip_body = tip_body[:-4]
tip_override = '<br>Press OK to unbind '
tip_override += 'it' if len(conflicts) == 1 else 'them'
tip_override += ' and assign it to {}'.format(self.name)
tip = template.format(tip_title, tip_body, tip_override)
icon = get_std_icon('MessageBoxWarning')
elif new_sequence in BLACKLIST:
warning = IN_BLACKLIST
template = '<i>{0}{1}</i>'
tip_title = _('This key sequence is forbidden:') + '<br>'
tip_body = ''
use = BLACKLIST[new_sequence]
if use is not None:
tip_body = use
tip = template.format(tip_title, tip_body)
icon = get_std_icon('MessageBoxWarning')
elif self.check_singlekey() is False or self.check_ascii() is False:
warning = INVALID_KEY
template = '<i>{0}</i>'
tip = _('Invalid key sequence entered') + '<br>'
icon = get_std_icon('MessageBoxWarning')
else:
warning = NO_WARNING
tip = ''
icon = get_std_icon('DialogApplyButton')
self.warning = warning
self.conflicts = conflicts
self.helper_button.setIcon(icon)
self.button_ok.setEnabled(
self.warning in [NO_WARNING, SEQUENCE_CONFLICT])
self.label_warning.setText(tip)
new_height = self.label_warning.sizeHint().height()
self.label_warning.setMaximumHeight(new_height) | Update the warning label, buttons state and sequence text. |
23,197 | def _install(archive_filename, install_args=()):
with archive_context(archive_filename):
log.warn('Installing Setuptools')
if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
return 2 | Install Setuptools. |
23,198 | def document_endpoint(endpoint):
descr = clean_description(py_doc_trim(endpoint.__doc__))
# the dict keys were stripped from this snippet; the names below are reconstructed placeholders
docs = {
'name': endpoint._route_name,
'http_method': endpoint._http_method,
'uri': endpoint._uri,
'description': descr,
'arguments': extract_endpoint_arguments(endpoint),
'returns': format_endpoint_returns_doc(endpoint),
}
if hasattr(endpoint, "_success"):
docs["success"] = endpoint._success
if hasattr(endpoint, "_requires_permission"):
docs["requires_permission"] = endpoint._requires_permission
return docs | Extract the full documentation dictionary from the endpoint. |
23,199 | def afx_adafactor():
hparams = afx_adam()
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
return hparams | Adafactor with recommended learning rate schedule. |