Unnamed: 0 (int64, 0–389k) | code (string, 26–79.6k chars) | docstring (string, 1–46.9k chars) |
---|---|---|
24,000 |
def _get_backend_router(self, locations, item):
mask =
cpu_count = item[]
for capacity in item[]:
for category in capacity[]:
if category[] == :
mem_capacity = capacity[]
if category[] == :
disk_capacity = capacity[]
for hardwareComponent in item[]:
if hardwareComponent[].find("GPU") != -1:
hardwareComponentType = hardwareComponent[][]
gpuComponents = [
{
: {
: {
: hardwareComponent[][],
: {
: hardwareComponentType[]
}
}
}
},
{
: {
: {
: hardwareComponent[][],
: {
: hardwareComponentType[]
}
}
}
}
]
if locations is not None:
for location in locations:
if location[] is not None:
loc_id = location[]
host = {
: cpu_count,
: mem_capacity,
: disk_capacity,
: {
: loc_id
}
}
if item[].find("GPU") != -1:
host[] = gpuComponents
routers = self.host.getAvailableRouters(host, mask=mask)
return routers
raise SoftLayer.SoftLayerError("Could not find available routers")
|
Returns valid router options for ordering a dedicated host.
|
24,001 |
def loads(cls, s):
    try:
        currency, amount = s.strip().split()
        return cls(amount, currency)
    except ValueError as err:
        money.six.raise_from(
            ValueError("failed to parse string {!r}: {}".format(s, err)), None)
|
Parse from a string representation (repr)
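A hedged usage sketch of the parser above; the `Money` class name and its (amount, currency) constructor are assumptions based on the signature shown.
# Hypothetical usage: the expected input format is "<CURRENCY> <AMOUNT>".
price = Money.loads("USD 19.99")   # -> Money('19.99', 'USD')
try:
    Money.loads("19.99")           # only one token: unpacking fails
except ValueError as exc:
    print(exc)                     # "failed to parse string '19.99': ..."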
|
24,002 |
def det4D(m):
    return (m[0][3]*m[1][2]*m[2][1]*m[3][0] - m[0][2]*m[1][3]*m[2][1]*m[3][0] -
            m[0][3]*m[1][1]*m[2][2]*m[3][0] + m[0][1]*m[1][3]*m[2][2]*m[3][0] +
            m[0][2]*m[1][1]*m[2][3]*m[3][0] - m[0][1]*m[1][2]*m[2][3]*m[3][0] -
            m[0][3]*m[1][2]*m[2][0]*m[3][1] + m[0][2]*m[1][3]*m[2][0]*m[3][1] +
            m[0][3]*m[1][0]*m[2][2]*m[3][1] - m[0][0]*m[1][3]*m[2][2]*m[3][1] -
            m[0][2]*m[1][0]*m[2][3]*m[3][1] + m[0][0]*m[1][2]*m[2][3]*m[3][1] +
            m[0][3]*m[1][1]*m[2][0]*m[3][2] - m[0][1]*m[1][3]*m[2][0]*m[3][2] -
            m[0][3]*m[1][0]*m[2][1]*m[3][2] + m[0][0]*m[1][3]*m[2][1]*m[3][2] +
            m[0][1]*m[1][0]*m[2][3]*m[3][2] - m[0][0]*m[1][1]*m[2][3]*m[3][2] -
            m[0][2]*m[1][1]*m[2][0]*m[3][3] + m[0][1]*m[1][2]*m[2][0]*m[3][3] +
            m[0][2]*m[1][0]*m[2][1]*m[3][3] - m[0][0]*m[1][2]*m[2][1]*m[3][3] -
            m[0][1]*m[1][0]*m[2][2]*m[3][3] + m[0][0]*m[1][1]*m[2][2]*m[3][3])
|
det4D(array) yields the determinant of the given matrix array, which may have more than 2
dimensions, in which case the later dimensions are multiplied and added point-wise.
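As a quick sanity check (not part of the original), the explicit cofactor expansion can be compared against numpy's determinant for the plain 4x4 case, assuming numpy is available.
import numpy as np

# Compare the explicit expansion with numpy for a simple 4x4 matrix.
m = [[2., 1., 0., 3.],
     [0., 1., 4., 1.],
     [5., 0., 1., 2.],
     [1., 3., 2., 0.]]
assert abs(det4D(m) - np.linalg.det(np.array(m))) < 1e-9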
|
24,003 |
def crossvalidate_model(self, classifier, data, num_folds, rnd, output=None):
    if output is None:
        generator = []
    else:
        generator = [output.jobject]
    javabridge.call(
        self.jobject, "crossValidateModel",
        "(Lweka/classifiers/Classifier;Lweka/core/Instances;ILjava/util/Random;[Ljava/lang/Object;)V",
        classifier.jobject, data.jobject, num_folds, rnd.jobject, generator)
|
Cross-validates the model using the specified data, number of folds and random number generator wrapper.
:param classifier: the classifier to cross-validate
:type classifier: Classifier
:param data: the data to evaluate on
:type data: Instances
:param num_folds: the number of folds
:type num_folds: int
:param rnd: the random number generator to use
:type rnd: Random
:param output: the output generator to use
:type output: PredictionOutput
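A hedged usage sketch in the style of the python-weka-wrapper API; the dataset path is illustrative and the JVM must be running for any of this to work.
import weka.core.jvm as jvm
from weka.core.converters import Loader
from weka.classifiers import Classifier, Evaluation
from weka.core.classes import Random

jvm.start()
loader = Loader(classname="weka.core.converters.ArffLoader")
data = loader.load_file("iris.arff")      # path is illustrative
data.class_is_last()                      # the class attribute must be set
cls = Classifier(classname="weka.classifiers.trees.J48")
evaluation = Evaluation(data)
evaluation.crossvalidate_model(cls, data, 10, Random(1))
print(evaluation.summary())
jvm.stop()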
|
24,004 |
def merge(self, other):
    other = BoolCell.coerce(other)
    if self.is_equal(other):
        return self
    elif other.is_entailed_by(self):
        return self
    elif self.is_entailed_by(other):
        self.value = other.value
    elif self.is_contradictory(other):
        raise Contradiction("Cannot merge T and F")
    else:
        raise Exception
    return self
|
Merges two BoolCells
|
24,005 |
def get_region_products(self, region):
regions, retInfo = self.list_regions()
if regions is None:
return None
for r in regions:
if r.get() == region:
return r.get()
|
Get the product information for the specified region.
Args:
- region: the region, e.g. "nq"
Returns:
The product information for that region, or None on failure.
|
24,006 |
def _set_fcoe(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("fcoe_interface_name",fcoe.fcoe, yang_name="fcoe", rest_name="Fcoe", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: None, u: u, u: u, u: u, u: u, u: u, u: None, u: u}}), is_container=, yang_name="fcoe", rest_name="Fcoe", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: u, u: u, u: u, u: u, u: u, u: None, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "list",
: ,
})
self.__fcoe = t
if hasattr(self, ):
self._set()
|
Setter method for fcoe, mapped from YANG variable /interface/fcoe (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_fcoe is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fcoe() directly.
YANG Description: The list of FCoE interfaces. Each row contains FCoE
interface name and its status.
|
24,007 |
def focus(self, f):
    ret = copy.copy(self)
    ret._focus = f
    return ret
|
Get a new UI proxy copy with the given focus. Return a new UI proxy object as the UI proxy is immutable.
Args:
f (2-:obj:`tuple`/2-:obj:`list`/:obj:`str`): the focus point, it can be specified as 2-list/2-tuple
coordinates (x, y) in NormalizedCoordinate system or as 'center' or 'anchor'.
Returns:
:py:class:`UIObjectProxy <poco.proxy.UIObjectProxy>`: a new UI proxy object (copy)
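A hedged usage sketch of the immutable-focus pattern described above; the selector string and actions are illustrative.
# focus() returns a new proxy, so the original `button` proxy is unchanged.
button = poco("confirm_btn")
button.focus('center').click()          # click the node's center
button.focus([0.1, 0.9]).long_click()   # act near the bottom-left corner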
|
24,008 |
def create(dataset, label=None, features=None, distance=None, method=,
verbose=True, **kwargs):
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
_tkutl._raise_error_if_sframe_empty(dataset, "dataset")
if features is not None and not isinstance(features, list):
raise TypeError("If specified, input must be a list of " +
"strings.")
allowed_kwargs = [, , ]
_method_options = {}
for k, v in kwargs.items():
if k in allowed_kwargs:
_method_options[k] = v
else:
raise _ToolkitError(" is not a valid keyword argument".format(k) +
" for the nearest neighbors model. Please " +
"check for capitalization and other typos.")
if method == and (distance ==
or distance == _turicreate.distances.cosine
or distance ==
or distance == _turicreate.distances.dot_product
or distance ==
or distance == _turicreate.distances.transformed_dot_product):
raise TypeError("The ball tree method does not work with " +
", or distance." +
"Please use the method for these distances.")
if method == and ( not in _method_options):
if distance == or distance == _turicreate.distances.jaccard:
_method_options[] = 4
elif distance == or distance == _turicreate.distances.cosine:
_method_options[] = 16
else:
_method_options[] = 8
if label is None:
_label = _robust_column_name(, dataset.column_names())
_dataset = dataset.add_row_number(_label)
else:
_label = label
_dataset = _copy.copy(dataset)
col_type_map = {c:_dataset[c].dtype for c in _dataset.column_names()}
_validate_row_label(_label, col_type_map)
ref_labels = _dataset[_label]
if features is None:
_features = _dataset.column_names()
else:
_features = _copy.deepcopy(features)
free_features = set(_features).difference([_label])
if len(free_features) < 1:
raise _ToolkitError("The only available feature is the same as the " +
"row label column. Please specify features " +
"that are not also row labels.")
if isinstance(distance, list):
distance = _copy.deepcopy(distance)
elif (hasattr(distance, ) or
(isinstance(distance, str) and not distance == )):
distance = [[_features, distance, 1]]
elif distance is None or distance == :
sample = _dataset.head()
distance = _construct_auto_distance(_features,
_dataset.column_names(),
_dataset.column_types(),
sample)
else:
raise TypeError("Input not understood. The "
" argument must be a string, function handle, or " +
"composite distance.")
distance = _scrub_composite_distance_features(distance, [_label])
distance = _convert_distance_names_to_functions(distance)
_validate_composite_distance(distance)
list_features_to_check = []
sparse_distances = [, , , , ]
sparse_distances = [_turicreate.distances.__dict__[k] for k in sparse_distances]
for d in distance:
feature_names, dist, _ = d
list_features = [f for f in feature_names if _dataset[f].dtype == list]
for f in list_features:
if dist in sparse_distances:
list_features_to_check.append(f)
else:
raise TypeError("The chosen distance cannot currently be used " +
"on list-typed columns.")
for f in list_features_to_check:
only_str_lists = _validate_lists(_dataset[f], [str])
if not only_str_lists:
raise TypeError("Distances for sparse data, such as jaccard " +
"and weighted_jaccard, can only be used on " +
"lists containing only strings. Please modify " +
"any list features accordingly before creating " +
"the nearest neighbors model.")
for d in distance:
feature_names, dist, _ = d
if (len(feature_names) > 1) and (dist == _turicreate.distances.levenshtein):
raise ValueError("Levenshtein distance cannot be used with multiple " +
"columns. Please concatenate strings into a single " +
"column before creating the nearest neighbors model.")
clean_features = _get_composite_distance_features(distance)
sf_clean = _tkutl._toolkits_select_columns(_dataset, clean_features)
if len(distance) > 1:
_method =
if method != and verbose is True:
print("Defaulting to brute force instead of ball tree because " +\
"there are multiple distance components.")
else:
if method == :
num_variables = sum([len(x) if hasattr(x, ) else 1
for x in _six.itervalues(sf_clean[0])])
numeric_type_flag = all([x in [int, float, list, array.array]
for x in sf_clean.column_types()])
if ((distance[0][1] in [,
,
_turicreate.distances.euclidean,
_turicreate.distances.manhattan])
and numeric_type_flag is True
and num_variables <= 200):
_method =
else:
_method =
else:
_method = method
if _method == :
model_name =
elif _method == :
model_name =
elif _method == :
model_name =
else:
raise ValueError("Method must be , , , " +
"or .")
opts = {}
opts.update(_method_options)
opts.update(
{: model_name,
: ref_labels,
: label,
: sf_clean,
: distance})
with QuietProgress(verbose):
result = _turicreate.extensions._nearest_neighbors.train(opts)
model_proxy = result[]
model = NearestNeighborsModel(model_proxy)
return model
|
Create a nearest neighbor model, which can be searched efficiently and
quickly for the nearest neighbors of a query observation. If the `method`
argument is specified as `auto`, the type of model is chosen automatically
based on the type of data in `dataset`.
.. warning::
The 'dot_product' distance is deprecated and will be removed in future
versions of Turi Create. Please use 'transformed_dot_product'
distance instead, although note that this is more than a name change;
it is a *different* transformation of the dot product of two vectors.
Please see the distances module documentation for more details.
Parameters
----------
dataset : SFrame
Reference data. If the features for each observation are numeric, they
may be in separate columns of 'dataset' or a single column with lists
of values. The features may also be in the form of a column of sparse
vectors (i.e. dictionaries), with string keys and numeric values.
label : string, optional
Name of the SFrame column with row labels. If 'label' is not specified,
row numbers are used to identify reference dataset rows when the model
is queried.
features : list[string], optional
Name of the columns with features to use in computing distances between
observations and the query points. 'None' (the default) indicates that
all columns except the label should be used as features. Each column
can be one of the following types:
- *Numeric*: values of numeric type integer or float.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate variable in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values.
Each key indicates a separate variable in the model.
- *List*: list of integer or string values. Each element is treated as
a separate variable in the model.
- *String*: string values.
Please note: if a composite distance is also specified, this parameter
is ignored.
distance : string, function, or list[list], optional
Function to measure the distance between any two input data rows. This
may be one of three types:
- *String*: the name of a standard distance function. One of
'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
or 'transformed_dot_product'.
- *Function*: a function handle from the
:mod:`~turicreate.toolkits.distances` module.
- *Composite distance*: the weighted sum of several standard distance
functions applied to various features. This is specified as a list of
distance components, each of which is itself a list containing three
items:
1. list or tuple of feature names (strings)
2. standard distance name (string)
3. scaling factor (int or float)
For more information about Turi Create distance functions, please
see the :py:mod:`~turicreate.toolkits.distances` module.
If 'distance' is left unspecified or set to 'auto', a composite
distance is constructed automatically based on feature types.
method : {'auto', 'ball_tree', 'brute_force', 'lsh'}, optional
Method for computing nearest neighbors. The options are:
- *auto* (default): the method is chosen automatically, based on the
type of data and the distance. If the distance is 'manhattan' or
'euclidean' and the features are numeric or vectors of numeric
values, then the 'ball_tree' method is used. Otherwise, the
'brute_force' method is used.
- *ball_tree*: use a tree structure to find the k-closest neighbors to
each query point. The ball tree model is slower to construct than the
brute force model, but queries are faster than linear time. This
method is not applicable for the cosine and dot product distances.
See `Liu, et al (2004)
<http://papers.nips.cc/paper/2666-an-investigation-of-practical-approximate-nearest-neighbor-algorithms>`_
for implementation details.
- *brute_force*: compute the distance from a query point to all
reference observations. There is no computation time for model
creation with the brute force method (although the reference data is
held in the model), but each query takes linear time.
- *lsh*: use Locality Sensitive Hashing (LSH) to find approximate
nearest neighbors efficiently. The LSH model supports 'euclidean',
'squared_euclidean', 'manhattan', 'cosine', 'jaccard', 'dot_product'
(deprecated), and 'transformed_dot_product' distances. Two options
are provided for LSH -- ``num_tables`` and
``num_projections_per_table``. See the notes below for details.
verbose: bool, optional
If True, print progress updates and model details.
**kwargs : optional
Options for the distance function and query method.
- *leaf_size*: for the ball tree method, the number of points in each
leaf of the tree. The default is to use the max of 1,000 and
n/(2^11), which ensures a maximum tree depth of 12.
- *num_tables*: For the LSH method, the number of hash tables
constructed. The default value is 20. We recommend choosing values
from 10 to 30.
- *num_projections_per_table*: For the LSH method, the number of
projections/hash functions for each hash table. The default value is
4 for 'jaccard' distance, 16 for 'cosine' distance and 8 for other
distances. We recommend using number 2 ~ 6 for 'jaccard' distance, 8
~ 20 for 'cosine' distance and 4 ~ 12 for other distances.
Returns
-------
out : NearestNeighborsModel
A structure for efficiently computing the nearest neighbors in 'dataset'
of new query points.
See Also
--------
NearestNeighborsModel.query, turicreate.toolkits.distances
Notes
-----
- Missing data is not allowed in the 'dataset' provided to this function.
Please use the :func:`turicreate.SFrame.fillna` and
:func:`turicreate.SFrame.dropna` utilities to handle missing data before
creating a nearest neighbors model.
- Missing keys in sparse vectors are assumed to have value 0.
- The `composite_params` parameter was removed as of Turi Create
version 1.5. The `distance` parameter now accepts either standard or
composite distances. Please see the :mod:`~turicreate.toolkits.distances`
module documentation for more information on composite distances.
- If the features should be weighted equally in the distance calculations
but are measured on different scales, it is important to standardize the
features. One way to do this is to subtract the mean of each column and
divide by the standard deviation.
**Locality Sensitive Hashing (LSH)**
There are several efficient nearest neighbors search algorithms that work
well for data with low dimensions :math:`d` (approximately 50). However,
most of the solutions suffer from either space or query time that is
exponential in :math:`d`. For large :math:`d`, they often provide little,
if any, improvement over the 'brute_force' method. This is a well-known
consequence of the phenomenon called `The Curse of Dimensionality`.
`Locality Sensitive Hashing (LSH)
<https://en.wikipedia.org/wiki/Locality-sensitive_hashing>`_ is an approach
that is designed to efficiently solve the *approximate* nearest neighbor
search problem for high dimensional data. The key idea of LSH is to hash
the data points using several hash functions, so that the probability of
collision is much higher for data points which are close to each other than
those which are far apart.
An LSH family is a family of functions :math:`h` which map points from the
metric space to a bucket, so that
- if :math:`d(p, q) \\leq R`, then :math:`h(p) = h(q)` with at least probability :math:`p_1`.
- if :math:`d(p, q) \\geq cR`, then :math:`h(p) = h(q)` with probability at most :math:`p_2`.
LSH for efficient approximate nearest neighbor search:
- We define a new family of hash functions :math:`g`, where each
function :math:`g` is obtained by concatenating :math:`k` functions
:math:`h_1, ..., h_k`, i.e., :math:`g(p)=[h_1(p),...,h_k(p)]`.
The algorithm constructs :math:`L` hash tables, each of which
corresponds to a different randomly chosen hash function :math:`g`.
There are :math:`k \\cdot L` hash functions used in total.
- In the preprocessing step, we hash all :math:`n` reference points
into each of the :math:`L` hash tables.
- Given a query point :math:`q`, the algorithm iterates over the
:math:`L` hash functions :math:`g`. For each :math:`g` considered, it
retrieves the data points that are hashed into the same bucket as q.
These data points from all the :math:`L` hash tables are considered as
candidates that are then re-ranked by their real distances with the query
data.
**Note** that the number of tables :math:`L` and the number of hash
functions per table :math:`k` are two main parameters. They can be set
using the options ``num_tables`` and ``num_projections_per_table``
respectively.
Hash functions for different distances:
- `euclidean` and `squared_euclidean`:
:math:`h(q) = \\lfloor \\frac{a \\cdot q + b}{w} \\rfloor` where
:math:`a` is a vector, of which the elements are independently
sampled from normal distribution, and :math:`b` is a number
uniformly sampled from :math:`[0, r]`. :math:`r` is a parameter for the
bucket width. We set :math:`r` using the average all-pair `euclidean`
distances from a small randomly sampled subset of the reference data.
- `manhattan`: The hash function of `manhattan` is similar with that of
`euclidean`. The only difference is that the elements of `a` are sampled
from Cauchy distribution, instead of normal distribution.
- `cosine`: Random Projection is designed to approximate the cosine
distance between vectors. The hash function is :math:`h(q) = sgn(a \\cdot
q)`, where :math:`a` is randomly sampled normal unit vector.
- `jaccard`: We use a recently proposed method one permutation hashing by
Shrivastava and Li. See the paper `[Shrivastava and Li, UAI 2014]
<http://www.auai.org/uai2014/proceedings/individuals/225.pdf>`_ for
details.
- `dot_product`: The reference data points are first transformed to
fixed-norm vectors, and then the minimum `dot_product` distance search
problem can be solved via finding the reference data with smallest
`cosine` distances. See the paper `[Neyshabur and Srebro, ICML 2015]
<http://proceedings.mlr.press/v37/neyshabur15.html>`_ for details.
References
----------
- `Wikipedia - nearest neighbor
search <http://en.wikipedia.org/wiki/Nearest_neighbor_search>`_
- `Wikipedia - ball tree <http://en.wikipedia.org/wiki/Ball_tree>`_
- Ball tree implementation: Liu, T., et al. (2004) `An Investigation of
Practical Approximate Nearest Neighbor Algorithms
<http://papers.nips.cc/paper/2666-an-investigation-of-practical-approximate-nearest-neighbor-algorithms>`_.
Advances in Neural Information Processing Systems pp. 825-832.
- `Wikipedia - Jaccard distance
<http://en.wikipedia.org/wiki/Jaccard_index>`_
- Weighted Jaccard distance: Chierichetti, F., et al. (2010) `Finding the
Jaccard Median
<http://theory.stanford.edu/~sergei/papers/soda10-jaccard.pdf>`_.
Proceedings of the Twenty-First Annual ACM-SIAM Symposium on Discrete
Algorithms. Society for Industrial and Applied Mathematics.
- `Wikipedia - Cosine distance
<http://en.wikipedia.org/wiki/Cosine_similarity>`_
- `Wikipedia - Levenshtein distance
<http://en.wikipedia.org/wiki/Levenshtein_distance>`_
- Locality Sensitive Hashing : Chapter 3 of the book `Mining Massive
Datasets <http://infolab.stanford.edu/~ullman/mmds/ch3.pdf>`_.
Examples
--------
Construct a nearest neighbors model with automatically determined method
and distance:
>>> sf = turicreate.SFrame({'X1': [0.98, 0.62, 0.11],
... 'X2': [0.69, 0.58, 0.36],
... 'str_feature': ['cat', 'dog', 'fossa']})
>>> model = turicreate.nearest_neighbors.create(sf, features=['X1', 'X2'])
For datasets with a large number of rows and up to about 100 variables, the
ball tree method often leads to much faster queries.
>>> model = turicreate.nearest_neighbors.create(sf, features=['X1', 'X2'],
... method='ball_tree')
Often the final determination of a neighbor is based on several distance
computations over different sets of features. Each part of this composite
distance may have a different relative weight.
>>> my_dist = [[['X1', 'X2'], 'euclidean', 2.],
... [['str_feature'], 'levenshtein', 3.]]
...
>>> model = turicreate.nearest_neighbors.create(sf, distance=my_dist)
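As a further illustration, the LSH method can be requested explicitly and tuned through the two options described under ``**kwargs`` above.
>>> model = turicreate.nearest_neighbors.create(sf, features=['X1', 'X2'],
...                                             method='lsh',
...                                             num_tables=20,
...                                             num_projections_per_table=16)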
|
24,009 |
def watch_login(status_code=302, msg='',
                get_username=utils.get_username_from_request):
    def decorated_login(func):
        @functools.wraps(func)
        def wrapper(request, *args, **kwargs):
            if utils.is_already_locked(request):
                return utils.lockout_response(request)
            response = func(request, *args, **kwargs)
            if request.method == 'POST':
                if status_code == 302:
                    # redirect-based views: anything that is not a redirect
                    # counts as a failed login
                    login_unsuccessful = (
                        response and
                        not response.has_header('location') and
                        response.status_code != status_code
                    )
                else:
                    # status/message-based views: a failure is the given
                    # status code with the failure message in the body
                    login_unsuccessful = (
                        response and response.status_code == status_code
                        and msg in response.content.decode()
                    )
                utils.add_login_attempt_to_db(request, not login_unsuccessful,
                                              get_username)
                if utils.check_request(request, login_unsuccessful,
                                       get_username):
                    return response
                return utils.lockout_response(request)
            return response
        return wrapper
    return decorated_login
|
Used to decorate the django.contrib.admin.site.login method or
any other function you want to protect against brute forcing.
To make it work on normal functions just pass the status code that should
indicate a failure and/or a string that will be checked within the
response body.
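A hedged sketch of decorating a plain login view as the docstring suggests; the status code, message and view name below are illustrative.
# Treat an HTTP 200 response containing the error string as a failed login
# attempt; lockout handling is applied by the decorator.
@watch_login(status_code=200, msg="Invalid username or password")
def login_view(request, *args, **kwargs):
    ...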
|
24,010 |
def clear_alert_destination(self, destination=0, channel=None):
    if channel is None:
        channel = self.get_network_channel()
    # reset the destination to an empty address and disable acknowledgements
    self.set_alert_destination(
        '0.0.0.0', False, 0, 0, destination, channel)
|
Clear an alert destination
Remove the specified alert destination configuration.
:param destination: The destination to clear (defaults to 0)
|
24,011 |
def add_lvl_to_ui(self, level, header):
    lay = self.layout()
    rc = lay.rowCount()
    lay.addWidget(level, rc+1, 1)
    if header is not None:
        lay.addWidget(header, rc+1, 0)
    lay.setColumnStretch(1, 1)
|
Insert the level and header into the ui.
:param level: a newly created level
:type level: :class:`jukeboxcore.gui.widgets.browser.AbstractLevel`
:param header: a newly created header
:type header: QtCore.QWidget|None
:returns: None
:rtype: None
:raises: None
|
24,012 |
def get_atom_feed_entry(self, feedentry_id):
    return self.session.query(self.feedentry_model).filter(
        self.feedentry_model.id == feedentry_id
    ).one()
|
Get a specific feed entry
:param id: id of the feed entry to retrieve
:return: the feed entry
|
24,013 |
def create(source,
requirement_files=None,
force=False,
keep_wheels=False,
archive_destination_dir=,
python_versions=None,
validate_archive=False,
wheel_args=,
archive_format=,
build_tag=):
if validate_archive:
_assert_virtualenv_is_installed()
logger.info(, source)
processed_source = get_source(source)
if os.path.isdir(processed_source) and not \
os.path.isfile(os.path.join(processed_source, )):
raise WagonError(
)
package_name, package_version = get_source_name_and_version(
processed_source)
tempdir = tempfile.mkdtemp()
workdir = os.path.join(tempdir, package_name)
wheels_path = os.path.join(workdir, DEFAULT_WHEELS_PATH)
try:
wheels = wheel(
processed_source,
requirement_files,
wheels_path,
wheel_args)
finally:
if processed_source != source:
shutil.rmtree(processed_source, ignore_errors=True)
platform = _get_platform_for_set_of_wheels(wheels_path)
if is_verbose():
logger.debug(, platform)
python_versions = _set_python_versions(python_versions)
if not os.path.isdir(archive_destination_dir):
os.makedirs(archive_destination_dir)
archive_name = _set_archive_name(
package_name, package_version, python_versions, platform, build_tag)
archive_path = os.path.join(archive_destination_dir, archive_name)
_handle_output_file(archive_path, force)
_generate_metadata_file(
workdir,
archive_name,
platform,
python_versions,
package_name,
package_version,
build_tag,
source,
wheels)
_create_wagon_archive(workdir, archive_path, archive_format)
if not keep_wheels:
logger.debug()
shutil.rmtree(tempdir, ignore_errors=True)
if validate_archive:
validate(archive_path)
logger.info(, archive_path)
return archive_path
|
Create a Wagon archive and return its path.
Package name and version are extracted from the setup.py file
of the `source`, or from PACKAGE_NAME==PACKAGE_VERSION if the source
is a PyPI package.
Supported `python_versions` must be in a format like [33, 27, 2, 3].
`force` will remove any excess dirs or archives before creation.
`requirement_files` can be either a link/local path to a
requirements.txt file or just `.`, in which case requirement files
will be automatically extracted from either the GitHub archive URL
or the local path provided in `source`.
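A hedged usage sketch; the package spec, destination directory and Python versions below are placeholders.
# Build a wagon for a PyPI package into the current directory, overwriting
# any previous archive of the same name.
archive_path = create(
    "requests==2.25.1",
    archive_destination_dir=".",
    python_versions=[27, 36],
    force=True,
)
print(archive_path)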
|
24,014 |
def generate_item_instances(cls, items, mediawiki_api_url=, login=None,
user_agent=config[]):
assert type(items) == list
url = mediawiki_api_url
params = {
: ,
: .join(items),
:
}
headers = {
: user_agent
}
if login:
reply = login.get_session().get(url, params=params, headers=headers)
else:
reply = requests.get(url, params=params)
item_instances = []
for qid, v in reply.json()[].items():
ii = cls(wd_item_id=qid, item_data=v)
ii.mediawiki_api_url = mediawiki_api_url
item_instances.append((qid, ii))
return item_instances
|
A method which allows for retrieval of a list of Wikidata items or properties. The method generates a list of
tuples where the first value in the tuple is the QID or property ID, whereas the second is the new instance of
WDItemEngine containing all the data of the item. This is most useful for mass retrieval of WD items.
:param items: A list of QIDs or property IDs
:type items: list
:param mediawiki_api_url: The MediaWiki url which should be used
:type mediawiki_api_url: str
:param login: An object of type WDLogin, which holds the credentials/session cookies required for >50 item bulk
retrieval of items.
:type login: wdi_login.WDLogin
:return: A list of tuples, first value in the tuple is the QID or property ID string, second value is the
instance of WDItemEngine with the corresponding item data.
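A hedged usage sketch of bulk retrieval; `WDItemEngine` is assumed to be the class this method lives on, and the QIDs are arbitrary examples.
# Bulk-retrieve two items and inspect the returned (qid, engine) tuples.
instances = WDItemEngine.generate_item_instances(items=["Q42", "Q937"])
for qid, item_engine in instances:
    print(qid, type(item_engine).__name__)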
|
24,015 |
def getColorMapAsDiscreetSLD(self, uniqueValues, nodata=-9999):
colorMap = ET.Element(, type=)
ET.SubElement(colorMap, , color=, quantity=str(nodata), label=, opacity=)
for value in uniqueValues:
red, green, blue = self.getColorForValue(value)
hexRGB = % (red,
green,
blue)
ET.SubElement(colorMap, , color=hexRGB, quantity=str(value), label=str(value), opacity=str(self.alpha))
return ET.tostring(colorMap)
|
Create the color map SLD format from a list of values.
:rtype: str
|
24,016 |
def _populate_trie(self, values: List[str]) -> CharTrie:
    if self._default_tokenizer:
        return reduce(self._populate_trie_reducer, iter(values), CharTrie())
    return reduce(self._populate_trie_reducer_regex, iter(values), CharTrie())
|
Takes a list and inserts its elements into a new trie and returns it
|
24,017 |
def get_bhavcopy_url(self, d):
    d = parser.parse(d).date()
    day_of_month = d.strftime("%d")
    mon = d.strftime("%b").upper()
    year = d.year
    url = self.bhavcopy_base_url % (year, mon, day_of_month, mon, year)
    return url
|
Take a date and return the bhavcopy URL.
|
24,018 |
def isRunActive(g):
if g.cpars[]:
url = g.cpars[] +
response = urllib.request.urlopen(url, timeout=2)
rs = ReadServer(response.read(), status_msg=True)
if not rs.ok:
raise DriverError( + str(rs.err))
if rs.state == :
return False
elif rs.state == :
return True
else:
raise DriverError( + rs.state)
else:
raise DriverError()
|
Polls the data server to see if a run is active
|
24,019 |
def on_connect(self, client, userdata, flags, rc):
    super(SerialDeviceManager, self).on_connect(client, userdata, flags, rc)
    if rc == 0:
        self.mqtt_client.subscribe()
        self.mqtt_client.subscribe()
        self.mqtt_client.subscribe()
        self.mqtt_client.subscribe()
        self.refresh_comports()
|
Callback for when the client receives a ``CONNACK`` response from the
broker.
Parameters
----------
client : paho.mqtt.client.Client
The client instance for this callback.
userdata : object
The private user data as set in :class:`paho.mqtt.client.Client`
constructor or :func:`paho.mqtt.client.Client.userdata_set`.
flags : dict
Response flags sent by the broker.
The flag ``flags['session present']`` is useful for clients that
are using clean session set to 0 only.
If a client with clean session=0, that reconnects to a broker that
it has previously connected to, this flag indicates whether the
broker still has the session information for the client.
If 1, the session still exists.
rc : int
The connection result.
The value of rc indicates success or not:
- 0: Connection successful
- 1: Connection refused - incorrect protocol version
- 2: Connection refused - invalid client identifier
- 3: Connection refused - server unavailable
- 4: Connection refused - bad username or password
- 5: Connection refused - not authorised
- 6-255: Currently unused.
Notes
-----
Subscriptions should be defined in this method to ensure subscriptions
will be renewed upon reconnecting after a loss of connection.
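A minimal paho-mqtt sketch of the pattern these notes describe: subscriptions are (re)established inside ``on_connect`` so they survive reconnects. The topic name, host and port are made up.
import paho.mqtt.client as mqtt

def on_connect(client, userdata, flags, rc):
    # Re-subscribe on every (re)connection; rc == 0 means success.
    if rc == 0:
        client.subscribe("serial_device/refresh_comports")

client = mqtt.Client()
client.on_connect = on_connect
client.connect("localhost", 1883)
client.loop_start()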
|
24,020 |
def zone(self, name, dns_name=None, description=None):
    return ManagedZone(name, dns_name, client=self, description=description)
|
Construct a zone bound to this client.
:type name: str
:param name: Name of the zone.
:type dns_name: str
:param dns_name:
(Optional) DNS name of the zone. If not passed, then calls to
:meth:`zone.create` will fail.
:type description: str
:param description:
(Optional) the description for the zone. If not passed, defaults
to the value of 'dns_name'.
:rtype: :class:`google.cloud.dns.zone.ManagedZone`
:returns: a new ``ManagedZone`` instance.
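A hedged usage sketch based on the docstring; the project and zone names are placeholders.
from google.cloud import dns

client = dns.Client(project="my-project")
zone = client.zone("example-zone", dns_name="example.com.",
                   description="zone managed from Python")
zone.create()   # would fail if dns_name had not been passed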
|
24,021 |
def insert_chain(cur, chain, encoded_data=None):
    if encoded_data is None:
        encoded_data = {}
    if 'nodes' not in encoded_data:
        encoded_data['nodes'] = json.dumps(sorted(chain), separators=(',', ':'))
    if 'chain_length' not in encoded_data:
        encoded_data['chain_length'] = len(chain)
    insert = "INSERT OR IGNORE INTO chain(chain_length, nodes) VALUES (:chain_length, :nodes);"
    cur.execute(insert, encoded_data)
|
Insert a chain into the cache.
Args:
cur (:class:`sqlite3.Cursor`):
An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement.
chain (iterable):
A collection of nodes. Chains in embedding act as one node.
encoded_data (dict, optional):
If a dictionary is provided, it will be populated with the serialized data. This is
useful for preventing encoding the same information many times.
Notes:
This function assumes that the nodes in chain are index-labeled.
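A minimal sketch of driving this function, assuming a cache table with the two columns named in the INSERT statement; the schema below is illustrative.
import sqlite3

with sqlite3.connect(":memory:") as con:
    cur = con.cursor()
    cur.execute("CREATE TABLE chain(chain_length INTEGER, nodes TEXT, "
                "PRIMARY KEY (chain_length, nodes))")
    insert_chain(cur, [0, 1, 2])                     # nodes are index-labeled
    shared = {}                                      # reuse the serialized form
    insert_chain(cur, [3, 4], encoded_data=shared)
    insert_chain(cur, [3, 4], encoded_data=shared)   # duplicate hits OR IGNORE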
|
24,022 |
def current_bed_temp(self):
try:
bedtemps = self.intervals[0][][]
num_temps = len(bedtemps)
if num_temps == 0:
return None
bedtemp = bedtemps[num_temps-1][1]
except KeyError:
bedtemp = None
return bedtemp
|
Return current bed temperature for in-progress session.
|
24,023 |
def set_start_date(self, date):
    if self.get_start_date_metadata().is_read_only():
        raise errors.NoAccess()
    if not self._is_valid_date_time(date, self.get_start_date_metadata()):
        raise errors.InvalidArgument()
    self._my_map['startDate'] = date
|
Sets the start date.
arg: date (osid.calendaring.DateTime): the new date
raise: InvalidArgument - ``date`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``date`` is ``null``
*compliance: mandatory -- This method must be implemented.*
|
24,024 |
def acceptEdit(self):
    if not self._lineEdit:
        return
    self.setText(self._lineEdit.text())
    self._lineEdit.hide()
    if not self.signalsBlocked():
        self.editingFinished.emit(self._lineEdit.text())
|
Accepts the current edit for this label.
|
24,025 |
def init_widget(self):
    super(AndroidTextClock, self).init_widget()
    d = self.declaration
    if d.format_12_hour:
        self.set_format_12_hour(d.format_12_hour)
    if d.format_24_hour:
        self.set_format_24_hour(d.format_24_hour)
    if d.time_zone:
        self.set_time_zone(d.time_zone)
|
Initialize the underlying widget.
|
24,026 |
def do_before_loop(self):
logger.info("I am the arbiter: %s", self.link_to_myself.name)
if not self.is_master:
logger.debug("Waiting for my master death...")
return
if not self.daemons_start(run_daemons=True):
self.request_stop(message="Some Alignak daemons did not started correctly.",
exit_code=4)
if not self.daemons_check():
self.request_stop(message="Some Alignak daemons cannot be checked.",
exit_code=4)
pause = max(1, max(self.conf.daemons_start_timeout, len(self.my_daemons) * 0.5))
if pause:
logger.info("Pausing %.2f seconds...", pause)
time.sleep(pause)
self.configuration_dispatch()
_t0 = time.time()
self.get_initial_broks_from_satellites()
statsmgr.timer(, time.time() - _t0)
self.external_commands_manager = ExternalCommandManager(
self.conf, , self, self.conf.accept_passive_unknown_check_results,
self.conf.log_external_commands)
|
Called before the main daemon loop.
:return: None
|
24,027 |
def checkSanity(cls, trust_root_string):
    trust_root = cls.parse(trust_root_string)
    if trust_root is None:
        return False
    else:
        return trust_root.isSane()
|
str -> bool
is this a sane trust root?
|
24,028 |
def add_host_to_segment(ipaddress, name, description, network_address, auth, url):
    scope_id = get_scope_id(network_address, auth, url)
    add_scope_ip(ipaddress, name, description, scope_id, auth, url)
|
Function to abstract the existing add_scope_ip function. Allows use of a network address rather than requiring
the caller to know the scope_id.
:param ipaddress:
:param name: name of the owner of this host
:param description: description of the host
:param network_address: network address of the target scope in the format x.x.x.x/yy, where x.x.x.x represents
the network address and yy represents the length of the subnet mask. Example: 10.50.0.0 255.255.255.0 would be
written as 10.50.0.0/24
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return:
:rtype:
|
24,029 |
def is_object(brain_or_object):
    if is_portal(brain_or_object):
        return True
    if is_at_content(brain_or_object):
        return True
    if is_dexterity_content(brain_or_object):
        return True
    if is_brain(brain_or_object):
        return True
    return False
|
Check if the passed in object is a supported portal content object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: Portal Object
:returns: True if the passed in object is a valid portal content
|
24,030 |
def stop_instance(self, instance_id):
    instance = self._load_instance(instance_id)
    instance.terminate()
    del self._instances[instance_id]
|
Stops the instance gracefully.
:param str instance_id: instance identifier
|
24,031 |
def get_validator(filter_data):
    for matcher_type, m in matchers.items():
        if hasattr(m, 'can_handle') and m.can_handle(filter_data):
            filter_data = m.handle(filter_data)
    return filter_data
|
Ask every matcher whether it can handle the given filter data and, if so, let it process the data.
:param filter_data:
:return:
|
24,032 |
def db_create(cls, impl, working_dir):
    global VIRTUALCHAIN_DB_SCRIPT
    log.debug("Setup chain state in {}".format(working_dir))
    path = config.get_snapshots_filename(impl, working_dir)
    if os.path.exists(path):
        raise Exception("Database {} already exists".format(path))
    lines = [l + ";" for l in VIRTUALCHAIN_DB_SCRIPT.split(";")]
    con = sqlite3.connect(path, isolation_level=None, timeout=2**30)
    for line in lines:
        con.execute(line)
    con.row_factory = StateEngine.db_row_factory
    return con
|
Create a sqlite3 db at the given path.
Create all the tables and indexes we need.
Returns a db connection on success
Raises an exception on error
|
24,033 |
def change_sample(self, old_samp_name, new_samp_name, new_site_name=None,
new_er_data=None, new_pmag_data=None, replace_data=False):
sample = self.find_by_name(old_samp_name, self.samples)
if not sample:
print(.format(old_samp_name))
return False
if new_site_name:
new_site = self.find_by_name(new_site_name, self.sites)
if not new_site:
print(.format(new_site_name, new_site_name))
new_site = self.add_site(new_site_name)
else:
new_site = None
sample.change_sample(new_samp_name, new_site, new_er_data, new_pmag_data, replace_data)
return sample
|
Find the actual data objects for the sample and site.
Then call the Sample class change method to update the sample name and data.
|
24,034 |
def add(ctx, short_name, uri, interval, buffer):
wva = get_wva(ctx)
subscription = wva.get_subscription(short_name)
subscription.create(uri, buffer, interval)
|
Add a subscription with a given short_name for a given uri
This command can be used to create subscriptions to receive new pieces
of vehicle data on the stream channel on a periodic basis. By default,
subscriptions are buffered and have a 5 second interval:
\b
$ wva subscriptions add speed vehicle/data/VehicleSpeed
$ wva subscriptions show speed
{'buffer': 'queue', 'interval': 5, 'uri': 'vehicle/data/VehicleSpeed'}
These parameters can be modified by the use of optional arguments:
$ wva subscriptions add rpm vehicle/data/EngineSpeed --interval 1 --buffer discard
$ wva subscriptions show rpm
{'buffer': 'discard', 'interval': 1, 'uri': 'vehicle/data/EngineSpeed'}
To view the data coming in as a result of these subscriptions, one can use
either 'wva subscriptions listen' or 'wva subscriptions graph <name>'.
|
24,035 |
def collate_data(in_dir, extension='.csv', out_dir=None):
if out_dir is None:
out_dir = + re.search(, extension).groups(0)[0]
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
for p, d, fs in os.walk(in_dir):
for f in fs:
if extension in f:
shutil.copy(p + '/' + f, out_dir + '/' + f)
return
|
Copy all CSVs in a nested directory tree into a single directory.
Function to copy all CSVs from a directory (including subfolders) and place
them in a new directory.
Parameters
----------
in_dir : str
Input directory containing csv files in subfolders
extension : str
The extension that identifies your data files.
Defaults to '.csv'.
out_dir : str
Destination directory
Returns
-------
None
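A hedged usage example; the directory names below are placeholders.
# Gather every .csv found anywhere under ./raw_runs into ./raw_runs_csvs.
collate_data("raw_runs", extension=".csv", out_dir="raw_runs_csvs")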
|
24,036 |
def run(self, backend_args, archive_args=None, resume=False):
args = backend_args.copy()
if archive_args:
self.initialize_archive_manager(archive_args[])
if not resume:
max_date = backend_args.get(, None)
offset = backend_args.get(, None)
if max_date:
max_date = datetime_to_utc(max_date).timestamp()
self._result = JobResult(self.job_id, self.task_id, self.backend, self.category,
None, max_date, 0, offset=offset,
nresumed=0)
else:
if self.result.max_date:
args[] = unixtime_to_datetime(self.result.max_date)
if self.result.offset:
args[] = self.result.offset
self._result.nresumed += 1
for item in self._execute(args, archive_args):
self.conn.rpush(self.qitems, pickle.dumps(item))
self._result.nitems += 1
self._result.last_uuid = item[]
if not self.result.max_date or self.result.max_date < item[]:
self._result.max_date = item[]
if in item:
self._result.offset = item[]
|
Run the backend with the given parameters.
The method will run the backend assigned to this job,
storing the fetched items in a Redis queue. The ongoing
status of the job can be accessed through the property
`result`. When `resume` is set, the job will start from
the last execution, overwriting the 'from_date' and 'offset'
parameters if needed.
When the parameter `fetch_from_archive` is set to `True`, items
are fetched from the archive assigned to this job.
Any exception raised during the execution of the process is
propagated.
:param backend_args: parameters used to run the backend
:param archive_args: archive arguments
:param resume: fetch items starting where the last
execution stopped
|
24,037 |
def _set_nd_basic_indexing(self, key, value):
shape = self.shape
if isinstance(key, integer_types):
if key < 0:
key += shape[0]
if key < 0 or key >= shape[0]:
if key < 0:
key -= shape[0]
raise IndexError(
% (key, shape[0]))
key = py_slice(key, key+1)
if isinstance(key, py_slice):
assign_to_self = key.step is None or key.step == 1
assign_to_self &= key.start is None or key.start == 0
assign_to_self &= key.stop is None or key.stop == shape[0]
if assign_to_self:
if isinstance(value, NDArray):
if value.handle is not self.handle:
if value.shape != shape:
value = value.broadcast_to(shape)
value.copyto(self)
elif isinstance(value, numeric_types):
_internal._full(shape=shape, ctx=self.context,
dtype=self.dtype, value=float(value), out=self)
elif isinstance(value, (np.ndarray, np.generic)):
if isinstance(value, np.generic) or value.shape != shape:
value = np.broadcast_to(value, shape)
self._sync_copyfrom(value)
else:
value_nd = self._prepare_value_nd(value, shape)
value_nd.copyto(self)
return
else:
key = (key,)
assert isinstance(key, tuple), "key=%s must be a tuple of slices and integers" % str(key)
assert len(key) <= len(shape), "Indexing dimensions exceed array dimensions, %d vs %d"\
% (len(key), len(shape))
begin = []
end = []
steps = []
oshape = []
vshape = []
for i, slice_i in enumerate(key):
dim_size = 1
if isinstance(slice_i, py_slice):
begin.append(slice_i.start)
end.append(slice_i.stop)
steps.append(slice_i.step)
start, stop, step = _get_index_range(slice_i.start, slice_i.stop,
shape[i], slice_i.step)
dim_size = _get_dim_size(start, stop, step)
vshape.append(dim_size)
elif isinstance(slice_i, integer_types):
begin.append(slice_i)
end.append(slice_i+1 if slice_i != -1 else self.shape[i])
steps.append(1)
else:
raise ValueError("basic indexing does not support index=%s of type=%s"
% (str(slice_i), str(type(slice_i))))
oshape.append(dim_size)
oshape.extend(shape[len(key):])
vshape.extend(shape[len(key):])
if len(vshape) == 0:
vshape.append(1)
oshape = tuple(oshape)
vshape = tuple(vshape)
if isinstance(value, numeric_types):
_internal._slice_assign_scalar(self, out=self, begin=begin, end=end,
step=steps, scalar=float(value))
else:
value_nd = self._prepare_value_nd(value, vshape)
if vshape != oshape:
value_nd = value_nd.reshape(oshape)
_internal._slice_assign(self, value_nd, begin, end, steps, out=self)
|
This function is called by __setitem__ when key is a basic index, i.e.
an integer, or a slice, or a tuple of integers and slices. No restrictions
on the values of slices' steps.
|
24,038 |
def resolve_font(name):
    if os.path.exists(name):
        return os.path.abspath(name)
    fonts = get_font_files()
    if name in fonts:
        return fonts[name]
    raise FontNotFound("Can't find font %r. Try adding it to ~/.fonts" % name)
|
Turns font names into absolute filenames
This is case sensitive. The extension should be omitted.
For example::
>>> path = resolve_font('NotoSans-Bold')
>>> fontdir = os.path.join(os.path.dirname(__file__), 'fonts')
>>> noto_path = os.path.join(fontdir, 'NotoSans-Bold.ttf')
>>> noto_path = os.path.abspath(noto_path)
>>> assert path == noto_path
Absolute paths are allowed::
>>> resolve_font(noto_path) == noto_path
True
Raises :exc:`FontNotFound` on failure::
>>> try:
... resolve_font('blahahaha')
... assert False
... except FontNotFound:
... pass
|
24,039 |
def compute_fov(self, x, y, fov='PERMISSIVE', radius=None,
                light_walls=True, sphere=True, cumulative=False):
    if radius is None:
        radius = 0
    if cumulative:
        fov_copy = self.fov.copy()
    lib.TCOD_map_compute_fov(
        self.map_c, x, y, radius, light_walls, _get_fov_type(fov))
    if cumulative:
        self.fov[:] |= fov_copy
    return zip(*np.where(self.fov))
|
Compute the field-of-view of this Map and return an iterator of the
points touched.
Args:
x (int): Point of view, x-coordinate.
y (int): Point of view, y-coordinate.
fov (Text): The type of field-of-view to be used.
Available types are:
'BASIC', 'DIAMOND', 'SHADOW', 'RESTRICTIVE', 'PERMISSIVE',
'PERMISSIVE0', 'PERMISSIVE1', ..., 'PERMISSIVE8'
radius (Optional[int]): Maximum view distance from the point of
view.
A value of 0 will give an infinite distance.
light_walls (bool): Light up walls, or only the floor.
sphere (bool): If True the lit area will be round instead of
square.
cumulative (bool): If True the lit cells will accumulate instead
of being cleared before the computation.
Returns:
Iterator[Tuple[int, int]]: An iterator of (x, y) points of tiles
touched by the field-of-view.
|
24,040 |
def split(self, sequence):
    major_idx = sequence.idx
    idx2 = 0
    for start, end in zip(major_idx[:-1], major_idx[1:]):
        idx1 = self.idx.index(start, idx2)
        idx2 = self.idx.index(end, idx2)
        seq = Sequence(self.text[start:end])
        seq.idx = [x-start for x in self.idx[idx1:idx2]]
        yield seq
|
Split into subsequences according to `sequence`.
|
24,041 |
def transform_to_length(nndata, length):
    if length is None:
        return nndata
    if length:
        for cn in range(length):
            if cn not in nndata.cn_weights:
                nndata.cn_weights[cn] = 0
                nndata.cn_nninfo[cn] = []
    return nndata
|
Given NNData, transforms data to the specified fingerprint length
Args:
nndata: (NNData)
length: (int) desired length of NNData
|
24,042 |
def getEstablishments(self, city_id, **kwargs):
    params = {"city_id": city_id}
    optional_params = ["lat", "lon"]
    for key in optional_params:
        if key in kwargs:
            params[key] = kwargs[key]
    establishments = self.api.get("/establishments", params)
    return establishments
|
:param city_id: id of the city for which collections are needed
:param lat: latitude
:param lon: longitude
Get a list of restaurant types in a city. The location/City input can be provided in the following ways
- Using Zomato City ID
- Using coordinates of any location within a city
List of all restaurants categorized under a particular restaurant type can obtained using
/Search API with Establishment ID and location details as inputs
|
24,043 |
def parse_date(value):
    if not value:
        return None
    if isinstance(value, datetime.date):
        return value
    return parse_datetime(value).date()
|
Attempts to parse `value` into an instance of ``datetime.date``. If
`value` is ``None``, this function will return ``None``.
Args:
value: A timestamp. This can be a string, datetime.date, or
datetime.datetime value.
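Hedged examples of the accepted inputs described above; the ISO string form assumes `parse_datetime` accepts it.
import datetime

parse_date(None)                          # -> None
parse_date(datetime.date(2020, 1, 2))     # -> datetime.date(2020, 1, 2)
parse_date("2020-01-02T10:30:00")         # -> datetime.date(2020, 1, 2)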
|
24,044 |
def parse_frequencies(variant, transcripts):
frequencies = {}
thousand_genomes_keys = []
thousand_genomes_max_keys = []
exac_keys = []
exac_max_keys = [, ]
gnomad_keys = [, ]
gnomad_max_keys = [, ]
for test_key in thousand_genomes_keys:
thousand_g = parse_frequency(variant, test_key)
if thousand_g:
frequencies[] = thousand_g
break
for test_key in thousand_genomes_max_keys:
thousand_g_max = parse_frequency(variant, test_key)
if thousand_g_max:
frequencies[] = thousand_g_max
break
for test_key in exac_keys:
exac = parse_frequency(variant, test_key)
if exac:
frequencies[] = exac
break
for test_key in exac_max_keys:
exac_max = parse_frequency(variant, test_key)
if exac_max:
frequencies[] = exac_max
break
for test_key in gnomad_keys:
gnomad = parse_frequency(variant, test_key)
if gnomad:
frequencies[] = gnomad
break
for test_key in gnomad_max_keys:
gnomad_max = parse_frequency(variant, test_key)
if gnomad_max:
frequencies[] = gnomad_max
break
if not frequencies:
for transcript in transcripts:
exac = transcript.get()
exac_max = transcript.get()
thousand_g = transcript.get()
thousandg_max = transcript.get()
gnomad = transcript.get()
gnomad_max = transcript.get()
if exac:
frequencies[] = exac
if exac_max:
frequencies[] = exac_max
if thousand_g:
frequencies[] = thousand_g
if thousandg_max:
frequencies[] = thousandg_max
if gnomad:
frequencies[] = gnomad
if gnomad_max:
frequencies[] = gnomad_max
thousand_g_left = parse_frequency(variant, )
if thousand_g_left:
frequencies[] = thousand_g_left
thousand_g_right = parse_frequency(variant, )
if thousand_g_right:
frequencies[] = thousand_g_right
return frequencies
|
Add the frequencies to a variant
Frequencies are parsed either directly from keys in info fieds or from the
transcripts is they are annotated there.
Args:
variant(cyvcf2.Variant): A parsed vcf variant
transcripts(iterable(dict)): Parsed transcripts
Returns:
frequencies(dict): A dictionary with the relevant frequencies
|
24,045 |
def determine_coords(list_of_variable_dicts):
    from .dataarray import DataArray
    from .dataset import Dataset
    coord_names = set()
    noncoord_names = set()
    for variables in list_of_variable_dicts:
        if isinstance(variables, Dataset):
            coord_names.update(variables.coords)
            noncoord_names.update(variables.data_vars)
        else:
            for name, var in variables.items():
                if isinstance(var, DataArray):
                    coords = set(var._coords)
                    coords.discard(name)
                    coord_names.update(coords)
    return coord_names, noncoord_names
|
Given a list of dicts with xarray object values, identify coordinates.
Parameters
----------
list_of_variable_dicts : list of dict or Dataset objects
Of the same form as the arguments to expand_variable_dicts.
Returns
-------
coord_names : set of variable names
noncoord_names : set of variable names
All variables found in the input should appear in either the set of
coordinate or non-coordinate names.
|
24,046 |
def register_plugin(self, name):
    logger.info("Registering plugin: " + name)
    module = importlib.import_module(name)
    module.register_plugin(self)
|
Load and register a plugin given its package name.
|
24,047 |
def _pages_to_generate(self):
return res
|
Return list of slugs that correspond to pages to generate.
|
24,048 |
def pinfo(self,obj,oname=,formatter=None,info=None,detail_level=0):
info = self.info(obj, oname=oname, formatter=formatter,
info=info, detail_level=detail_level)
displayfields = []
def add_fields(fields):
for title, key in fields:
field = info[key]
if field is not None:
displayfields.append((title, field.rstrip()))
add_fields(self.pinfo_fields1)
if (not py3compat.PY3) and isinstance(obj, types.InstanceType) and info[]:
displayfields.append(("Base Class", info[].rstrip()))
add_fields(self.pinfo_fields2)
if info[] != :
displayfields.append(("Namespace", info[].rstrip()))
add_fields(self.pinfo_fields3)
if detail_level > 0 and info[] is not None:
displayfields.append(("Source", self.format(py3compat.cast_bytes_py2(info[]))))
elif info[] is not None:
displayfields.append(("Docstring", info["docstring"]))
if info[]:
if info[] or info[]:
displayfields.append(("Constructor information", ""))
if info[] is not None:
displayfields.append((" Definition",
info[].rstrip()))
if info[] is not None:
displayfields.append((" Docstring",
indent(info[])))
else:
add_fields(self.pinfo_fields_obj)
if displayfields:
page.page(self._format_fields(displayfields))
|
Show detailed information about an object.
Optional arguments:
- oname: name of the variable pointing to the object.
- formatter: special formatter for docstrings (see pdoc)
- info: a structure with some information fields which may have been
precomputed already.
- detail_level: if set to 1, more information is given.
|
24,049 |
def connect(token, protocol=RtmProtocol, factory=WebSocketClientFactory, factory_kwargs=None, api_url=None, debug=False):
if factory_kwargs is None:
factory_kwargs = dict()
metadata = request_session(token, api_url)
wsfactory = factory(metadata.url, **factory_kwargs)
if debug:
warnings.warn()
wsfactory.protocol = lambda *a,**k: protocol(*a,**k)._seedMetadata(metadata)
connection = connectWS(wsfactory)
return connection
|
Creates a new connection to the Slack Real-Time API.
Returns (connection) which represents this connection to the API server.
|
24,050 |
def _updown(self, direction):
    if direction == "up" and self.top_line != 0:
        self.top_line -= 1
    elif direction == "down" and \
            self.screen.getmaxyx()[0] + self.top_line \
            <= self.content_lines + 3:
        self.top_line += 1
|
Provides curses scroll functionality.
|
24,051 |
def get_user_information():
    try:
        import pwd
        _username = pwd.getpwuid(os.getuid())[0]
        _userid = os.getuid()
        _uname = os.uname()[1]
    except ImportError:
        import getpass
        _username = getpass.getuser()
        _userid = 0
        import platform
        _uname = platform.node()
    return _username, _userid, _uname
|
Returns the user's information
:rtype: (str, int, str)
|
24,052 |
def make_backup_files(*,
mongodump=MONGODB_DEFAULT_MONGODUMP,
hosts={},
host_defaults={},
dry_run=False,
**kwargs):
output_dir = fs.get_output_dir()
utils.chkstr(mongodump, )
mongodb_hosts = hosts
if len(mongodb_hosts) == 0:
raise ValueError("No mongodb specified!")
mongodb_defaults = host_defaults
mongodump_files = {}
for mongodb_host_name, mongodb_host in mongodb_hosts.items():
if type(mongodb_host) != dict:
raise TypeError("mongodb_host must be dict"
.format(name=mongodb_host_name))
if not in mongodb_host:
raise KeyError("No specified!")
utils.chkstr(mongodb_host[], )
_set_mongodb_host_val(, MONGODB_DEFAULT_USER,
mongodb_host, mongodb_defaults)
_set_mongodb_host_val(, MONGODB_DEFAULT_PWD,
mongodb_host, mongodb_defaults)
_set_mongodb_host_val(, MONGODB_DEFAULT_PORT,
mongodb_host, mongodb_defaults)
_set_mongodb_host_val(, MONGODB_DEFAULT_AUTH,
mongodb_host, mongodb_defaults)
if in mongodb_defaults:
if in mongodb_host:
mongodb_host[] = _merge_dbs(mongodb_defaults[],
mongodb_host[])
else:
mongodb_host[] = mongodb_defaults[]
mongodump_files[mongodb_host_name] = _make_backup_file(dry_run=dry_run, mongodump=mongodump,
output_dir=output_dir, name=mongodb_host_name,
**mongodb_host)
return mongodump_files
|
Backup all specified databases into a gzipped tarball via mongodump
:param mongodump(str, optional): Path to mongodump executable
:param hosts(dict, optional): A dict containing hosts info to be backed up
:param host_defaults(dict, optional): Default values applied to each host
:param dry_run(bool, optional): Whether to activate dry run mode
:param \*\*kwargs: arbitrary keyword arguments
:raises TypeError: if an argument in kwargs does not have the type expected
:raises ValueError: if an argument within kwargs has an invalid value
|
24,053 |
def _handle_continue(self, node, scope, ctxt, stream):
    self._dlog("handling continue")
    raise errors.InterpContinue()
|
Handle continue node
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
|
24,054 |
def load(self, orbit=None):
if not self.sat.empty:
self._calcOrbits()
if orbit is not None:
if orbit < 0:
orbit = self.num + 1 + orbit
if orbit == 1:
self.next()
except StopIteration:
self._getBasicOrbit(orbit=1)
print( % (self._current - 1))
elif orbit == self.num:
if self.num != 1:
self._getBasicOrbit(self.num - 1)
self.next()
else:
self._getBasicOrbit(orbit=-1)
elif orbit < self.num:
self._getBasicOrbit(orbit)
print( % (self._current - 1))
else:
self.sat.data = DataFrame()
raise Exception()
else:
raise Exception()
else:
print()
|
Load a particular orbit into .data for loaded day.
Parameters
----------
orbit : int
orbit number, 1 indexed
Note
----
A day of data must be loaded before this routine functions properly.
If the last orbit of the day is requested, it will automatically be
padded with data from the next day. The orbit counter will be
reset to 1.
|
24,055 |
def load_sanitizers(self, config_data):
section_strategy = config_data.get("strategy")
if not isinstance(section_strategy, dict):
if section_strategy is None:
return
raise ConfigurationError(
" is %s instead of dict" % (
type(section_strategy),
),
)
for table_name, column_data in six.iteritems(section_strategy):
if not isinstance(column_data, dict):
if column_data is None:
continue
raise ConfigurationError(
" is %s instead of dict" % (
table_name,
type(column_data),
),
)
for column_name, sanitizer_name in six.iteritems(column_data):
if sanitizer_name is None:
continue
if not isinstance(sanitizer_name, six.text_type):
raise ConfigurationError(
" is %s instead of string" % (
table_name,
column_name,
type(sanitizer_name),
),
)
sanitizer_callback = self.find_sanitizer(sanitizer_name)
sanitizer_key = "%s.%s" % (table_name, column_name)
self.sanitizers[sanitizer_key] = sanitizer_callback
|
Loads sanitizers possibly defined in the configuration under dictionary
called "strategy", which should contain mapping of database tables with
column names mapped into sanitizer function names.
:param config_data: Already parsed configuration data, as dictionary.
:type config_data: dict[str,any]
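A sketch of the expected configuration shape (table, column and sanitizer names are invented for illustration; `dumper` stands for the object exposing this method):
config_data = {
    "strategy": {
        "users": {
            "email": "sanitizers.fake_email",  # column -> sanitizer function name
            "password": None,                  # None entries are skipped
        },
        "audit_log": None,                     # whole table skipped
    },
}
dumper.load_sanitizers(config_data)
# afterwards dumper.sanitizers maps "users.email" to the resolved callable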
|
24,056 |
def get_client(quiet=False, debug=False):
from spython.utils import get_singularity_version
from .base import Client
Client.quiet = quiet
Client.debug = debug
from .apps import apps
from .build import build
from .execute import execute
from .help import help
from .inspect import inspect
from .instances import ( instances, stopall )
from .run import run
from .pull import pull
Client.apps = apps
Client.build = build
Client.execute = execute
Client.help = help
Client.inspect = inspect
Client.instances = instances
Client.run = run
Client.pull = pull
from spython.image.cmd import generate_image_commands
Client.image = generate_image_commands()
from spython.instance.cmd import generate_instance_commands
Client.instance = generate_instance_commands()
Client.instance_stopall = stopall
Client.instance.version = Client.version
if "version 3" in get_singularity_version():
from spython.oci.cmd import generate_oci_commands
Client.oci = generate_oci_commands()()
Client.oci.debug = Client.debug
Client.oci.quiet = Client.quiet
Client.oci.OciImage.quiet = Client.quiet
Client.oci.OciImage.debug = Client.debug
cli = Client()
cli.image.debug = cli.debug
cli.image.quiet = cli.quiet
cli.instance.debug = cli.debug
cli.instance.quiet = cli.quiet
return cli
|
get the client and perform imports not on init, in case there are any
initialization or import errors.
Parameters
==========
quiet: if True, suppress most output about the client
debug: turn on debugging mode
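A short usage sketch of the returned client (the import path is assumed; the image URI is a placeholder):
from spython.main import get_client
client = get_client(quiet=True)
image = client.pull('docker://alpine')                 # methods were bound above
print(client.execute(image, ['echo', 'hello world']))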
|
24,057 |
def clinvar_export(store, institute_id, case_name, variant_id):
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
pinned = [store.variant(variant_id) or variant_id for variant_id in
          case_obj.get('suspects', [])]
variant_obj = store.variant(variant_id)
return dict(
today = str(date.today()),
institute=institute_obj,
case=case_obj,
variant=variant_obj,
pinned_vars=pinned
)
|
Gather the required data for creating the clinvar submission form
Args:
store(scout.adapter.MongoAdapter)
institute_id(str): Institute ID
case_name(str): case ID
variant_id(str): variant._id
Returns:
a dictionary with all the required data (case and variant level) to pre-fill in fields in the clinvar submission form
|
24,058 |
def sam_list(sam):
    list = []
    for file in sam:
        for line in file:
            # skip SAM header lines, which start with '@'
            if line.startswith('@') is False:
                line = line.strip().split('\t')
                id, map = line[0], int(line[1])
                # skip unmapped reads (flag 4) and reads with unmapped mates (flag 8)
                if map != 4 and map != 8:
                    list.append(id)
    return set(list)
|
get a list of mapped reads
|
24,059 |
def assert_conditions(self):
self.assert_condition_md5()
etag = self.clean_etag(self.call_method('etag'))
self.response.last_modified = self.call_method('last_modified')
self.assert_condition_etag()
self.assert_condition_last_modified()
|
Handles various HTTP conditions and raises HTTP exceptions to
abort the request.
- Content-MD5 request header must match the MD5 hash of the full
input (:func:`assert_condition_md5`).
- If-Match and If-None-Match etags are checked against the ETag of
this resource (:func:`assert_condition_etag`).
- If-Modified-Since and If-Unmodified-Since are checked against
the modification date of this resource
(:func:`assert_condition_last_modified`).
.. todo:: Return a 501 exception when any Content-* headers have been
set in the request. (See :rfc:`2616`, section 9.6)
|
24,060 |
def validate_init_args_statically(distribution, batch_shape):
if tensorshape_util.rank(batch_shape.shape) is not None:
if tensorshape_util.rank(batch_shape.shape) != 1:
raise ValueError("`batch_shape` must be a vector "
"(saw rank: {}).".format(
tensorshape_util.rank(batch_shape.shape)))
batch_shape_static = tensorshape_util.constant_value_as_shape(batch_shape)
batch_size_static = tensorshape_util.num_elements(batch_shape_static)
dist_batch_size_static = tensorshape_util.num_elements(
distribution.batch_shape)
if batch_size_static is not None and dist_batch_size_static is not None:
if batch_size_static != dist_batch_size_static:
raise ValueError("`batch_shape` size ({}) must match "
"`distribution.batch_shape` size ({}).".format(
batch_size_static, dist_batch_size_static))
if tensorshape_util.dims(batch_shape_static) is not None:
if any(
tf.compat.dimension_value(dim) is not None and
tf.compat.dimension_value(dim) < 1 for dim in batch_shape_static):
raise ValueError("`batch_shape` elements must be >=-1.")
|
Helper to __init__ which makes or raises assertions.
|
24,061 |
def Deserialize(self, reader):
self.HashStart = reader.ReadSerializableArray()
self.HashStop = reader.ReadUInt256()
|
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
|
24,062 |
def get_tools_paths(self):
if settings.DEBUG or is_testing():
return list(get_apps_tools().values())
else:
tools_root = settings.FLOW_TOOLS_ROOT
subdirs = next(os.walk(tools_root))[1]
return [os.path.join(tools_root, sdir) for sdir in subdirs]
|
Get tools' paths.
|
24,063 |
def maps_get_default_rules_output_rules_rbridgeid(self, **kwargs):
config = ET.Element("config")
maps_get_default_rules = ET.Element("maps_get_default_rules")
config = maps_get_default_rules
output = ET.SubElement(maps_get_default_rules, "output")
rules = ET.SubElement(output, "rules")
rbridgeid = ET.SubElement(rules, "rbridgeid")
rbridgeid.text = kwargs.pop('rbridgeid')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
24,064 |
def makeCredBearerTokenLoginMethod(username,
password,
stsUrl,
stsCert=None):
assert(username)
assert(password)
assert(stsUrl)
def _doLogin(soapStub):
from . import sso
cert = soapStub.schemeArgs['cert_file']
key = soapStub.schemeArgs['key_file']
authenticator = sso.SsoAuthenticator(sts_url=stsUrl,
sts_cert=stsCert)
samlAssertion = authenticator.get_bearer_saml_assertion(username,
password,
cert,
key)
si = vim.ServiceInstance("ServiceInstance", soapStub)
sm = si.content.sessionManager
if not sm.currentSession:
try:
soapStub.samlToken = samlAssertion
si.content.sessionManager.LoginByToken()
finally:
soapStub.samlToken = None
return _doLogin
|
Return a function that will call the vim.SessionManager.LoginByToken()
after obtaining a Bearer token from the STS. The result of this function
can be passed as the "loginMethod" to a SessionOrientedStub constructor.
@param username: username of the user/service registered with STS.
@param password: password of the user/service registered with STS.
@param stsUrl: URL of the SAML Token issueing service. (i.e. SSO server).
@param stsCert: public key of the STS service.
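A usage sketch wiring the returned login method into a SessionOrientedStub (host, credentials and certificate paths are placeholders):
from pyVmomi import vim
from pyVmomi.SoapAdapter import SoapStubAdapter, SessionOrientedStub

loginMethod = makeCredBearerTokenLoginMethod('svc-user@vsphere.local', 's3cret',
                                             'https://sso.example.com/sts/STSService')
soapStub = SoapStubAdapter(host='vc.example.com', port=443,
                           certFile='client.crt', certKeyFile='client.key')
stub = SessionOrientedStub(soapStub, loginMethod)
si = vim.ServiceInstance('ServiceInstance', stub)  # re-authenticates transparently when the session expires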
|
24,065 |
def backwards(apps, schema_editor):
titles = [
,
,
,
,
,
,
]
samples = EventBase.objects.filter(title__in=titles)
samples.delete()
|
Delete sample events, including derivative repeat and variation events.
|
24,066 |
def new_dxfile(mode=None, write_buffer_size=dxfile.DEFAULT_BUFFER_SIZE, expected_file_size=None, file_is_mmapd=False,
**kwargs):
dx_file = DXFile(mode=mode, write_buffer_size=write_buffer_size, expected_file_size=expected_file_size,
file_is_mmapd=file_is_mmapd)
dx_file.new(**kwargs)
return dx_file
|
:param mode: One of "w" or "a" for write and append modes, respectively
:type mode: string
:rtype: :class:`~dxpy.bindings.dxfile.DXFile`
Additional optional parameters not listed: all those under
:func:`dxpy.bindings.DXDataObject.new`.
Creates a new remote file object that is ready to be written to;
returns a :class:`~dxpy.bindings.dxfile.DXFile` object that is a
writable file-like object.
Example::
with new_dxfile(media_type="application/json") as fd:
fd.write("foo\\n")
Note that this is shorthand for::
dxFile = DXFile()
dxFile.new(**kwargs)
|
24,067 |
def _rec_get_names(args, names=None):
if names is None:
names = []
for arg in args:
if isinstance(arg, node_classes.Tuple):
_rec_get_names(arg.elts, names)
else:
names.append(arg.name)
return names
|
return a list of all argument names
|
24,068 |
def psffunc(self, *args, **kwargs):
if self.polychromatic:
func = psfcalc.calculate_polychrome_linescan_psf
else:
func = psfcalc.calculate_linescan_psf
return func(*args, **kwargs)
|
Calculates a linescan psf
|
24,069 |
def add_result(self, scan_id, result_type, host='', name='', value='',
               port='', test_id='', severity='', qod=''):
    assert scan_id
    assert len(name) or len(value)
    result = dict()
    result['type'] = result_type
    result['name'] = name
    result['severity'] = severity
    result['test_id'] = test_id
    result['value'] = value
    result['host'] = host
    result['port'] = port
    result['qod'] = qod
    results = self.scans_table[scan_id]['results']
    results.append(result)
|
Add a result to a scan in the table.
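An illustrative call (the owning object `scan_table` and all values are placeholders; the scan id must already exist in the table):
scan_table.add_result('a1b2c3', 'ALARM', host='192.0.2.10',
                      name='Weak TLS configuration', value='TLSv1.0 enabled',
                      port='443/tcp', test_id='1.3.6.1.4.1.25623.1.0.000000',
                      severity='5.0', qod='80')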
|
24,070 |
def _pop_letters(char_list):
    logger.debug('pop letters from %s', char_list)
    letters = []
    while len(char_list) != 0 and char_list[0].isalpha():
        letters.append(char_list.pop(0))
    logger.debug('popped letters: %s', letters)
    logger.debug('remaining characters: %s', char_list)
    return letters
|
Pop consecutive letters from the front of a list and return them
Pops any and all consecutive letters from the start of the provided
character list and returns them as a list of characters. Operates
on (and possibly alters) the passed list
:param list char_list: a list of characters
:return: a list of characters
:rtype: list
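A small example of the in-place behaviour (note the passed list is mutated):
chars = list('mm10dd')
print(_pop_letters(chars))  # ['m', 'm']
print(chars)                # ['1', '0', 'd', 'd'] - only the leading letters were consumed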
|
24,071 |
def uninstall(cls):
if os.path.exists(cls.home):
shutil.rmtree(cls.home)
|
Remove the package manager from the system.
|
24,072 |
def list_users(ctx, search, uuid, active):
    users = ctx.obj['db'].objectmodels['user']
    for found_user in users.find():
        if not search or (search and search in found_user.name):
            print(found_user.name, end=' ' if active or uuid else '\n')
            if uuid:
                print(found_user.uuid, end=' ' if active else '\n')
            if active:
                print(found_user.active)
    log("Done")
|
List all locally known users
|
24,073 |
def get_default_sess_config(mem_fraction=0.99):
conf = tfv1.ConfigProto()
conf.allow_soft_placement = True
conf.intra_op_parallelism_threads = 1
conf.inter_op_parallelism_threads = 0
conf.gpu_options.per_process_gpu_memory_fraction = mem_fraction
conf.gpu_options.allow_growth = True
return conf
|
Return a tf.ConfigProto to use as default session config.
You can modify the returned config to fit your needs.
Args:
mem_fraction(float): see the `per_process_gpu_memory_fraction` option
in TensorFlow's GPUOptions protobuf:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/protobuf/config.proto
Returns:
tf.ConfigProto: the config to use.
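Typical usage, tweaking the returned config before opening a TF1-style session (a sketch; assumes TensorFlow with the compat.v1 API available):
import tensorflow.compat.v1 as tf
config = get_default_sess_config(mem_fraction=0.9)
config.log_device_placement = True   # further per-run tweaks are fine
sess = tf.Session(config=config)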
|
24,074 |
def _duplicateLayer(self, layerName, newLayerName):
newLayer = self.getLayer(layerName).copy()
return self.insertLayer(newLayer, newLayerName)
|
This is the environment implementation of :meth:`BaseFont.duplicateLayer`.
**layerName** will be a :ref:`type-string` representing a valid layer name.
The value will have been normalized with :func:`normalizers.normalizeLayerName`
and **layerName** will be a layer that exists in the font. **newLayerName**
will be a :ref:`type-string` representing a valid layer name. The value will
have been normalized with :func:`normalizers.normalizeLayerName` and
**newLayerName** will have been tested to make sure that no layer with
the same name exists in the font. This must return an instance of a
:class:`BaseLayer` subclass.
Subclasses may override this method.
|
24,075 |
def evaluate(self, instance, step, extra):
defaults = dict(self.defaults)
if extra:
defaults.update(extra)
return self.generate(step, defaults)
|
Evaluate the current definition and fill its attributes.
Uses attributes definition in the following order:
- values defined when defining the ParameteredAttribute
- additional values defined when instantiating the containing factory
Args:
instance (builder.Resolver): The object holding currently computed
attributes
step: a factory.builder.BuildStep
extra (dict): additional, call-time added kwargs
for the step.
|
24,076 |
def data_struct(*args, **kw):
m = pimms.merge(*args, **kw)
return DataStruct(**m)
|
data_struct(args...) collapses all arguments (which must be maps) and keyword arguments
right-to-left into a single mapping and uses this mapping to create a DataStruct object.
|
24,077 |
def currentGrouping( self ):
groupBy = self.groupBy()
if ( groupBy == XOrbBrowserWidget.GroupByAdvancedKey ):
return self.advancedGrouping()
else:
table = self.tableType()
if ( not table ):
return []
for column in table.schema().columns():
if ( column.displayName() == groupBy ):
return [column.name()]
return []
|
Returns the current grouping for this widget.
:return [<str> group level, ..]
|
24,078 |
def _get_event_request_header(self):
otr_status = (hangouts_pb2.OFF_THE_RECORD_STATUS_OFF_THE_RECORD
if self.is_off_the_record else
hangouts_pb2.OFF_THE_RECORD_STATUS_ON_THE_RECORD)
return hangouts_pb2.EventRequestHeader(
conversation_id=hangouts_pb2.ConversationId(id=self.id_),
client_generated_id=self._client.get_client_generated_id(),
expected_otr=otr_status,
delivery_medium=self._get_default_delivery_medium(),
)
|
Return EventRequestHeader for conversation.
|
24,079 |
def _check_initialized(self):
baddies = self._find_uninitialized()
if baddies:
raise datastore_errors.BadValueError(
    'Entity has uninitialized properties: %s' % ', '.join(baddies))
|
Internal helper to check for uninitialized properties.
Raises:
BadValueError if it finds any.
|
24,080 |
def __discoverPlugins():
for app in settings.INSTALLED_APPS:
if not app.startswith():
module = __import__(app)
moduledir = path.Path(module.__file__).parent
plugin = moduledir /
if plugin.exists():
file_, fpath, desc = imp.find_module(, [moduledir])
if file_:
imp.load_module(, file_, fpath, desc)
return FrogPluginRegistry.plugins
|
Discover the plugin classes contained in Python files, given a
list of directory names to scan. Return a list of plugin classes.
|
24,081 |
def _clean(self, magic):
if magic.lower() == :
self.magic =
elif magic[:2].lower() == :
self.magic = magic[2:]
elif magic[:2].lower() == :
self.ext = magic[1:]
|
Given a magic string, remove the output tag designator.
|
24,082 |
def save_devices(self):
log.debug("saving devices to ...".format(self._devices_filename))
if self._devices != []:
with codecs.open(self._devices_filename, "wb", "utf-8") as f:
json.dump(self._devices, f)
|
save devices that have been obtained from LaMetric cloud
to a local file
|
24,083 |
def similar(names=None, ids=None, start=0, results=15, buckets=None, limit=False, max_familiarity=None, min_familiarity=None,
max_hotttnesss=None, min_hotttnesss=None, seed_catalog=None,artist_start_year_before=None, \
artist_start_year_after=None,artist_end_year_before=None,artist_end_year_after=None):
    buckets = buckets or []
    kwargs = {}
    if ids:
        if not isinstance(ids, list):
            ids = [ids]
        kwargs['id'] = ids
    if names:
        if not isinstance(names, list):
            names = [names]
        kwargs['name'] = names
    if max_familiarity is not None:
        kwargs['max_familiarity'] = max_familiarity
    if min_familiarity is not None:
        kwargs['min_familiarity'] = min_familiarity
    if max_hotttnesss is not None:
        kwargs['max_hotttnesss'] = max_hotttnesss
    if min_hotttnesss is not None:
        kwargs['min_hotttnesss'] = min_hotttnesss
    if seed_catalog is not None:
        kwargs['seed_catalog'] = seed_catalog
    if start:
        kwargs['start'] = start
    if results:
        kwargs['results'] = results
    if buckets:
        kwargs['bucket'] = buckets
    if limit:
        kwargs['limit'] = 'true'
    if artist_start_year_before:
        kwargs['artist_start_year_before'] = artist_start_year_before
    if artist_start_year_after:
        kwargs['artist_start_year_after'] = artist_start_year_after
    if artist_end_year_before:
        kwargs['artist_end_year_before'] = artist_end_year_before
    if artist_end_year_after:
        kwargs['artist_end_year_after'] = artist_end_year_after
    result = util.callm("%s/%s" % ('artist', 'similar'), kwargs)
    return [Artist(**util.fix(a_dict)) for a_dict in result['response']['artists']]
|
Return similar artists to this one
Args:
Kwargs:
ids (str/list): An artist id or list of ids
names (str/list): An artist name or list of names
results (int): An integer number of results to return
buckets (list): A list of strings specifying which buckets to retrieve
limit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets
start (int): An integer starting value for the result set
max_familiarity (float): A float specifying the max familiarity of artists to search for
min_familiarity (float): A float specifying the min familiarity of artists to search for
max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for
min_hotttnesss (float): A float specifying the min hotttnesss of artists to search for
seed_catalog (str): A string specifying the catalog similar artists are restricted to
Returns:
A list of similar Artist objects
Example:
>>> some_dudes = [artist.Artist('weezer'), artist.Artist('radiohead')]
>>> some_dudes
[<artist - Weezer>, <artist - Radiohead>]
>>> sims = artist.similar(ids=[art.id for art in some_dudes], results=5)
>>> sims
[<artist - The Smashing Pumpkins>, <artist - Biffy Clyro>, <artist - Death Cab for Cutie>, <artist - Jimmy Eat World>, <artist - Nerf Herder>]
>>>
|
24,084 |
def create_comment_browser(self, layout):
brws = CommentBrowser(1, headers=[])
layout.insertWidget(1, brws)
return brws
|
Create a comment browser and insert it into the given layout
:param layout: the layout to insert the browser into
:type layout: QLayout
:returns: the created browser
:rtype: :class:`jukeboxcore.gui.widgets.browser.ListBrowser`
:raises: None
|
24,085 |
def by_user_and_perm(cls, user_id, perm_name, db_session=None):
db_session = get_db_session(db_session)
query = db_session.query(cls.model).filter(cls.model.user_id == user_id)
query = query.filter(cls.model.perm_name == perm_name)
return query.first()
|
return by user and permission name
:param user_id:
:param perm_name:
:param db_session:
:return:
|
24,086 |
def expected_h(nvals, fit="RANSAC"):
rsvals = [expected_rs(n) for n in nvals]
poly = poly_fit(np.log(nvals), np.log(rsvals), 1, fit=fit)
return poly[0]
|
Uses expected_rs to calculate the expected value for the Hurst exponent h
based on the values of n used for the calculation.
Args:
nvals (iterable of int):
the values of n used to calculate the individual (R/S)_n
KWargs:
fit (str):
the fitting method to use for the line fit, either 'poly' for normal
least squares polynomial fitting or 'RANSAC' for RANSAC-fitting which
is more robust to outliers
Returns:
float:
expected h for white noise
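A short sketch of how this is typically used to correct a measured Hurst exponent (the n values are illustrative; the Anis-Lloyd-style correction shown is an assumption about intended use):
nvals = [16, 32, 64, 128, 256]
h_white = expected_h(nvals, fit='poly')
# corrected exponent: h_corrected = h_measured - h_white + 0.5
print(round(h_white, 3))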
|
24,087 |
def _recv_sf(self, data):
self.rx_timer.cancel()
if self.rx_state != ISOTP_IDLE:
warning("RX state was reset because single frame was received")
self.rx_state = ISOTP_IDLE
length = six.indexbytes(data, 0) & 0xf
if len(data) - 1 < length:
return 1
msg = data[1:1 + length]
self.rx_queue.put(msg)
for cb in self.rx_callbacks:
cb(msg)
self.call_release()
return 0
|
Process a received 'Single Frame' frame
|
24,088 |
def preorder(self):
node_stack = [self]
result = []
while len(node_stack) > 0:
node = node_stack.pop()
result.append(node)
if node.right is not None:
node_stack.append(node.right)
if node.left is not None:
node_stack.append(node.left)
return result
|
Return the nodes in the binary tree using pre-order_ traversal.
A pre-order_ traversal visits root, left subtree, then right subtree.
.. _pre-order: https://en.wikipedia.org/wiki/Tree_traversal
:return: List of nodes.
:rtype: [binarytree.Node]
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1)
>>> root.left = Node(2)
>>> root.right = Node(3)
>>> root.left.left = Node(4)
>>> root.left.right = Node(5)
>>>
>>> print(root)
<BLANKLINE>
__1
/ \\
2 3
/ \\
4 5
<BLANKLINE>
>>> root.preorder
[Node(1), Node(2), Node(4), Node(5), Node(3)]
|
24,089 |
def load_py(stream, filepath=None):
with add_sys_paths(config.package_definition_build_python_paths):
return _load_py(stream, filepath=filepath)
|
Load python-formatted data from a stream.
Args:
stream (file-like object).
Returns:
dict.
|
24,090 |
def run():
logging.basicConfig(level=logging.DEBUG)
load_config.ConfigLoader().load()
config.debug = True
print(repr(config.engine.item(sys.argv[1])))
|
Module level test.
|
24,091 |
def conf_budget(self, budget):
if self.maplesat:
pysolvers.maplechrono_cbudget(self.maplesat, budget)
|
Set limit on the number of conflicts.
|
24,092 |
def validate_one(func_name):
doc = Docstring(func_name)
errs, wrns, examples_errs = get_validation_data(doc)
    return {'type': doc.type,
            'docstring': doc.clean_doc,
            'deprecated': doc.deprecated,
            'file': doc.source_file_name,
            'file_line': doc.source_file_def_line,
            'github_link': doc.github_url,
            'errors': errs,
            'warnings': wrns,
            'examples_errors': examples_errs}
|
Validate the docstring for the given func_name
Parameters
----------
func_name : function
Function whose docstring will be evaluated (e.g. pandas.read_csv).
Returns
-------
dict
A dictionary containing all the information obtained from validating
the docstring.
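An illustrative call (the key names follow the reconstructed return dict above):
info = validate_one('pandas.read_csv')
print(info['type'], len(info['errors']), len(info['warnings']))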
|
24,093 |
def count(self, source, target):
try:
source_value = self._response_holder[source]
except KeyError:
try:
mock_value = json.loads(_target)
except:
mock_value = _target
if not self._is_empty:
values.append(mock_value)
return values
|
The 'count' relationship is used for listing endpoints where a specific attribute
holds the number of instances of another attribute.
|
24,094 |
def scale(self, scalex, scaley=None, center=(0, 0)):
c0 = numpy.array(center)
s = scalex if scaley is None else numpy.array((scalex, scaley))
self.polygons = [(points - c0) * s + c0 for points in self.polygons]
return self
|
Scale this object.
Parameters
----------
scalex : number
Scaling factor along the first axis.
scaley : number or ``None``
Scaling factor along the second axis. If ``None``, same as
``scalex``.
center : array-like[2]
Center point for the scaling operation.
Returns
-------
out : ``PolygonSet``
This object.
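A brief gdspy-style sketch (layer number and coordinates are arbitrary):
import gdspy
poly = gdspy.PolygonSet([[(0, 0), (2, 0), (2, 1), (0, 1)]], layer=1)
poly.scale(2)                        # uniform scaling about (0, 0)
poly.scale(1, 0.5, center=(1, 0))    # anisotropic scaling about a custom center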
|
24,095 |
def reservations(self):
command = [SINFO, ]
output = subprocess.check_output(command, env=SINFO_ENV)
output = output.decode()
it = iter(output.splitlines())
next(it)
for line in it:
rsv = Reservation.from_sinfo(line)
yield rsv.name, rsv
|
Get the nodes of every reservation.
|
24,096 |
def tagged(self, tag):
return PostCollection([p for p in self if unicode(tag) in p.tags])
|
Returns a new PostCollection containing the subset of posts that are tagged with *tag*.
|
24,097 |
def _set_service(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=service.service, is_container=, presence=False, yang_name="service", rest_name="service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__service = t
if hasattr(self, '_set'):
self._set()
|
Setter method for service, mapped from YANG variable /service (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_service is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_service() directly.
|
24,098 |
def note(name, source=None, contents=None, **kwargs):
    comment = ''
    if source:
        comment += 'include file: {}\n'.format(source)
    if contents and len(contents) < 200:
        comment += contents
    return {'name': name, 'result': True, 'comment': comment, 'changes': {}}
|
Add content to a document generated using `highstate_doc.render`.
This state does not perform any tasks on the host. It is only used by highstate_doc lowstate processors
to include extra documents.
.. code-block:: yaml
{{sls}} example note:
highstate_doc.note:
- name: example note
- require_in:
- pkg: somepackage
- contents: |
example `highstate_doc.note`
------------------
This state does not do anything to the system! It is only used by a `processor`
you can use `requisites` and `order` to move your docs around the rendered file.
.. this message appears above the `pkg: somepackage` state.
- source: salt://{{tpldir}}/also_include_a_file.md
{{sls}} extra help:
highstate_doc.note:
- name: example
- order: 0
- source: salt://{{tpldir}}/HELP.md
|
24,099 |
def get_gcp_client(**kwargs):
    # key names mirror the parameter names; the pkg_name default is inferred
    return _gcp_client(project=kwargs['project'], mod_name=kwargs['mod_name'],
                       pkg_name=kwargs.get('pkg_name', 'google.cloud'),
                       key_file=kwargs.get('key_file', None),
                       http_auth=kwargs.get('http_auth', None),
                       user_agent=kwargs.get('user_agent', None))
|
Public GCP client builder.
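A sketch of a call (the kwarg names mirror the reconstructed lookups above; project and module names are placeholders):
client = get_gcp_client(project='my-project', mod_name='storage',
                        key_file='/path/to/service-account.json')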
|