Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k)
---|---|---|
380,000 | def sepBy(p, sep):
return separated(p, sep, 0, maxt=float('inf'), end=False)  # 'inf' assumed; literal stripped in extraction | `sepBy(p, sep)` parses zero or more occurrences of p, separated by `sep`.
Returns a list of values returned by `p`. |
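The `separated` helper referenced above is not included in this dump. A minimal self-contained sketch of the `sepBy` semantics (zero or more items; a trailing separator is not consumed), with toy `digit`/`comma` parsers as illustrative stand-ins:

```python
# Parsers here are functions text -> (value, rest) or None on failure;
# this illustrates sepBy's contract, it is not the source library.
def sep_by(p, sep):
    def parse(text):
        first = p(text)
        if first is None:
            return [], text            # zero occurrences still succeed
        value, rest = first
        values = [value]
        while True:
            s = sep(rest)
            if s is None:
                break
            nxt = p(s[1])
            if nxt is None:
                break                  # backtrack: separator not consumed
            value, rest = nxt
            values.append(value)
        return values, rest
    return parse

digit = lambda t: (int(t[0]), t[1:]) if t[:1].isdigit() else None
comma = lambda t: (',', t[1:]) if t[:1] == ',' else None
assert sep_by(digit, comma)('1,2,3') == ([1, 2, 3], '')
assert sep_by(digit, comma)('') == ([], '')
```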
380,001 | async def _download_predicate_data(self, class_, controller):
await self.authenticate()
# URL template and JSON key are assumptions; literals stripped in extraction
url = (
'{}/{}/{}'.format(self.base_url, controller, class_))
resp = await self._ratelimited_get(url)
await _raise_for_status(resp)
resp_json = await resp.json()
return resp_json['data'] | Get raw predicate information for given request class, and cache for
subsequent calls. |
380,002 | def _transport_interceptor(self, callback):
def add_item_to_queue(header, message):
queue_item = (
Priority.TRANSPORT,
next(
self._transport_interceptor_counter
),
(callback, header, message),
)
self.__queue.put(
queue_item
)
return add_item_to_queue | Takes a callback function and returns a function that takes headers and
messages and places them on the main service queue. |
380,003 | def is_anagram(s, t):
maps = {}
mapt = {}
for i in s:
maps[i] = maps.get(i, 0) + 1
for i in t:
mapt[i] = mapt.get(i, 0) + 1
return maps == mapt | :type s: str
:type t: str
:rtype: bool |
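A quick check of the frequency-count approach above; `collections.Counter` is the standard-library shorthand for the two dicts it builds:

```python
from collections import Counter

assert is_anagram('listen', 'silent') is True
assert is_anagram('rat', 'car') is False
# Counter('listen') == Counter('silent') builds the same frequency maps
assert (Counter('listen') == Counter('silent')) == is_anagram('listen', 'silent')
```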
380,004 | def v_from_i(resistance_shunt, resistance_series, nNsVth, current,
saturation_current, photocurrent, method='lambertw'):
if method.lower() == 'lambertw':
return _singlediode._lambertw_v_from_i(
resistance_shunt, resistance_series, nNsVth, current,
saturation_current, photocurrent
)
else:
args = (current, photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth)
V = _singlediode.bishop88_v_from_i(*args, method=method.lower())
size, shape = _singlediode._get_size_and_shape(args)
if size <= 1:
if shape is not None:
V = np.tile(V, shape)
if np.isnan(V).any() and size <= 1:
V = np.repeat(V, size)
if shape is not None:
V = V.reshape(shape)
return V | Device voltage at the given device current for the single diode model.
Uses the single diode model (SDM) as described in, e.g.,
Jain and Kapoor 2004 [1].
The solution is per Eq 3 of [1] except when resistance_shunt=numpy.inf,
in which case the explicit solution for voltage is used.
Ideal device parameters are specified by resistance_shunt=np.inf and
resistance_series=0.
Inputs to this function can include scalars and pandas.Series, but it is
the caller's responsibility to ensure that the arguments are all float64
and within the proper ranges.
Parameters
----------
resistance_shunt : numeric
Shunt resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rsh``.
0 < resistance_shunt <= numpy.inf
resistance_series : numeric
Series resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rs``.
0 <= resistance_series < numpy.inf
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
(n), 2) the number of cells in series (Ns), and 3) the cell
thermal voltage under the desired IV curve conditions (Vth). The
thermal voltage of the cell (in volts) may be calculated as
``k*temp_cell/q``, where k is Boltzmann's constant (J/K),
temp_cell is the temperature of the p-n junction in Kelvin, and
q is the charge of an electron (coulombs).
0 < nNsVth
current : numeric
The current in amperes under desired IV curve conditions.
saturation_current : numeric
Diode saturation current in amperes under desired IV curve
conditions. Often abbreviated ``I_0``.
0 < saturation_current
photocurrent : numeric
Light-generated current (photocurrent) in amperes under desired
IV curve conditions. Often abbreviated ``I_L``.
0 <= photocurrent
method : str
Method to use: ``'lambertw'``, ``'newton'``, or ``'brentq'``. *Note*:
``'brentq'`` is limited to 1st quadrant only.
Returns
-------
voltage : np.ndarray or scalar
References
----------
[1] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277. |
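An illustrative call, assuming this is `pvlib.pvsystem.v_from_i`; the parameter values are typical single-module numbers, not taken from the source (keyword arguments keep the call valid across pvlib versions that reordered the positional parameters):

```python
import numpy as np
from pvlib.pvsystem import v_from_i

# open-circuit voltage: device current of zero
v_oc = v_from_i(resistance_shunt=20.0, resistance_series=0.1,
                nNsVth=1.5, current=0.0, saturation_current=1e-10,
                photocurrent=7.0, method='lambertw')
print(np.round(v_oc, 2))
```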
380,005 | def lookup(self, hostname):
matches = [
config
for config in self._config
if self._allowed(config["host"], hostname)
]
ret = SSHConfigDict()
for match in matches:
for key, value in match["config"].items():
if key not in ret:
ret[key] = value[:] if value is not None else value
elif key == "identityfile":
ret[key].extend(value)
ret = self._expand_variables(ret, hostname)
if "proxycommand" in ret and ret["proxycommand"] is None:
del ret["proxycommand"]
return ret | Return a dict (`SSHConfigDict`) of config options for a given hostname.
The host-matching rules of OpenSSH's ``ssh_config`` man page are used:
For each parameter, the first obtained value will be used. The
configuration files contain sections separated by ``Host``
specifications, and that section is only applied for hosts that match
one of the patterns given in the specification.
Since the first obtained value for each parameter is used, more host-
specific declarations should be given near the beginning of the file,
and general defaults at the end.
The keys in the returned dict are all normalized to lowercase (look for
``"port"``, not ``"Port"``). The values are processed according to the
rules for substitution variable expansion in ``ssh_config``.
Finally, please see the docs for `SSHConfigDict` for deeper info on
features such as optional type conversion methods, e.g.::
conf = my_config.lookup('myhost')
assert conf['passwordauthentication'] == 'yes'
assert conf.as_bool('passwordauthentication') is True
:param str hostname: the hostname to lookup
.. versionchanged:: 2.5
Returns `SSHConfigDict` objects instead of dict literals. |
380,006 | def ccdmask(flat1, flat2=None, mask=None, lowercut=6.0, uppercut=6.0,
siglev=1.0, mode='region', nmed=(7, 7), nsig=(15, 15)):
# mode names ('region'/'full') follow the docstring; the log-message
# strings below are reconstructions, the originals were stripped
if flat2 is None:
flat1, flat2 = flat2, flat1
flat1 = numpy.ones_like(flat2)
if mask is None:
mask = numpy.zeros_like(flat1, dtype='int')
ratio = numpy.zeros_like(flat1)
invalid = numpy.zeros_like(flat1)
invalid[mask == PIXEL_HOT] = HIGH_SIGMA
invalid[mask == PIXEL_DEAD] = LOW_SIGMA
gmask = mask == PIXEL_VALID
_logger.info('valid pixels: %d', numpy.count_nonzero(gmask))
smask = mask != PIXEL_VALID
_logger.info('invalid pixels: %d', numpy.count_nonzero(smask))
zero_mask = numpy.logical_or(flat1[gmask] <= 0, flat2[gmask] <= 0)
if numpy.any(zero_mask):
mask, gmask, smask = update_mask(mask, gmask, zero_mask, PIXEL_DEAD)
invalid[mask == PIXEL_DEAD] = LOW_SIGMA
ratio[gmask] = flat2[gmask] / flat1[gmask]
ratio[smask] = invalid[smask]
if mode == 'region':
_logger.info('computing median in boxes of %r', nmed)
ratio_med = scipy.ndimage.filters.median_filter(ratio, size=nmed)
ratio[gmask] -= ratio_med[gmask]
else:
_logger.info('computing median in full array')
ratio_med = numpy.median(ratio[gmask])
ratio[gmask] -= ratio_med
qns = 100 * scipy.stats.norm.cdf(siglev)
pns = 100 - qns
_logger.info('sigma level is %f', siglev)
_logger.info('percentiles are %f and %f', pns, qns)
sigma = numpy.zeros_like(ratio)
if mode == 'region':
mshape = max_blk_coverage(blk=nsig, shape=ratio.shape)
_logger.info('estimating sigma in boxes of %r', nsig)
_logger.info('shape covered by boxes is %r', mshape)
block_gen = blk_nd_short(blk=nsig, shape=ratio.shape)
else:
mshape = ratio.shape
_logger.info('estimating sigma in full array')
block_gen = itertools.repeat(slice(None), 1)
for blk in block_gen:
m = mask[blk] == PIXEL_VALID
valid_points = numpy.ravel(ratio[blk][m])
ls = scipy.stats.scoreatpercentile(valid_points, pns)
hs = scipy.stats.scoreatpercentile(valid_points, qns)
_logger.debug('score at percentiles')
_logger.debug('low %f high %f', ls, hs)
sig = (hs - ls) / (2 * siglev)
_logger.debug('sigma estimate is %f', sig)
sigma[blk] = sig
fill0 = ratio.shape[0] - mshape[0]
fill1 = ratio.shape[1] - mshape[1]
# axis handling fixed: fill0 pads the trailing rows, fill1 the columns
if fill0 > 0:
_logger.info('filling %d uncovered rows', fill0)
sigma[mshape[0]:, :] = sigma[mshape[0] - fill0:mshape[0], :]
if fill1 > 0:
_logger.info('filling %d uncovered columns', fill1)
sigma[:, mshape[1]:] = sigma[:, mshape[1] - fill1:mshape[1]]
ratio[gmask] /= sigma[gmask]
f1_ratio = ratio[gmask]
f1_mask = mask[gmask]
f1_mask[f1_ratio >= uppercut] = PIXEL_HOT
f1_mask[f1_ratio <= -lowercut] = PIXEL_DEAD
mask[gmask] = f1_mask
return ratio, mask, sigma | Find cosmetic defects in a detector using two flat field images.
Two arrays representing flat fields of different exposure times are
required. Cosmetic defects are selected as points that deviate
significantly from the expected normal distribution of pixels in
the ratio between `flat2` and `flat1`. The median of the ratio
is computed and subtracted. Then, the standard deviation is estimated
by computing the percentiles
nearest to the pixel values corresponding to `siglev` in the normal CDF.
The standard deviation is then the distance between the pixel values
divided by two times `siglev`. The ratio image is then normalized with
this standard deviation.
The behavior of the function depends on the value of the parameter
`mode`. If the value is 'region' (the default), both the median
and the sigma are computed in boxes. If the value is 'full', these
values are computed using the full array.
The size of the boxes in 'region' mode is given by `nmed` for
the median computation and `nsig` for the standard deviation.
The values in the normalized ratio array above `uppercut`
are flagged as hot pixels, and those below `-lowercut` are
flagged as dead pixels in the output mask.
:parameter flat1: an array representing a flat illuminated exposure.
:parameter flat2: an array representing a flat illuminated exposure.
:parameter mask: an integer array representing initial mask.
:parameter lowercut: values below this sigma level are flagged as dead pixels.
:parameter uppercut: values above this sigma level are flagged as hot pixels.
:parameter siglev: level to estimate the standard deviation.
:parameter mode: either 'full' or 'region'
:parameter nmed: region used to compute the median
:parameter nsig: region used to estimate the standard deviation
:returns: the normalized ratio of the flats, the updated mask and standard deviation
.. note::
This function is based on the description of the task
ccdmask of IRAF
.. seealso::
:py:func:`cosmetics`
Operates much like this function but computes
median and sigma in the whole image instead of in boxes |
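A synthetic sketch of calling `ccdmask`, assuming the surrounding numina environment (the `PIXEL_*` constants and block helpers) is importable alongside it; the flats and the planted defect are made up:

```python
import numpy

rng = numpy.random.default_rng(0)
flat1 = rng.normal(1000.0, 30.0, size=(256, 256))   # low-exposure flat
flat2 = rng.normal(3000.0, 50.0, size=(256, 256))   # high-exposure flat
flat2[100, 100] = 0.0                               # plant a dead pixel

ratio, mask, sigma = ccdmask(flat1, flat2, mode='region')
print('flagged pixels:', (mask != 0).sum())         # assumes PIXEL_VALID == 0
```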
380,007 | def delete_dashboard(self, id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_dashboard_with_http_info(id, **kwargs)
else:
(data) = self.delete_dashboard_with_http_info(id, **kwargs)
return data | Delete a specific dashboard
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_dashboard(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerDashboard
If the method is called asynchronously,
returns the request thread. |
380,008 | def remove_from_category(self, category):
ctype = ContentType.objects.get_for_model(self)
self.categories.model.objects.filter(category=category, content_type=ctype, object_id=self.id).delete() | Removes this object from a given category.
:param Category category:
:return: |
380,009 | def com_adobe_fonts_check_family_consistent_upm(ttFonts):
upm_set = set()
for ttFont in ttFonts:
upm_set.add(ttFont['head'].unitsPerEm)
if len(upm_set) > 1:
yield FAIL, ("Fonts have different units per em: {}."
).format(sorted(upm_set))
else:
yield PASS, "Fonts have consistent units per em." | Fonts have consistent Units Per Em? |
380,010 | def pypi(
click_ctx,
requirements,
index=None,
python_version=3,
exclude_packages=None,
output=None,
subgraph_check_api=None,
no_transitive=True,
no_pretty=False,
):
requirements = [requirement.strip() for requirement in requirements.split("\\n") if requirement]
if not requirements:
_LOG.error("No requirements specified, exiting")
sys.exit(1)
if not subgraph_check_api:
_LOG.info(
"No subgraph check API provided, no queries will be done for dependency subgraphs that should be avoided"
)
result = resolve_python(
requirements,
index_urls=index.split(",") if index else ("https://pypi.org/simple",),
python_version=int(python_version),
transitive=not no_transitive,
exclude_packages=set(map(str.strip, (exclude_packages or "").split(","))),
subgraph_check_api=subgraph_check_api,
)
print_command_result(
click_ctx,
result,
analyzer=analyzer_name,
analyzer_version=analyzer_version,
output=output or "-",
pretty=not no_pretty,
) | Manipulate with dependency requirements using PyPI. |
380,011 | def verify_signature(self,
signing_key,
message,
signature,
padding_method,
signing_algorithm=None,
hashing_algorithm=None,
digital_signature_algorithm=None):
backend = default_backend()
hash_algorithm = None
dsa_hash_algorithm = None
dsa_signing_algorithm = None
if hashing_algorithm:
hash_algorithm = self._encryption_hash_algorithms.get(
hashing_algorithm
)
if digital_signature_algorithm:
algorithm_pair = self._digital_signature_algorithms.get(
digital_signature_algorithm
)
if algorithm_pair:
dsa_hash_algorithm = algorithm_pair[0]
dsa_signing_algorithm = algorithm_pair[1]
if dsa_hash_algorithm and dsa_signing_algorithm:
if hash_algorithm and (hash_algorithm != dsa_hash_algorithm):
raise exceptions.InvalidField(
"The hashing algorithm does not match the digital "
"signature algorithm."
)
if (signing_algorithm and
(signing_algorithm != dsa_signing_algorithm)):
raise exceptions.InvalidField(
"The signing algorithm does not match the digital "
"signature algorithm."
)
signing_algorithm = dsa_signing_algorithm
hash_algorithm = dsa_hash_algorithm
if signing_algorithm == enums.CryptographicAlgorithm.RSA:
if padding_method == enums.PaddingMethod.PSS:
if hash_algorithm:
padding = asymmetric_padding.PSS(
mgf=asymmetric_padding.MGF1(hash_algorithm()),
salt_length=asymmetric_padding.PSS.MAX_LENGTH
)
else:
raise exceptions.InvalidField(
"A hashing algorithm must be specified for PSS "
"padding."
)
elif padding_method == enums.PaddingMethod.PKCS1v15:
padding = asymmetric_padding.PKCS1v15()
else:
raise exceptions.InvalidField(
"The padding method is not supported for signature "
"verification.".format(padding_method)
)
try:
public_key = backend.load_der_public_key(signing_key)
except Exception:
try:
public_key = backend.load_pem_public_key(signing_key)
except Exception:
raise exceptions.CryptographicFailure(
"The signing key bytes could not be loaded."
)
try:
public_key.verify(
signature,
message,
padding,
hash_algorithm()
)
return True
except errors.InvalidSignature:
return False
except Exception:
raise exceptions.CryptographicFailure(
"The signature verification process failed."
)
else:
raise exceptions.InvalidField(
"The signing algorithm is not supported for "
"signature verification.".format(signing_algorithm)
) | Verify a message signature.
Args:
signing_key (bytes): The bytes of the signing key to use for
signature verification. Required.
message (bytes): The bytes of the message that corresponds with
the signature. Required.
signature (bytes): The bytes of the signature to be verified.
Required.
padding_method (PaddingMethod): An enumeration specifying the
padding method to use during signature verification. Required.
signing_algorithm (CryptographicAlgorithm): An enumeration
specifying the cryptographic algorithm to use for signature
verification. Only RSA is supported. Optional, must match the
algorithm specified by the digital signature algorithm if both
are provided. Defaults to None.
hashing_algorithm (HashingAlgorithm): An enumeration specifying
the hashing algorithm to use with the cryptographic algorithm,
if needed. Optional, must match the algorithm specified by the
digital signature algorithm if both are provided. Defaults to
None.
digital_signature_algorithm (DigitalSignatureAlgorithm): An
enumeration specifying both the cryptographic and hashing
algorithms to use for signature verification. Optional, must
match the cryptographic and hashing algorithms if both are
provided. Defaults to None.
Returns:
boolean: the result of signature verification, True for valid
signatures, False for invalid signatures
Raises:
InvalidField: Raised when various settings or values are invalid.
CryptographicFailure: Raised when the signing key bytes cannot be
loaded, or when the signature verification process fails
unexpectedly. |
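A hypothetical round trip, assuming this method lives on PyKMIP's `CryptographyEngine` (imported from `kmip.services.server.crypto`); key generation and signing below use pyca/cryptography directly, with a modern version that no longer requires an explicit backend argument:

```python
from cryptography.hazmat.primitives.asymmetric import rsa, padding
from cryptography.hazmat.primitives import hashes, serialization
from kmip.core import enums

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
message = b'attack at dawn'
signature = key.sign(message, padding.PKCS1v15(), hashes.SHA256())
public_bytes = key.public_key().public_bytes(
    serialization.Encoding.DER,
    serialization.PublicFormat.SubjectPublicKeyInfo)

engine = CryptographyEngine()  # assumed class providing verify_signature
assert engine.verify_signature(
    signing_key=public_bytes,
    message=message,
    signature=signature,
    padding_method=enums.PaddingMethod.PKCS1v15,
    signing_algorithm=enums.CryptographicAlgorithm.RSA,
    hashing_algorithm=enums.HashingAlgorithm.SHA_256)
```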
380,012 | def _setLearningMode(self, l4Learning=False, l2Learning=False):
for column in self.L4Columns:
column.setParameter("learn", 0, l4Learning)
for column in self.L2Columns:
column.setParameter("learningMode", 0, l2Learning) | Sets the learning mode for L4 and L2. |
380,013 | def format(self, status, headers, environ, bucket, delay):
entity = ("This request was rate-limited. "
"Please retry your request after %s." %
time.strftime("%Y-%m-%dT%H:%M:%SZ",
time.gmtime(bucket.next)))
headers['Content-Type'] = 'text/plain'  # key/value assumed; the docstring requires updating Content-Type
return status, entity | Formats a response entity. Returns a tuple of the desired
status code and the formatted entity. The default status code
is passed in, as is a dictionary of headers.
:param status: The default status code. Should be returned to
the caller, or an alternate selected. The
status code should include both the number and
the message, separated by a single space.
:param headers: A dictionary of headers for the response.
Should update the 'Content-Type' header at a
minimum.
:param environ: The WSGI environment for the request.
:param bucket: The bucket containing the data which caused the
delay decision to be made. This can be used to
obtain such information as the next time the
request can be made.
:param delay: The number of seconds by which the request
should be delayed. |
380,014 | def move(self, x, y):
if not isinstance(x, baseinteger):
raise TypeError("x can only be an instance of type baseinteger")
if not isinstance(y, baseinteger):
raise TypeError("y can only be an instance of type baseinteger")
self._call("move",
in_p=[x, y]) | Changes the overlay's position relative to the IFramebuffer.
in x of type int
in y of type int |
380,015 | def from_word2vec(fname, fvocab=None, binary=False):
vocabulary = None
if fvocab is not None:
logger.info("loading word counts from %s" % (fvocab))
vocabulary = Embedding.from_word2vec_vocab(fvocab)
logger.info("loading projection weights from %s" % (fname))
if binary:
words, vectors = Embedding._from_word2vec_binary(fname)
else:
words, vectors = Embedding._from_word2vec_text(fname)
if not vocabulary:
vocabulary = OrderedVocabulary(words=words)
return Embedding(vocabulary=vocabulary, vectors=vectors) | Load the input-hidden weight matrix from the original C word2vec-tool format.
Note that the information stored in the file is incomplete (the binary tree is missing),
so while you can query for word similarity etc., you cannot continue training
with a model loaded this way.
`binary` is a boolean indicating whether the data is in binary word2vec format.
Word counts are read from `fvocab` filename, if set (this is the file generated
by `-save-vocab` flag of the original C tool). |
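An illustrative load through the API above; the file path is a placeholder, and the item-lookup line assumes the `Embedding` class supports `__getitem__` by word:

```python
# path is a placeholder; binary=True matches the original C tool's format
emb = Embedding.from_word2vec('GoogleNews-vectors-negative300.bin', binary=True)
print(emb.vectors.shape)   # (vocab_size, 300) for the GoogleNews vectors
vec = emb['king']          # assumed lookup API
```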
380,016 | def append(self, item):
self.real_list.append(item)
self.observer(UpdateType.CREATED, item, len(self.real_list) - 1) | See :meth:`list.append()` method
Calls observer ``self.observer(UpdateType.CREATED, item, index)`` where
**index** is *item position* |
380,017 | def bilinear_sampling(input_layer, x, y, name=PROVIDED):
input_layer.get_shape().assert_has_rank(4)
return _interpolate(im=input_layer, x=x, y=y, name=name) | Performs bilinear sampling. The input must be a rank-4 Tensor.
Implements the differentiable sampling mechanism with bilinear kernel
in https://arxiv.org/abs/1506.02025.
Given (x, y) coordinates for each output pixel, use bilinear sampling on
the input_layer to fill the output.
Args:
input_layer: The chainable object, supplied.
x: A tensor of size [batch_size, height, width, 1] representing the sampling
x coordinates normalized to range [-1,1].
y: A tensor of size [batch_size, height, width, 1] representing the
sampling y coordinates normalized to range [-1,1].
name: The name for this operation is also used to create/find the
parameter variables.
Returns:
Handle to this layer |
380,018 | def validate(self, instance, value):
try:
compval = complex(value)
if not self.cast and (
abs(value.real - compval.real) > TOL or
abs(value.imag - compval.imag) > TOL
):
self.error(
instance=instance,
value=value,
extra='Value must be within {} of its complex cast'.format(TOL),  # message assumed
)
except (TypeError, ValueError, AttributeError):
self.error(instance, value)
return compval | Checks that value is a complex number
Floats and Integers are coerced to complex numbers |
380,019 | def stringify(self) :
"a pretty str version of getChain()"
l = []
h = self.head
while h :
l.append(str(h._key))
h = h.nextDoc
return "<->".join(l) | a pretty str version of getChain() |
380,020 | def parse_version(str_):
v = re.findall(r"\d+.\d+.\d+", str_)
if v:
return v[0]
else:
print("cannot parse string {}".format(str_))
raise KeyError | Parses the program's version from a python variable declaration. |
380,021 | def send_signal(self, backend, signal):
backend = self._expand_host(backend)
if backend in self.backends:
try:
return self._work(backend, self._package(signal), log=False)
except socket.error:
raise BackendNotAvailableError
else:
raise ValueError('{} is not registered with the client'.format(backend))  # message assumed | Sends the `signal` signal to `backend`. Raises ValueError if `backend`
is not registered with the client. Returns the result. |
380,022 | def wgs84togcj02(lng, lat):
if out_of_china(lng, lat):
return lng, lat
dlat = transformlat(lng - 105.0, lat - 35.0)
dlng = transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * pi
magic = math.sin(radlat)
magic = 1 - ee * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
dlng = (dlng * 180.0) / (a / sqrtmagic * math.cos(radlat) * pi)
mglat = lat + dlat
mglng = lng + dlng
return [mglng, mglat] | Convert WGS84 to GCJ02 (Mars coordinate system).
:param lng: longitude in WGS84
:param lat: latitude in WGS84
:return: [lng, lat] in GCJ02 |
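An example conversion for a point in Beijing, assuming the module-level constants (`a`, `ee`, `pi`) and the `transformlat`/`transformlng` helpers are defined as in the usual eviltransform-style implementation:

```python
lng, lat = 116.3912, 39.9076            # WGS84, Tiananmen area
mglng, mglat = wgs84togcj02(lng, lat)
# GCJ02 offsets are on the order of 1e-3 degrees (a few hundred meters)
print(mglng - lng, mglat - lat)
```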
380,023 | def _get_imported_module(self, module_name):
imp_mod = self.by_name.get(module_name)
if imp_mod:
return imp_mod
no_obj = module_name.rsplit('.', 1)[0]
imp_mod2 = self.by_name.get(no_obj)
if imp_mod2:
return imp_mod2
if module_name in self.pkgs:
pkg_name = module_name + ".__init__"
return self.by_name[pkg_name]
if no_obj in self.pkgs:
pkg_name = no_obj + ".__init__"
return self.by_name[pkg_name] | try to get imported module reference by its name |
380,024 | def isect(list1, list2):
set2 = set(list2)
return [item for item in list1 if item in set2] |
returns list1 elements that are also in list2. preserves order of list1
intersect_ordered
Args:
list1 (list):
list2 (list):
Returns:
list: new_list
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> list1 = ['featweight_rowid', 'feature_rowid', 'config_rowid', 'featweight_forground_weight']
>>> list2 = [u'featweight_rowid']
>>> result = intersect_ordered(list1, list2)
>>> print(result)
['featweight_rowid']
Timeit:
def timeit_func(func, *args):
niter = 10
times = []
for count in range(niter):
with ut.Timer(verbose=False) as t:
_ = func(*args)
times.append(t.ellapsed)
return sum(times) / niter
grid = {
'size1': [1000, 5000, 10000, 50000],
'size2': [1000, 5000, 10000, 50000],
#'overlap': [0, 1],
}
data = []
for kw in ut.all_dict_combinations(grid):
pool = np.arange(kw['size1'] * 2)
size2 = size1 = kw['size1']
size2 = kw['size2']
list1 = (np.random.rand(size1) * size1).astype(np.int32).tolist()
list1 = ut.random_sample(pool, size1).tolist()
list2 = ut.random_sample(pool, size2).tolist()
list1 = set(list1)
list2 = set(list2)
kw['ut'] = timeit_func(ut.isect, list1, list2)
#kw['np1'] = timeit_func(np.intersect1d, list1, list2)
#kw['py1'] = timeit_func(lambda a, b: set.intersection(set(a), set(b)), list1, list2)
kw['py2'] = timeit_func(lambda a, b: sorted(set.intersection(set(a), set(b))), list1, list2)
data.append(kw)
import pandas as pd
pd.options.display.max_rows = 1000
pd.options.display.width = 1000
df = pd.DataFrame.from_dict(data)
data_keys = list(grid.keys())
other_keys = ut.setdiff(df.columns, data_keys)
df = df.reindex_axis(data_keys + other_keys, axis=1)
df['abs_change'] = df['ut'] - df['py2']
df['pct_change'] = df['abs_change'] / df['ut'] * 100
#print(df.sort('abs_change', ascending=False))
print(str(df).split('\n')[0])
for row in df.values:
argmin = row[len(data_keys):len(data_keys) + len(other_keys)].argmin() + len(data_keys)
print(' ' + ', '.join([
'%6d' % (r) if x < len(data_keys) else (
ut.color_text('%8.6f' % (r,), 'blue')
if x == argmin else '%8.6f' % (r,))
for x, r in enumerate(row)
]))
%timeit ut.isect(list1, list2)
%timeit np.intersect1d(list1, list2, assume_unique=True)
%timeit set.intersection(set(list1), set(list2))
#def highlight_max(s):
# '''
# highlight the maximum in a Series yellow.
# '''
# is_max = s == s.max()
# return ['background-color: yellow' if v else '' for v in is_max]
#df.style.apply(highlight_max) |
380,025 | def start(self):
self._context.add_service_listener(
self, self.requirement.filter, self.requirement.specification
) | Starts the dependency manager |
380,026 | def getDelOps(self, buid):
return (
('prop:del', (buid, self.form.name, self.name, self.storinfo)),  # op name assumed
) | Get a list of storage operations to delete this property from the buid.
Args:
buid (bytes): The node buid.
Returns:
(tuple): The storage operations |
380,027 | def from_css(Class, csstext, encoding=None, href=None, media=None, title=None, validate=None):
styles = Class()
cssStyleSheet = cssutils.parseString(csstext, encoding=encoding, href=href, media=media, title=title, validate=validate)
for rule in cssStyleSheet.cssRules:
# dict keys below ('@font-face', '@import', ...) are assumed from the
# CSS at-rule names; the original literals were stripped
if rule.type==cssutils.css.CSSRule.FONT_FACE_RULE:
if styles.get('@font-face') is None: styles['@font-face'] = []
styles['@font-face'].append(Class.styleProperties(rule.style))
elif rule.type==cssutils.css.CSSRule.IMPORT_RULE:
if styles.get('@import') is None: styles['@import'] = []
styles['@import'].append("url(%s)" % rule.href)
elif rule.type==cssutils.css.CSSRule.NAMESPACE_RULE:
if styles.get('@namespace') is None: styles['@namespace'] = {}
styles['@namespace'][rule.prefix] = rule.namespaceURI
elif rule.type==cssutils.css.CSSRule.MEDIA_RULE:
if styles.get('@media') is None: styles['@media'] = []
styles['@media'].append(rule.cssText)
elif rule.type==cssutils.css.CSSRule.PAGE_RULE:
if styles.get('@page') is None: styles['@page'] = []
styles['@page'].append(rule.cssText)
elif rule.type==cssutils.css.CSSRule.STYLE_RULE:
for selector in rule.selectorList:
sel = selector.selectorText
if sel not in styles:
styles[sel] = Class.styleProperties(rule.style)
elif rule.type==cssutils.css.CSSRule.CHARSET_RULE:
styles['@charset'] = rule.encoding
elif rule.type==cssutils.css.CSSRule.COMMENT:
pass
elif rule.type==cssutils.css.CSSRule.VARIABLES_RULE:
pass
else:
log.warning("Unknown rule type: %r" % rule.cssText)
return styles | parse CSS text into a Styles object, using cssutils |
380,028 | def blob_handler(self, cmd):
self.blobs[cmd.id] = cmd
self.keep = False | Process a BlobCommand. |
380,029 | def _algebraic_rules_scalar():
a = wc("a", head=SCALAR_VAL_TYPES)
b = wc("b", head=SCALAR_VAL_TYPES)
x = wc("x", head=SCALAR_TYPES)
y = wc("y", head=SCALAR_TYPES)
z = wc("z", head=SCALAR_TYPES)
indranges__ = wc("indranges__", head=IndexRangeBase)
# rule names ('R001'...) are placeholders; the original keys were stripped
ScalarTimes._binary_rules.update(check_rules_dict([
('R001', (
pattern_head(a, b),
lambda a, b: a * b)),
('R002', (
pattern_head(x, x),
lambda x: x**2)),
('R003', (
pattern_head(Zero, x),
lambda x: Zero)),
('R004', (
pattern_head(x, Zero),
lambda x: Zero)),
('R005', (
pattern_head(
pattern(ScalarPower, x, y),
pattern(ScalarPower, x, z)),
lambda x, y, z: x**(y+z))),
('R006', (
pattern_head(x, pattern(ScalarPower, x, -1)),
lambda x: One)),
]))
ScalarPower._rules.update(check_rules_dict([
('R001', (
pattern_head(a, b),
lambda a, b: a**b)),
('R002', (
pattern_head(x, 0),
lambda x: One)),
('R003', (
pattern_head(x, 1),
lambda x: x)),
('R004', (
pattern_head(pattern(ScalarPower, x, y), z),
lambda x, y, z: x**(y*z))),
]))
def pull_constfactor_from_sum(x, y, indranges):
bound_symbols = set([r.index_symbol for r in indranges])
if len(x.free_symbols.intersection(bound_symbols)) == 0:
return x * ScalarIndexedSum.create(y, *indranges)
else:
raise CannotSimplify()
ScalarIndexedSum._rules.update(check_rules_dict([
('R001', (
pattern_head(Zero, indranges__),
lambda indranges: Zero)),
('R002', (
pattern_head(pattern(ScalarTimes, x, y), indranges__),
lambda x, y, indranges:
pull_constfactor_from_sum(x, y, indranges))),
])) | Set the default algebraic rules for scalars |
380,030 | def _unique_ordered_lines(line_numbers):
if len(line_numbers) == 0:
return []
line_set = set(line_numbers)
return sorted(line_set) | Given a list of line numbers, return a list in which each line
number is included once and the lines are ordered sequentially. |
380,031 | def question_detail(request, topic_slug, slug):
url = reverse('faq-topic-detail', kwargs={'slug': topic_slug})  # view name and kwarg assumed
return _fragmentify(Question, slug, url) | A detail view of a Question.
Simply redirects to a detail page for the related :model:`faq.Topic`
(:view:`faq.views.topic_detail`) with the addition of a fragment
identifier that links to the given :model:`faq.Question`.
E.g. ``/faq/topic-slug/#question-slug``. |
380,032 | def _to_dict(self):
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
return _dict | Return a json dictionary representing this model. |
380,033 | def invert_pixel_mask(mask):
inverted_mask = np.ones(shape=(80, 336), dtype=np.dtype('uint8'))  # dtype string assumed
inverted_mask[mask >= 1] = 0
return inverted_mask | Invert pixel mask (0->1, 1(and greater)->0).
Parameters
----------
mask : array-like
Mask.
Returns
-------
inverted_mask : array-like
Inverted Mask. |
380,034 | def _validate_arguments(self):
if self._has_terms():
[term._validate_arguments() for term in self._terms]
return self | method to sanitize model parameters
Parameters
---------
None
Returns
-------
None |
380,035 | def random_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
return random.randint(self._wait_random_min, self._wait_random_max) | Sleep a random amount of time between wait_random_min and wait_random_max |
380,036 | def safe_read_file(file_path: Path) -> str:
for encoding in FILE_ENCODINGS:
try:
return file_path.read_text(encoding=encoding)
except UnicodeError:
pass
raise GuesslangError('Encoding not supported for {}'.format(file_path))  # message assumed | Read a text file. Several text encodings are tried until
the file content is correctly decoded.
:raise GuesslangError: when the file encoding is not supported
:param file_path: path to the input file
:return: text file content |
380,037 | def get_fermi(self, c, T, rtol=0.01, nstep=50, step=0.1, precision=8):
fermi = self.efermi
for _ in range(precision):
frange = np.arange(-nstep, nstep + 1) * step + fermi
calc_doping = np.array([self.get_doping(f, T) for f in frange])
relative_error = abs(calc_doping / c - 1.0)
fermi = frange[np.argmin(relative_error)]
step /= 10.0
if min(relative_error) > rtol:
raise ValueError('Could not find fermi level within {}% of c={}'.format(  # message assumed
rtol * 100, c))
return fermi | Finds the fermi level at which the doping concentration at the given
temperature (T) is equal to c. A greedy algorithm is used where the
relative error is minimized by calculating the doping at a grid which
is continuously become finer.
Args:
c (float): doping concentration. c<0 represents n-type doping and
c>0 represents p-type doping (i.e. majority carriers are holes)
T (float): absolute temperature in Kelvin
rtol (float): maximum acceptable relative error
nstep (int): number of steps checked around a given fermi level
step (float): initial step in fermi level when searching
precision (int): essentially the decimal places of calculated fermi
Returns (float): the fermi level. Note that this is different from the
default dos.efermi. |
380,038 | def stl(A, b):
from scipy.linalg import solve_triangular
A = asarray(A, float)
b = asarray(b, float)
return solve_triangular(A, b, lower=True, check_finite=False) | r"""Shortcut to ``solve_triangular(A, b, lower=True, check_finite=False)``.
Solve linear systems :math:`\mathrm A \mathbf x = \mathbf b` when
:math:`\mathrm A` is a lower-triangular matrix.
Args:
A (array_like): A lower-triangular matrix.
b (array_like): Ordinate values.
Returns:
:class:`numpy.ndarray`: Solution ``x``.
See Also
--------
scipy.linalg.solve_triangular: Solve triangular linear equations. |
380,039 | def GetHostMemUsedMB(self):
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetHostMemUsedMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value | Undocumented. |
380,040 | def get_table_info(conn, tablename):
r = conn.execute("pragma table_info({})".format(tablename))
ret = TableInfo(((row["name"], row) for row in r))
return ret | Returns TableInfo object |
380,041 | def wait_for_element_not_present(self, locator):
for i in range(timeout_seconds):
if self.driver.is_element_present(locator):
time.sleep(1)
else:
break
else:
raise ElementVisiblityTimeout("%s presence timed out" % locator)
return True | Synchronization helper to wait until some element is removed from the page
:raises: ElementVisiblityTimeout |
380,042 | def autoset_id(self):
try:
self.id_
except AttributeError:
pass
else:
if self.id_:
return
self.id_ = to_nmtoken(random.getrandbits(8*RANDOM_ID_BYTES)) | If the :attr:`id_` already has a non-false (false is also the empty
string!) value, this method is a no-op.
Otherwise, the :attr:`id_` attribute is filled with
:data:`RANDOM_ID_BYTES` of random data, encoded by
:func:`aioxmpp.utils.to_nmtoken`.
.. note::
This method only works on subclasses of :class:`StanzaBase` which
define the :attr:`id_` attribute. |
380,043 | def ip2long(ip):
if not validate_ip(ip):
return None
quads = ip.split('.')
if len(quads) == 1:
quads = quads + [0, 0, 0]
elif len(quads) < 4:
host = quads[-1:]
quads = quads[:-1] + [0, ] * (4 - len(quads)) + host
lngip = 0
for q in quads:
lngip = (lngip << 8) | int(q)
return lngip | Convert a dotted-quad ip address to a network byte order 32-bit
integer.
>>> ip2long('127.0.0.1')
2130706433
>>> ip2long('127.1')
2130706433
>>> ip2long('127')
2130706432
>>> ip2long('127.0.0.256') is None
True
:param ip: Dotted-quad ip address (eg. '127.0.0.1').
:type ip: str
:returns: Network byte order 32-bit integer or ``None`` if ip is invalid. |
380,044 | def generate(self):
answer = self.rand.randrange(self.max)
answer = str(answer).zfill(self.digits)
image_data = self.image_generator.generate(answer)
base64_captcha = base64.b64encode(image_data.getvalue()).decode("ascii")
logging.debug('Captcha answer: ' + answer)  # log text assumed
session['captcha_answer'] = answer
return base64_captcha | Generates and returns a numeric captcha image in base64 format.
Saves the correct answer in `session['captcha_answer']`
Use later as:
src = captcha.generate()
<img src="{{src}}"> |
380,045 | def populate(self, priority, address, rtr, data):
assert isinstance(data, bytes)
self.needs_no_rtr(rtr)
self.needs_data(data, 7)
self.set_attributes(priority, address, rtr)
self.channel = (data[0] & 0x03) + 1
self.pulses = (data[0] >> 2) * 100
self.counter = (data[1] << 24) + (data[2] << 16) + (data[3] << 8) + data[4]
self.delay = (data[5] << 8) + data[6] | -DB1 last 2 bits = channel
-DB1 first 6 bits = pulses
-DB2-5 = pulse counter
-DB6-7 = ms/pulse
:return: None |
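A worked decode of the bit layout described above, with a made-up 7-byte payload:

```python
data = bytes([0b00001001, 0x00, 0x00, 0x30, 0x39, 0x01, 0xF4])
channel = (data[0] & 0x03) + 1     # low 2 bits of DB1 -> channel 2
pulses = (data[0] >> 2) * 100      # high 6 bits of DB1 -> 200 pulses
counter = (data[1] << 24) + (data[2] << 16) + (data[3] << 8) + data[4]
delay = (data[5] << 8) + data[6]   # DB6-7: 0x01F4 -> 500 ms/pulse
print(channel, pulses, counter, delay)   # 2 200 12345 500
```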
380,046 | def apply_classifier(self, name, samples=None, subset=None):
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
c = self.classifiers[name]
labs = c.classifier.ulabels_
with self.pbar.set(total=len(samples), desc='Applying ' + name + ' classifier') as prog:  # desc text assumed
for s in samples:
d = self.data[s]
try:
f = c.predict(d.focus)
except ValueError:
# fallback call lost in extraction; it passed params=(c.analytes, c.method)
f = None
prog.update()
return name | Apply a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier to apply.
subset : str
The subset of samples to apply the classifier to.
Returns
-------
name : str |
380,047 | def plot_ranges_from_cli(opts):
mins = {}
for x in opts.mins:
x = x.split(':')  # ':' separator assumed; literal stripped
if len(x) != 2:
raise ValueError("option --mins not specified correctly; see help")
mins[x[0]] = float(x[1])
maxs = {}
for x in opts.maxs:
x = x.split(':')  # ':' separator assumed; literal stripped
if len(x) != 2:
raise ValueError("option --maxs not specified correctly; see help")
maxs[x[0]] = float(x[1])
return mins, maxs | Parses the mins and maxs arguments from the `plot_posterior` option
group.
Parameters
----------
opts : ArgumentParser
The parsed arguments from the command line.
Returns
-------
mins : dict
Dictionary of parameter name -> specified mins. Only parameters that
were specified in the --mins option will be included; if no parameters
were provided, will return an empty dictionary.
maxs : dict
Dictionary of parameter name -> specified maxs. Only parameters that
were specified in the --mins option will be included; if no parameters
were provided, will return an empty dictionary. |
380,048 | def dist_iter(self, g_nums, ats_1, ats_2, invalid_error=False):
import numpy as np
from .utils import pack_tups
if _DEBUG:
print("g_nums = {0}".format(g_nums))
print("ats_1 = {0}".format(ats_1))
print("ats_2 = {0}".format(ats_2))
arglist = self._none_subst(g_nums, ats_1, ats_2)
tups = pack_tups(*arglist)
if _DEBUG:
print(tups)
for tup in tups:
yield self._iter_return(tup, self.dist_single, invalid_error) | Iterator over selected interatomic distances.
Distances are in Bohrs as with :meth:`dist_single`.
See `above <toc-generators_>`_ for more information on
calling options.
Parameters
----------
g_nums
|int| or length-R iterable |int| or |None| --
Index/indices of the desired geometry/geometries
ats_1
|int| or iterable |int| or |None| --
Index/indices of the first atom(s)
ats_2
|int| or iterable |int| or |None| --
Index/indices of the second atom(s)
invalid_error
|bool|, optional --
If |False| (the default), |None| values are returned for
results corresponding to invalid indices. If |True|,
exceptions are raised per normal.
Yields
------
dist
|npfloat_| --
Interatomic distance in Bohrs between each atom pair of
`ats_1` and `ats_2` from the corresponding geometries
of `g_nums`.
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided.
~exceptions.ValueError
If all iterable objects are not the same length. |
380,049 | def add_contact(self, phone_number: str, first_name: str, last_name: str=None, on_success: callable=None):
pass | Add contact by phone number and name (last_name is optional).
:param phone_number: Valid phone number for contact.
:param first_name: First name to use.
:param last_name: Last name to use. Optional.
:param on_success: Callback to call when adding, will contain success status and the current contact list. |
380,050 | def monthly_mean_at_each_ind(monthly_means, sub_monthly_timeseries):
time = monthly_means[TIME_STR]
start = time.indexes[TIME_STR][0].replace(day=1, hour=0)
end = time.indexes[TIME_STR][-1]
new_indices = pd.DatetimeIndex(start=start, end=end, freq='MS')  # month-start frequency assumed
arr_new = monthly_means.reindex(time=new_indices, method='backfill')  # fill methods assumed
return arr_new.reindex_like(sub_monthly_timeseries, method='pad') | Copy monthly mean over each time index in that month.
Parameters
----------
monthly_means : xarray.DataArray
array of monthly means
sub_monthly_timeseries : xarray.DataArray
array of a timeseries at sub-monthly time resolution
Returns
-------
xarray.DataArray with eath monthly mean value from `monthly_means` repeated
at each time within that month from `sub_monthly_timeseries`
See Also
--------
monthly_mean_ts : Create timeseries of monthly mean values |
380,051 | def matlab_compatible(name):
compatible_name = [ch if ch in ALLOWED_MATLAB_CHARS else "_" for ch in name]
compatible_name = "".join(compatible_name)
if compatible_name[0] not in string.ascii_letters:
compatible_name = "M_" + compatible_name
return compatible_name[:60] | make a channel name compatible with Matlab variable naming
Parameters
----------
name : str
channel name
Returns
-------
compatible_name : str
channel name compatible with Matlab |
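Example renamings, assuming `ALLOWED_MATLAB_CHARS` is the set of ASCII letters, digits, and underscore:

```python
assert matlab_compatible('Engine RPM') == 'Engine_RPM'   # space replaced
assert matlab_compatible('1st gear').startswith('M_')    # leading non-letter prefixed
assert len(matlab_compatible('x' * 100)) == 60           # truncated to 60 chars
```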
380,052 | def get_mcu_definition(self, project_file):
return mcu | Parse project file to get mcu definition |
380,053 | def _get_f2rx(self, C, r_x, r_1, r_2):
drx = (r_x - r_1) / (r_2 - r_1)
return self.CONSTS["h4"] + (C["h5"] * drx) + (C["h6"] * (drx ** 2.)) | Defines the f2 scaling coefficient defined in equation 10 |
380,054 | def warn_on_var_indirection(self) -> bool:
return not self.use_var_indirection and self._opts.entry(
WARN_ON_VAR_INDIRECTION, True
) | If True, warn when a Var reference cannot be direct linked (iff
use_var_indirection is False). |
380,055 | def _handleCallInitiated(self, regexMatch, callId=None, callType=1):
if self._dialEvent:
if regexMatch:
groups = regexMatch.groups()
if len(groups) >= 2:
self._dialResponse = (int(groups[0]), int(groups[1]))
else:
self._dialResponse = (int(groups[0]), 1)
else:
self._dialResponse = callId, callType
self._dialEvent.set() | Handler for "outgoing call initiated" event notification line |
380,056 | def process_raw_data(cls, raw_data):
properties = raw_data["properties"]
raw_content = properties.get("addressSpace", None)
if raw_content is not None:
address_space = AddressSpace.from_raw_data(raw_content)
properties["addressSpace"] = address_space
raw_content = properties.get("dhcpOptions")
if raw_content is not None:
dhcp_options = DHCPOptions.from_raw_data(raw_content)
properties["dhcpOptions"] = dhcp_options
raw_content = properties.get("logicalNetwork", None)
if raw_content is not None:
properties["logicalNetwork"] = Resource.from_raw_data(raw_content)
subnetworks = []
for raw_subnet in properties.get("subnets", []):
raw_subnet["parentResourceID"] = raw_data["resourceId"]
subnetworks.append(SubNetworks.from_raw_data(raw_subnet))
properties["subnets"] = subnetworks
return super(VirtualNetworks, cls).process_raw_data(raw_data) | Create a new model using raw API response. |
380,057 | def _trade(self, security, price=0, amount=0, volume=0, entrust_bs="buy"):
stock = self._search_stock_info(security)
balance = self.get_balance()[0]
if stock is None:
raise exceptions.TradeError(u"没有查询要操作的股票信息")
if not volume:
volume = int(float(price) * amount)
if balance["current_balance"] < volume and entrust_bs == "buy":
raise exceptions.TradeError(u"没有足够的现金进行操作")
if stock["flag"] != 1:
raise exceptions.TradeError(u"未上市、停牌、涨跌停、退市的股票无法操作。")
if volume == 0:
raise exceptions.TradeError(u"操作金额不能为零")
weight = volume / balance["asset_balance"] * 100
weight = round(weight, 2)
position_list = self._get_position()
is_have = False
for position in position_list:
if position["stock_id"] == stock["stock_id"]:
is_have = True
position["proactive"] = True
old_weight = position["weight"]
if entrust_bs == "buy":
position["weight"] = weight + old_weight
else:
if weight > old_weight:
raise exceptions.TradeError(u"操作数量大于实际可卖出数量")
else:
position["weight"] = old_weight - weight
position["weight"] = round(position["weight"], 2)
if not is_have:
if entrust_bs == "buy":
position_list.append(
{
"code": stock["code"],
"name": stock["name"],
"enName": stock["enName"],
"hasexist": stock["hasexist"],
"flag": stock["flag"],
"type": stock["type"],
"current": stock["current"],
"chg": stock["chg"],
"percent": str(stock["percent"]),
"stock_id": stock["stock_id"],
"ind_id": stock["ind_id"],
"ind_name": stock["ind_name"],
"ind_color": stock["ind_color"],
"textname": stock["name"],
"segment_name": stock["ind_name"],
"weight": round(weight, 2),
"url": "/S/" + stock["code"],
"proactive": True,
"price": str(stock["current"]),
}
)
else:
raise exceptions.TradeError(u"没有持有要卖出的股票")
if entrust_bs == "buy":
cash = (
(balance["current_balance"] - volume)
/ balance["asset_balance"]
* 100
)
else:
cash = (
(balance["current_balance"] + volume)
/ balance["asset_balance"]
* 100
)
cash = round(cash, 2)
log.debug("weight:%f, cash:%f", weight, cash)
data = {
"cash": cash,
"holdings": str(json.dumps(position_list)),
"cube_symbol": str(self.account_config["portfolio_code"]),
"segment": 1,
"comment": "",
}
try:
resp = self.s.post(self.config["rebalance_url"], data=data)
except Exception as e:
log.warning("调仓失败: %s ", e)
return None
else:
log.debug(
"调仓 %s%s: %d", entrust_bs, stock["name"], resp.status_code
)
resp_json = json.loads(resp.text)
if "error_description" in resp_json and resp.status_code != 200:
log.error("调仓错误: %s", resp_json["error_description"])
return [
{
"error_no": resp_json["error_code"],
"error_info": resp_json["error_description"],
}
]
return [
{
"entrust_no": resp_json["id"],
"init_date": self._time_strftime(resp_json["created_at"]),
"batch_no": "委托批号",
"report_no": "申报号",
"seat_no": "席位编号",
"entrust_time": self._time_strftime(
resp_json["updated_at"]
),
"entrust_price": price,
"entrust_amount": amount,
"stock_code": security,
"entrust_bs": "买入",
"entrust_type": "雪球虚拟委托",
"entrust_status": "-",
}
] | Rebalance the portfolio (调仓).
:param security:
:param price:
:param amount:
:param volume:
:param entrust_bs:
:return: |
380,058 | def configure(cls, name, config, prefix='depot.'):
if name in cls._depots:
raise RuntimeError('Depot %s has already been configured' % (name,))  # message assumed
if cls._default_depot is None:
cls._default_depot = name
cls._depots[name] = cls.from_config(config, prefix)
return cls._depots[name] | Configures an application depot.
This configures the application wide depot from a settings dictionary.
The settings dictionary is usually loaded from an application configuration
file where all the depot options are specified with a given ``prefix``.
The default ``prefix`` is *depot.*, the minimum required setting
is ``depot.backend`` which specified the required backend for files storage.
Additional options depend on the choosen backend. |
380,059 | def decrypt(self, data):
next_payload, is_critical, payload_len = const.PAYLOAD_HEADER.unpack(data[:const.PAYLOAD_HEADER.size])
next_payload = payloads.Type(next_payload)
logger.debug("next payload: {!r}".format(next_payload))
try:
iv_len = 16
iv = bytes(data[const.PAYLOAD_HEADER.size:const.PAYLOAD_HEADER.size + iv_len])
ciphertext = bytes(data[const.PAYLOAD_HEADER.size + iv_len:payload_len])
except IndexError:
raise IkeError('Unable to decrypt: encrypted payload too short')  # message assumed
logger.debug('iv: {}'.format(dump(iv)))  # log labels assumed
logger.debug('ciphertext: {}'.format(dump(ciphertext)))
cipher = Camellia(self.SK_er, iv=iv)
decrypted = cipher.decrypt(ciphertext)
logger.debug("Decrypted packet from responder: {}".format(dump(decrypted)))
return next_payload, decrypted | Decrypts an encrypted (SK, 46) IKE payload using self.SK_er
:param data: Encrypted IKE payload including headers (payloads.SK())
:return: next_payload, data_containing_payloads
:raise IkeError: If packet is corrupted. |
380,060 | def _get_options(ret=None):
# option names and defaults reconstructed per the salt MySQL returner
# convention; the original string literals were stripped
defaults = {'host': 'salt',
'user': 'salt',
'pass': 'salt',
'db': 'salt',
'port': 3306,
'ssl_ca': None,
'ssl_cert': None,
'ssl_key': None}
attrs = {'host': 'host',
'user': 'user',
'pass': 'pass',
'db': 'db',
'port': 'port',
'ssl_ca': 'ssl_ca',
'ssl_cert': 'ssl_cert',
'ssl_key': 'ssl_key'}
_options = salt.returners.get_returner_options(__virtualname__,
ret,
attrs,
__salt__=__salt__,
__opts__=__opts__,
defaults=defaults)
for k, v in six.iteritems(_options):
if isinstance(v, six.string_types) and v.lower() == 'none':
_options[k] = None
if k == 'port':
_options[k] = int(v)
return _options | Returns options used for the MySQL connection. |
380,061 | def setClockShowDate(kvalue, **kwargs):
if kvalue is not True and kvalue is not False:
return False
_gsession = _GSettings(user=kwargs.get('user'),
schema='org.gnome.desktop.interface',
key='clock-show-date')
return _gsession._set(kvalue) | Set whether the date is visible in the clock
CLI Example:
.. code-block:: bash
salt '*' gnome.setClockShowDate <True|False> user=<username> |
380,062 | async def clear(self, using_db=None) -> None:
db = using_db if using_db else self.model._meta.db
through_table = Table(self.field.through)
query = (
db.query_class.from_(through_table)
.where(getattr(through_table, self.field.backward_key) == self.instance.id)
.delete()
)
await db.execute_query(str(query)) | Clears ALL relations. |
380,063 | def _getArrays(items, attr, defaultValue):
arrays = dict([(key, []) for key in attr])
for item in items:
for key in attr:
arrays[key].append(getattr(item, key, defaultValue))
for key in [_ for _ in viewkeys(arrays)]:
arrays[key] = numpy.array(arrays[key])
return arrays | Return arrays with equal size of item attributes from a list of sorted
"items" for fast and convenient data processing.
:param attr: list of item attributes that should be added to the returned
array.
:param defaultValue: if an item is missing an attribute, the "defaultValue"
is added to the array instead.
:returns: {'attribute1': numpy.array([attributeValue1, ...]), ...} |
380,064 | def merge_leaderboards(self, destination, keys, aggregate='SUM'):
keys.insert(0, self.leaderboard_name)
self.redis_connection.zunionstore(destination, keys, aggregate) | Merge leaderboards given by keys with this leaderboard into a named destination leaderboard.
@param destination [String] Destination leaderboard name.
@param keys [Array] Leaderboards to be merged with the current leaderboard.
@param options [Hash] Options for merging the leaderboards. |
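Hypothetical usage, assuming the Python `leaderboard` library's constructor; scores for members present in several source boards are combined per the Redis ZUNIONSTORE aggregate (SUM by default):

```python
lb = Leaderboard('week1')
# union week1 + week2 + week3 into 'month', summing duplicate members
lb.merge_leaderboards('month', ['week2', 'week3'], aggregate='SUM')
```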
380,065 | def load_cml(cml_filename):
parser = make_parser()
parser.setFeature(feature_namespaces, 0)
dh = CMLMoleculeLoader()
parser.setContentHandler(dh)
parser.parse(cml_filename)
return dh.molecules | Load the molecules from a CML file
Argument:
| ``cml_filename`` -- The filename of a CML file.
Returns a list of molecule objects with optional molecular graph
attribute and extra attributes. |
380,066 | def set_uid(self):
if self.user:
uid = getpwnam(self.user).pw_uid
try:
os.setuid(uid)
except Exception:
message = ('Unable to change the user of the running process '
'to {0} (group {1})')  # message assumed; literals stripped
print(message.format(self.user, self.group))
sys.exit(1) | Change the user of the running process |
380,067 | def options(self, **options):
for k in options:
self._jreader = self._jreader.option(k, to_str(options[k]))
return self | Adds input options for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone. |
380,068 | def copy(self, filename, id_=-1, pre_callback=None, post_callback=None):
for repo in self._children:
if is_package(filename):
copy_method = repo.copy_pkg
else:
copy_method = repo.copy_script
if pre_callback:
pre_callback(repo.connection)
copy_method(filename, id_)
if post_callback:
post_callback(repo.connection) | Copy a package or script to all repos.
Determines appropriate location (for file shares) and type based
on file extension.
Args:
filename: String path to the local file to copy.
id_: Package or Script object ID to target. For use with JDS
and CDP DP's only. If uploading a package that does not
have a corresponding object, use id_ of -1, which is the
default.
pre_callback: Func to call before each distribution point
starts copying. Should accept a Repository connection
dictionary as a parameter. Will be called like:
`pre_callback(repo.connection)`
post_callback: Func to call after each distribution point
finishes copying. Should accept a Repository connection
dictionary as a parameter. Will be called like:
`pre_callback(repo.connection)` |
380,069 | def authenticate(cmd_args, endpoint='', force=False):  # empty default assumed
server = server_url(cmd_args)
network.check_ssl()
access_token = None
try:
assert not force
access_token = refresh_local_token(server)
except Exception:
print('Performing authentication')  # message assumed
access_token = perform_oauth(get_code, cmd_args, endpoint)
email = display_student_email(cmd_args, access_token)
if not email:
log.warning('Could not get login email')  # messages assumed
log.debug('Authenticated with access token: {}'.format(access_token))
return access_token | Returns an OAuth token that can be passed to the server for
identification. If FORCE is False, it will attempt to use a cached token
or refresh the OAuth token. |
380,070 | def gps_0(self):
return GPSInfo(self._eph, self._epv, self._fix_type, self._satellites_visible) | GPS position information (:py:class:`GPSInfo`). |
380,071 | def _next_dir_gen(self, root):
for e in root.getiterator(common._T_COMMON_PREFIXES):
yield common.GCSFileStat(
self._path + '/' + e.find(common._T_PREFIX).text,
st_size=None, etag=None, st_ctime=None, is_dir=True)
e.clear()
yield None | Generator for next directory element in the document.
Args:
root: root element in the XML tree.
Yields:
GCSFileStat for the next directory. |
380,072 | def cell(self, row_idx, col_idx):
return _Cell(self._tbl.tc(row_idx, col_idx), self) | Return cell at *row_idx*, *col_idx*.
Return value is an instance of |_Cell|. *row_idx* and *col_idx* are
zero-based, e.g. cell(0, 0) is the top, left cell in the table. |
380,073 | def individuals(self, ind_ids=None):
if ind_ids:
for ind_id in ind_ids:
for ind in self.individual_objs:
if ind.ind_id == ind_id:
yield ind
else:
for ind in self.individual_objs:
yield ind | Return information about individuals
Args:
ind_ids (list(str)): List of individual ids
Returns:
individuals (Iterable): Iterable with Individuals |
380,074 | def write(self, handle):
if not self._frames:
return
def add(name, desc, bpe, format, bytes, *dimensions):
group.add_param(name,
desc=desc,
bytes_per_element=bpe,
bytes=struct.pack(format, bytes),
dimensions=list(dimensions))
def add_str(name, desc, bytes, *dimensions):
group.add_param(name,
desc=desc,
bytes_per_element=-1,
bytes=bytes.encode(),
dimensions=list(dimensions))
def add_empty_array(name, desc, bpe):
group.add_param(name, desc=desc, bytes_per_element=bpe, dimensions=[0])
points, analog = self._frames[0]
ppf = len(points)
# parameter names, struct formats and screen-axis strings below are
# reconstructed from the C3D file-format spec; the original string
# literals were stripped in extraction
group = self.add_group(1, 'POINT', 'POINT group')
add('USED', '3d point count', 2, '<H', ppf)
add('FRAMES', 'frame count', 2, '<H', min(65535, len(self._frames)))
add('DATA_START', 'data block number', 2, '<H', 0)
add('SCALE', '3d scale factor', 4, '<f', self._point_scale)
add('RATE', '3d data capture rate', 4, '<f', self._point_rate)
add_str('X_SCREEN', 'X_SCREEN parameter', '+X', 2)
add_str('Y_SCREEN', 'Y_SCREEN parameter', '+Y', 2)
add_str('UNITS', '3d data units', self._point_units, len(self._point_units))
add_str('LABELS', 'labels', ''.join('M%03d ' % i for i in range(ppf)), 5, ppf)
add_str('DESCRIPTIONS', 'descriptions', ' ' * 16 * ppf, 16, ppf)
group = self.add_group(2, 'ANALOG', 'ANALOG group')
add('USED', 'analog channel count', 2, '<H', analog.shape[0])
add('RATE', 'analog samples per 3d frame', 4, '<f', analog.shape[1])
add('GEN_SCALE', 'analog general scale factor', 4, '<f', self._gen_scale)
add_empty_array('SCALE', 'analog channel scale factors', 4)
add_empty_array('OFFSET', 'analog channel offsets', 2)
group = self.add_group(3, 'TRIAL', 'TRIAL group')
add('ACTUAL_START_FIELD', 'actual start frame', 2, '<I', 1, 2)
add('ACTUAL_END_FIELD', 'actual end frame', 2, '<I', len(self._frames), 2)
blocks = self.parameter_blocks()
self.get('POINT:DATA_START').bytes = struct.pack('<H', 2 + blocks)
self.header.data_block = 2 + blocks
self.header.frame_rate = self._point_rate
self.header.last_frame = min(len(self._frames), 65535)
self.header.point_count = ppf
self.header.analog_count = np.prod(analog.shape)
self.header.analog_per_frame = analog.shape[0]
self.header.scale_factor = self._point_scale
self._write_metadata(handle)
self._write_frames(handle) | Write metadata and point + analog frames to a file handle.
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle. |
380,075 | def run(**options):
with Dotfile(options) as conf:
if conf[] is None:
msg = "No context file has been provided"
LOGGER.error(msg)
raise RuntimeError(msg)
if not os.path.exists(conf[]):
msg = "Context file {} not found".format(conf[])
LOGGER.error(msg)
raise RuntimeError(msg)
LOGGER.info(
(
"{{dockerstache}}: In: {}\n"
"{{dockerstache}}: Out: {}\n"
"{{dockerstache}}: Context: {}\n"
"{{dockerstache}}: Defaults: {}\n"
).format(conf[], conf[], conf[], conf[])
)
context = Context(conf[], conf[])
context.load()
if in options:
LOGGER.info("{{dockerstache}} Extended context provided")
context.update(options[])
process_templates(
conf[],
conf[],
context
)
if conf[]:
process_copies(
conf[],
conf[],
conf[]
)
return dict(conf) | _run_
Run the dockerstache process to render templates
based on the options provided
If extend_context is passed as options it will be used to
extend the context with the contents of the dictionary provided
via context.update(extend_context) |
380,076 | async def get_status(self, filters=None, utc=False):
client_facade = client.ClientFacade.from_connection(self.connection())
return await client_facade.FullStatus(filters) | Return the status of the model.
:param str filters: Optional list of applications, units, or machines
to include, which can use wildcards ('*').
:param bool utc: Display time as UTC in RFC3339 format |
380,077 | def pad_shape_right_with_ones(x, ndims):
if not (isinstance(ndims, int) and ndims >= 0):
raise ValueError(
'ndims must be a Python integer >= 0; saw {}'.format(ndims))  # message assumed
if ndims == 0:
return x
x = tf.convert_to_tensor(value=x)
original_shape = x.shape
new_shape = distribution_util.pad(
tf.shape(input=x), axis=0, back=True, value=1, count=ndims)
x = tf.reshape(x, new_shape)
x.set_shape(original_shape.concatenate([1]*ndims))
return x | Maybe add `ndims` ones to `x.shape` on the right.
If `ndims` is zero, this is a no-op; otherwise, we will create and return a
new `Tensor` whose shape is that of `x` with `ndims` ones concatenated on the
right side. If the shape of `x` is known statically, the shape of the return
value will be as well.
Args:
x: The `Tensor` we'll return a reshaping of.
ndims: Python `integer` number of ones to pad onto `x.shape`.
Returns:
If `ndims` is zero, `x`; otherwise, a `Tensor` whose shape is that of `x`
with `ndims` ones concatenated on the right side. If possible, returns a
`Tensor` whose shape is known statically.
Raises:
ValueError: if `ndims` is not a Python `integer` greater than or equal to
zero. |
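For example, padding a rank-2 tensor with two trailing singleton dimensions (a short sketch; resulting shapes shown as comments):

import tensorflow as tf

x = tf.ones([3, 4])                  # shape [3, 4]
y = pad_shape_right_with_ones(x, 2)  # shape [3, 4, 1, 1]
z = pad_shape_right_with_ones(x, 0)  # no-op: returns x unchanged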
380,078 | def get(self, sid):
    return CredentialListContext(self._version, account_sid=self._solution['account_sid'], sid=sid, ) | Constructs a CredentialListContext
:param sid: Fetch by unique credential list Sid
:returns: twilio.rest.api.v2010.account.sip.credential_list.CredentialListContext
:rtype: twilio.rest.api.v2010.account.sip.credential_list.CredentialListContext |
380,079 | def fields2jsonschema(self, fields, schema=None, use_refs=True, dump=True, name=None):
    Meta = getattr(schema, 'Meta', None)
    if getattr(Meta, 'fields', None):
        declared_fields = set(schema._declared_fields.keys())
        if set(getattr(Meta, 'fields', set())) > declared_fields:
            import warnings
            warnings.warn(
                'Only explicitly declared fields will be included in the Schema Object. '
                'Fields defined in Meta.fields or Meta.additional are ignored.',
            )
    jsonschema = {
        'type': 'object',
        'properties': (OrderedLazyDict() if getattr(Meta, 'ordered', None)
                       else LazyDict()),
    }
    exclude = set(getattr(Meta, 'exclude', []))
for field_name, field_obj in iteritems(fields):
if field_name in exclude or (field_obj.dump_only and not dump):
continue
observed_field_name = self._observed_name(field_obj, field_name)
prop_func = lambda field_obj=field_obj: self.field2property(
field_obj, use_refs=use_refs, dump=dump, name=name,
)
        jsonschema['properties'][observed_field_name] = prop_func
        partial = getattr(schema, 'partial', None)
        if field_obj.required:
            if not partial or (is_collection(partial) and field_name not in partial):
                jsonschema.setdefault('required', []).append(observed_field_name)
    if 'required' in jsonschema:
        jsonschema['required'].sort()
    if Meta is not None:
        if hasattr(Meta, 'title'):
            jsonschema['title'] = Meta.title
        if hasattr(Meta, 'description'):
            jsonschema['description'] = Meta.description
    if getattr(schema, 'many', False):
        jsonschema = {
            'type': 'array',
            'items': jsonschema,
        }
    return jsonschema | Return the JSON Schema Object for a given marshmallow
:class:`Schema <marshmallow.Schema>`. Schema may optionally provide the ``title`` and
``description`` class Meta options.
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#schemaObject
Example: ::
class UserSchema(Schema):
_id = fields.Int()
email = fields.Email(description='email address of the user')
name = fields.Str()
class Meta:
title = 'User'
description = 'A registered user'
OpenAPI.schema2jsonschema(UserSchema)
# {
# 'title': 'User', 'description': 'A registered user',
# 'properties': {
# 'name': {'required': False,
# 'description': '',
# 'type': 'string'},
# '_id': {'format': 'int32',
# 'required': False,
# 'description': '',
# 'type': 'integer'},
# 'email': {'format': 'email',
# 'required': False,
# 'description': 'email address of the user',
# 'type': 'string'}
# }
# }
:param Schema schema: A marshmallow Schema instance or a class object
:rtype: dict, a JSON Schema Object |
380,080 | def parser():
    # Config key assumed; the original literal was lost in extraction.
    query_parser = current_app.config['SEARCH_QUERY_PARSER']
if isinstance(query_parser, six.string_types):
query_parser = import_string(query_parser)
return query_parser | Return search query parser. |
380,081 | def _really_start_hb(self):
if self._beating and not self.hb_stream.closed():
self._hb_periodic_callback.start() | callback for delayed heartbeat start
Only start the hb loop if we haven't been closed during the wait. |
380,082 | def find_permission_view_menu(self, permission_name, view_menu_name):
permission = self.find_permission(permission_name)
view_menu = self.find_view_menu(view_menu_name)
if permission and view_menu:
return self.permissionview_model.objects(
permission=permission, view_menu=view_menu
).first() | Finds and returns a PermissionView by names |
380,083 | def format(self, exclude_class=False):
if exclude_class:
msg = self.msg
else:
msg = "%s: %s" % (self.__class__.__name__, self.msg)
if len(self.params) != 0:
paramstring = "\n".join([str(key) + ": " + str(val) for key, val in self.params.items()])
msg += "\nAdditional Information:\n" + paramstring
return msg | Format this exception as a string including class name.
Args:
exclude_class (bool): Whether to exclude the exception class
name when formatting this exception
Returns:
string: a multiline string with the message, class name and
key value parameters passed to create the exception. |
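A usage sketch, assuming format is defined on an exception base class that stores msg and params as the method expects; DeviceError and its parameters are hypothetical:

class DeviceError(Exception):
    def __init__(self, msg, **params):
        super(DeviceError, self).__init__(msg)
        self.msg = msg
        self.params = params
    format = format  # reuse the function above as a method (illustrative wiring)

err = DeviceError("Device did not respond", address=66, retries=3)
print(err.format())
# DeviceError: Device did not respond
# Additional Information:
# address: 66
# retries: 3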
380,084 | def _load_data(self, group, record_offset=0, record_count=None):
has_yielded = False
offset = 0
_count = record_count
channel_group = group.channel_group
if group.data_location == v23c.LOCATION_ORIGINAL_FILE:
stream = self._file
else:
stream = self._tempfile
record_offset *= channel_group.samples_byte_nr
if group.sorted:
samples_size = channel_group.samples_byte_nr
if not samples_size:
yield b"", 0, _count
has_yielded = True
else:
if self._read_fragment_size:
split_size = self._read_fragment_size // samples_size
split_size *= samples_size
else:
channels_nr = len(group.channels)
y_axis = CONVERT
idx = searchsorted(CHANNEL_COUNT, channels_nr, side="right") - 1
if idx < 0:
idx = 0
split_size = y_axis[idx]
split_size = split_size // samples_size
split_size *= samples_size
if split_size == 0:
split_size = samples_size
blocks = iter(group.data_blocks)
cur_size = 0
data = []
while True:
try:
info = next(blocks)
address, size = info.address, info.size
current_address = address
except StopIteration:
break
if offset + size < record_offset + 1:
offset += size
continue
stream.seek(address)
if offset < record_offset:
delta = record_offset - offset
stream.read(delta)
current_address += delta
size -= delta
offset = record_offset
while size >= split_size - cur_size:
stream.seek(current_address)
if data:
data.append(stream.read(split_size - cur_size))
yield b"".join(data), offset, _count
has_yielded = True
current_address += split_size - cur_size
else:
yield stream.read(split_size), offset, _count
has_yielded = True
current_address += split_size
offset += split_size
size -= split_size - cur_size
data = []
cur_size = 0
if size:
stream.seek(current_address)
data.append(stream.read(size))
cur_size += size
offset += size
if data:
yield b"".join(data), offset, _count
has_yielded = True
elif not offset:
yield b"", 0, _count
has_yielded = True
if not has_yielded:
yield b"", 0, _count
else:
record_id = group.channel_group.record_id
cg_size = group.record_size
if group.data_group.record_id_len <= 2:
record_id_nr = group.data_group.record_id_len
else:
record_id_nr = 0
cg_data = []
blocks = group.data_blocks
for info in blocks:
address, size = info.address, info.size
stream.seek(address)
data = stream.read(size)
i = 0
while i < size:
rec_id = data[i]
i += 1
rec_size = cg_size[rec_id]
if rec_id == record_id:
rec_data = data[i : i + rec_size]
cg_data.append(rec_data)
if record_id_nr == 2:
i += rec_size + 1
else:
i += rec_size
cg_data = b"".join(cg_data)
size = len(cg_data)
if size:
if offset + size < record_offset + 1:
offset += size
continue
if offset < record_offset:
delta = record_offset - offset
size -= delta
offset = record_offset
yield cg_data, offset, _count
has_yielded = True
offset += size
if not has_yielded:
yield b"", 0, _count | get group's data block bytes |
380,085 | def detect_phantomjs(version='2.1'):
if settings.phantomjs_path() is not None:
phantomjs_path = settings.phantomjs_path()
else:
if hasattr(shutil, "which"):
phantomjs_path = shutil.which("phantomjs") or "phantomjs"
else:
phantomjs_path = "phantomjs"
try:
proc = Popen([phantomjs_path, "--version"], stdout=PIPE, stderr=PIPE)
proc.wait()
out = proc.communicate()
if len(out[1]) > 0:
            raise RuntimeError('Error encountered in PhantomJS detection: %s' % out[1].decode())
required = V(version)
installed = V(out[0].decode())
if installed < required:
            raise RuntimeError('PhantomJS version too old. Version >= %s required, installed: %s' % (required, installed))
except OSError:
        raise RuntimeError('PhantomJS is not present in PATH. Try "conda install phantomjs" or "npm install -g phantomjs-prebuilt"')
    return phantomjs_path | Detect if PhantomJS is available in PATH, at a minimum version.
Args:
version (str, optional) :
Required minimum version for PhantomJS (mostly for testing)
Returns:
str, path to PhantomJS |
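A hedged usage example; render.js is a hypothetical PhantomJS script:

phantomjs = detect_phantomjs()  # raises RuntimeError if missing or too old
proc = Popen([phantomjs, 'render.js'], stdout=PIPE, stderr=PIPE)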
380,086 | def variable_map_items(variable_map):
for key, var_or_vars in six.iteritems(variable_map):
if isinstance(var_or_vars, (list, tuple)):
for variable in var_or_vars:
yield key, variable
else:
yield key, var_or_vars | Yields an iterator over (string, variable) pairs in the variable map.
In general, variable maps map variable names to either a `tf.Variable`, or
list of `tf.Variable`s (in case of sliced variables).
Args:
variable_map: dict, variable map over which to iterate.
Yields:
(string, tf.Variable) pairs. |
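A small illustration, assuming plain tf.Variable objects; a sliced variable appears as a list and is flattened by the iterator:

import tensorflow as tf

w = tf.Variable(tf.zeros([3]), name='w')
b = [tf.Variable(tf.zeros([2]), name='b_0'),
     tf.Variable(tf.zeros([2]), name='b_1')]  # sliced variable
for key, var in variable_map_items({'w': w, 'b': b}):
    print(key, var.name)  # ('w', 'w:0'), then ('b', 'b_0:0') and ('b', 'b_1:0')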
380,087 | def _add_mac_token(self, uri, http_method='GET', body=None,
headers=None, token_placement=AUTH_HEADER, ext=None, **kwargs):
if token_placement != AUTH_HEADER:
raise ValueError("Invalid token placement.")
headers = tokens.prepare_mac_header(self.access_token, uri,
self.mac_key, http_method, headers=headers, body=body, ext=ext,
hash_algorithm=self.mac_algorithm, **kwargs)
return uri, headers, body | Add a MAC token to the request authorization header.
Warning: MAC token support is experimental as the spec is not yet stable. |
380,088 | def do_one_iteration(self):
if self.control_stream:
self.control_stream.flush()
for stream in self.shell_streams:
stream.flush(zmq.POLLIN, 1)
stream.flush(zmq.POLLOUT) | step eventloop just once |
380,089 | def list_installed():
    result = __salt__['pkg.version'](_package_name(), versions_as_list=True)
if result is None:
return []
if six.PY2:
return sorted(result, cmp=_cmp_version)
else:
return sorted(result, key=functools.cmp_to_key(_cmp_version)) | Return a list of all installed kernels.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.list_installed |
380,090 | def check_cv(cv=3, y=None, classifier=False):
if cv is None:
cv = 3
if not is_dask_collection(y) or not isinstance(cv, numbers.Integral):
return model_selection.check_cv(cv, y, classifier)
if classifier:
target_type = delayed(type_of_target, pure=True)(y).compute()
if target_type in ("binary", "multiclass"):
return StratifiedKFold(cv)
return KFold(cv) | Dask aware version of ``sklearn.model_selection.check_cv``
Same as the scikit-learn version, but works if ``y`` is a dask object. |
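A sketch of the dask-aware branches, assuming a dask.array target (values illustrative):

import dask.array as da
import numpy as np

y = da.from_array(np.array([0, 1, 0, 1, 0, 1]), chunks=3)
cv = check_cv(3, y, classifier=True)   # computes type_of_target(y), returns StratifiedKFold(3)
cv = check_cv(3, y, classifier=False)  # returns KFold(3) without computing y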
380,091 | def is_period_arraylike(arr):
if isinstance(arr, (ABCPeriodIndex, ABCPeriodArray)):
return True
elif isinstance(arr, (np.ndarray, ABCSeries)):
return is_period_dtype(arr.dtype)
    return getattr(arr, 'inferred_type', None) == 'period' | Check whether an array-like is a periodical array-like or PeriodIndex.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a periodical array-like or
PeriodIndex instance.
Examples
--------
>>> is_period_arraylike([1, 2, 3])
False
>>> is_period_arraylike(pd.Index([1, 2, 3]))
False
>>> is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D"))
True |
380,092 | def identify_protocol(method, value):
for protocol_name in PROTOCOLS:
protocol = importlib.import_module(f"federation.protocols.{protocol_name}.protocol")
if getattr(protocol, f"identify_{method}")(value):
return protocol
else:
raise NoSuitableProtocolFoundError() | Loop through protocols, import the protocol module and try to identify the id or request. |
380,093 | def blackbox_and_coarse_grain(blackbox, coarse_grain):
if blackbox is None:
return
for box in blackbox.partition:
outputs = set(box) & set(blackbox.output_indices)
if coarse_grain is None and len(outputs) > 1:
            raise ValueError(
                'A blackboxing with multiple outputs per box must be coarse-grained.')
if (coarse_grain and not any(outputs.issubset(part)
for part in coarse_grain.partition)):
            raise ValueError(
                'Coarse-graining does not group all the outputs of a blackbox '
                'into the same macro-element.') | Validate that a coarse-graining properly combines the outputs of a
blackboxing. |
380,094 | def _handle_utf8_payload(body, properties):
    if 'content_encoding' not in properties:
        properties['content_encoding'] = 'utf-8'
    encoding = properties['content_encoding']
if compatibility.is_unicode(body):
body = body.encode(encoding)
elif compatibility.PYTHON3 and isinstance(body, str):
body = bytes(body, encoding=encoding)
return body | Update the Body and Properties to the appropriate encoding.
:param bytes|str|unicode body: Message payload
:param dict properties: Message properties
:return: |
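A minimal illustration of the encoding rules (utf-8 is filled in when no content_encoding is set):

properties = {}
body = _handle_utf8_payload(u'h\xe9llo', properties)
# body == b'h\xc3\xa9llo'; properties['content_encoding'] == 'utf-8'

properties = {'content_encoding': 'latin-1'}
body = _handle_utf8_payload(u'h\xe9llo', properties)
# body == b'h\xe9llo'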
380,095 | def schur_complement(mat, row, col):
a = mat[:row, :col]
b = mat[:row, col:]
c = mat[row:, :col]
d = mat[row:, col:]
return a - b.dot(d.I).dot(c) | compute the schur complement of the matrix block mat[row:,col:] of the matrix mat |
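A quick check with numpy matrices, which the d.I inverse above assumes (values are illustrative):

import numpy as np

mat = np.matrix([[4., 1., 0.],
                 [1., 3., 1.],
                 [0., 1., 2.]])
s = schur_complement(mat, 2, 2)  # a - b * d**-1 * c for the 2x2 / 1x1 split
# s == matrix([[4., 1.], [1., 2.5]])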
380,096 | def append_op(self, operation):
if operation not in self.ops:
self.ops.append(operation)
return self | Append an :class:`Operation <stellar_base.operation.Operation>` to
the list of operations.
Add the operation specified if it doesn't already exist in the list of
operations of this :class:`Builder` instance.
:param operation: The operation to append to the list of operations.
:type operation: :class:`Operation`
:return: This builder instance. |
380,097 | def count_names_by_namespace(graph, namespace):
if namespace not in graph.defined_namespace_keywords:
        raise IndexError('{} is not defined in {}'.format(namespace, graph))
return Counter(_namespace_filtered_iterator(graph, namespace)) | Get the set of all of the names in a given namespace that are in the graph.
:param pybel.BELGraph graph: A BEL graph
:param str namespace: A namespace keyword
:return: A counter from {name: frequency}
:rtype: collections.Counter
:raises IndexError: if the namespace is not defined in the graph. |
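A hedged usage example, assuming a populated BEL graph whose nodes use the HGNC namespace:

from pybel import BELGraph

graph = BELGraph()
# ... add nodes annotated in the 'HGNC' namespace ...
counts = count_names_by_namespace(graph, 'HGNC')
print(counts.most_common(5))  # five most frequent HGNC names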
380,098 | def deleteThreads(self, thread_ids):
thread_ids = require_list(thread_ids)
data_unpin = dict()
data_delete = dict()
for i, thread_id in enumerate(thread_ids):
data_unpin["ids[{}]".format(thread_id)] = "false"
data_delete["ids[{}]".format(i)] = thread_id
r_unpin = self._post(self.req_url.PINNED_STATUS, data_unpin)
r_delete = self._post(self.req_url.DELETE_THREAD, data_delete)
return r_unpin.ok and r_delete.ok | Deletes threads
:param thread_ids: Thread IDs to delete. See :ref:`intro_threads`
:return: Whether the request was successful
:raises: FBchatException if request failed |
380,099 | def logout(self):
response = None
try:
response = requests.delete(
urls.login(),
            headers={
                'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response) | Logout and remove vid |