def sepBy(p, sep):
'''`sepBy(p, sep)` parses zero or more occurrences of p, separated by `sep`.
Returns a list of values returned by `p`.'''
return separated(p, sep, 0, maxt=float('inf'), end=False)
async def _download_predicate_data(self, class_, controller):
"""Get raw predicate information for given request class, and cache for
subsequent calls.
"""
await self.authenticate()
url = ('{0}{1}/modeldef/class/{2}'
.format(self.base_url, controller, class_))
resp = await self._ratelimited_get(url)
await _raise_for_status(resp)
resp_json = await resp.json()
return resp_json['data']
def _transport_interceptor(self, callback):
"""Takes a callback function and returns a function that takes headers and
messages and places them on the main service queue."""
def add_item_to_queue(header, message):
queue_item = (
Priority.TRANSPORT,
next(
self._transport_interceptor_counter
), # insertion sequence to keep messages in order
(callback, header, message),
)
self.__queue.put(
queue_item
) # Block incoming transport until insertion completes
return add_item_to_queue
def is_anagram(s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
maps = {}
mapt = {}
for i in s:
maps[i] = maps.get(i, 0) + 1
for i in t:
mapt[i] = mapt.get(i, 0) + 1
return maps == mapt
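A minimal usage sketch of is_anagram (the inputs below are illustrative, not from the original source):
# Two strings are anagrams when their character counts match.
assert is_anagram("listen", "silent")
assert not is_anagram("rat", "car")
# Equivalent check with the standard library, for comparison:
from collections import Counter
assert (Counter("listen") == Counter("silent")) == is_anagram("listen", "silent")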
def v_from_i(resistance_shunt, resistance_series, nNsVth, current,
saturation_current, photocurrent, method='lambertw'):
'''
Device voltage at the given device current for the single diode model.
Uses the single diode model (SDM) as described in, e.g.,
Jain and Kapoor 2004 [1].
The solution is per Eq 3 of [1] except when resistance_shunt=numpy.inf,
in which case the explicit solution for voltage is used.
Ideal device parameters are specified by resistance_shunt=np.inf and
resistance_series=0.
Inputs to this function can include scalars and pandas.Series, but it is
the caller's responsibility to ensure that the arguments are all float64
and within the proper ranges.
Parameters
----------
resistance_shunt : numeric
Shunt resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rsh``.
0 < resistance_shunt <= numpy.inf
resistance_series : numeric
Series resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rs``.
0 <= resistance_series < numpy.inf
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
(n), 2) the number of cells in series (Ns), and 3) the cell
thermal voltage under the desired IV curve conditions (Vth). The
thermal voltage of the cell (in volts) may be calculated as
``k*temp_cell/q``, where k is Boltzmann's constant (J/K),
temp_cell is the temperature of the p-n junction in Kelvin, and
q is the charge of an electron (coulombs).
0 < nNsVth
current : numeric
The current in amperes under desired IV curve conditions.
saturation_current : numeric
Diode saturation current in amperes under desired IV curve
conditions. Often abbreviated ``I_0``.
0 < saturation_current
photocurrent : numeric
Light-generated current (photocurrent) in amperes under desired
IV curve conditions. Often abbreviated ``I_L``.
0 <= photocurrent
method : str
Method to use: ``'lambertw'``, ``'newton'``, or ``'brentq'``. *Note*:
``'brentq'`` is limited to 1st quadrant only.
Returns
-------
voltage : np.ndarray or scalar
References
----------
[1] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
'''
if method.lower() == 'lambertw':
return _singlediode._lambertw_v_from_i(
resistance_shunt, resistance_series, nNsVth, current,
saturation_current, photocurrent
)
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (current, photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth)
V = _singlediode.bishop88_v_from_i(*args, method=method.lower())
# find the right size and shape for returns
size, shape = _singlediode._get_size_and_shape(args)
if size <= 1:
if shape is not None:
V = np.tile(V, shape)
if np.isnan(V).any() and size <= 1:
V = np.repeat(V, size)
if shape is not None:
V = V.reshape(shape)
return V
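A short usage sketch, assuming the helpers this function relies on (the `_singlediode` module and `numpy as np`) are importable as in pvlib; the parameter values are made up for illustration:
# Scalar single diode parameters: Rsh=20 ohm, Rs=0.5 ohm, nNsVth~0.5 V,
# operating current 3 A, saturation current 6e-10 A, photocurrent 7 A.
v = v_from_i(20.0, 0.5, 0.5, 3.0, 6e-10, 7.0, method='lambertw')
# `v` is the device voltage at that current (a scalar here); array-like
# inputs broadcast element-wise and return a numpy array.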
def lookup(self, hostname):
"""
Return a dict (`SSHConfigDict`) of config options for a given hostname.
The host-matching rules of OpenSSH's ``ssh_config`` man page are used:
For each parameter, the first obtained value will be used. The
configuration files contain sections separated by ``Host``
specifications, and that section is only applied for hosts that match
one of the patterns given in the specification.
Since the first obtained value for each parameter is used, more host-
specific declarations should be given near the beginning of the file,
and general defaults at the end.
The keys in the returned dict are all normalized to lowercase (look for
``"port"``, not ``"Port"``. The values are processed according to the
rules for substitution variable expansion in ``ssh_config``.
Finally, please see the docs for `SSHConfigDict` for deeper info on
features such as optional type conversion methods, e.g.::
conf = my_config.lookup('myhost')
assert conf['passwordauthentication'] == 'yes'
assert conf.as_bool('passwordauthentication') is True
:param str hostname: the hostname to lookup
.. versionchanged:: 2.5
Returns `SSHConfigDict` objects instead of dict literals.
"""
matches = [
config
for config in self._config
if self._allowed(config["host"], hostname)
]
ret = SSHConfigDict()
for match in matches:
for key, value in match["config"].items():
if key not in ret:
# Create a copy of the original value,
# else it will reference the original list
# in self._config and update that value too
# when the extend() is being called.
ret[key] = value[:] if value is not None else value
elif key == "identityfile":
ret[key].extend(value)
ret = self._expand_variables(ret, hostname)
# TODO: remove in 3.x re #670
if "proxycommand" in ret and ret["proxycommand"] is None:
del ret["proxycommand"]
return ret
def ccdmask(flat1, flat2=None, mask=None, lowercut=6.0, uppercut=6.0,
siglev=1.0, mode='region', nmed=(7, 7), nsig=(15, 15)):
"""Find cosmetic defects in a detector using two flat field images.
Two arrays representing flat fields of different exposure times are
required. Cosmetic defects are selected as points that deviate
significantly from the expected normal distribution of pixels in
the ratio between `flat2` and `flat1`. The median of the ratio
is computed and subtracted. Then, the standard deviation is estimated
computing the percentiles
nearest to the pixel values corresponding to `siglev` in the normal CDF.
The standard deviation is then the distance between the pixel values
divided by two times `siglev`. The ratio image is then normalized with
this standard deviation.
The behavior of the function depends on the value of the parameter
`mode`. If the value is 'region' (the default), both the median
and the sigma are computed in boxes. If the value is 'full', these
values are computed using the full array.
The size of the boxes in 'region' mode is given by `nmed` for
the median computation and `nsig` for the standard deviation.
The values in the normalized ratio array above `uppercut`
are flagged as hot pixels, and those below ``-lowercut`` are
flagged as dead pixels in the output mask.
:parameter flat1: an array representing a flat illuminated exposure.
:parameter flat2: an array representing a flat illuminated exposure.
:parameter mask: an integer array representing initial mask.
:parameter lowercut: values below this sigma level are flagged as dead pixels.
:parameter uppercut: values above this sigma level are flagged as hot pixels.
:parameter siglev: level to estimate the standard deviation.
:parameter mode: either 'full' or 'region'
:parameter nmed: region used to compute the median
:parameter nsig: region used to estimate the standard deviation
:returns: the normalized ratio of the flats, the updated mask and standard deviation
.. note::
This function is based on the description of the task
ccdmask of IRAF
.. seealso::
:py:func:`cosmetics`
Operates much like this function but computes
median and sigma in the whole image instead of in boxes
"""
if flat2 is None:
# we have to swap flat1 and flat2, and
# make flat1 an array of 1s
flat1, flat2 = flat2, flat1
flat1 = numpy.ones_like(flat2)
if mask is None:
mask = numpy.zeros_like(flat1, dtype='int')
ratio = numpy.zeros_like(flat1)
invalid = numpy.zeros_like(flat1)
invalid[mask == PIXEL_HOT] = HIGH_SIGMA
invalid[mask == PIXEL_DEAD] = LOW_SIGMA
gmask = mask == PIXEL_VALID
_logger.info('valid points in input mask %d', numpy.count_nonzero(gmask))
smask = mask != PIXEL_VALID
_logger.info('invalid points in input mask %d', numpy.count_nonzero(smask))
# check if there are zeros in flat1 and flat2
zero_mask = numpy.logical_or(flat1[gmask] <= 0, flat2[gmask] <= 0)
# if there is something in zero mask
# we update the mask
if numpy.any(zero_mask):
mask, gmask, smask = update_mask(mask, gmask, zero_mask, PIXEL_DEAD)
invalid[mask == PIXEL_DEAD] = LOW_SIGMA
# ratio of flats
ratio[gmask] = flat2[gmask] / flat1[gmask]
ratio[smask] = invalid[smask]
if mode == 'region':
_logger.info('computing median in boxes of %r', nmed)
ratio_med = scipy.ndimage.filters.median_filter(ratio, size=nmed)
# subtracting the median map
ratio[gmask] -= ratio_med[gmask]
else:
_logger.info('computing median in full array')
ratio_med = numpy.median(ratio[gmask])
ratio[gmask] -= ratio_med
# Quantiles that contain nsig sigma in normal distribution
qns = 100 * scipy.stats.norm.cdf(siglev)
pns = 100 - qns
_logger.info('percentiles at siglev=%f', siglev)
_logger.info('low %f%% high %f%%', pns, qns)
# in several blocks of shape nsig
# we estimate sigma
sigma = numpy.zeros_like(ratio)
if mode == 'region':
mshape = max_blk_coverage(blk=nsig, shape=ratio.shape)
_logger.info('estimating sigma in boxes of %r', nsig)
_logger.info('shape covered by boxes is %r', mshape)
block_gen = blk_nd_short(blk=nsig, shape=ratio.shape)
else:
mshape = ratio.shape
_logger.info('estimating sigma in full array')
# slice(None) is equivalent to [:]
block_gen = itertools.repeat(slice(None), 1)
for blk in block_gen:
# mask for this region
m = mask[blk] == PIXEL_VALID
valid_points = numpy.ravel(ratio[blk][m])
ls = scipy.stats.scoreatpercentile(valid_points, pns)
hs = scipy.stats.scoreatpercentile(valid_points, qns)
_logger.debug('score at percentiles')
_logger.debug('low %f high %f', ls, hs)
# sigma estimation
sig = (hs - ls) / (2 * siglev)
_logger.debug('sigma estimation is %f ', sig)
# normalized points
sigma[blk] = sig
# fill regions of sigma not computed
fill0 = ratio.shape[0] - mshape[0]
fill1 = ratio.shape[1] - mshape[1]
if fill0 > 0:
_logger.info('filling %d rows in sigma image', fill0)
sigma[:, mshape[0]:] = sigma[:, mshape[0] - fill0:mshape[0]]
if fill1 > 0:
_logger.info('filling %d columns in sigma image', fill1)
sigma[mshape[1]:, :] = sigma[mshape[1] - fill1:mshape[1], :]
# invalid_sigma = sigma <= 0.0
# if numpy.any(invalid_sigma):
# _logger.info('updating mask with points where sigma <=0')
# mask, gmask, smask = update_mask(mask, gmask, invalid_sigma, PIXEL_HOT)
# invalid[mask == PIXEL_HOT] = HIGH_SIGMA
ratio[gmask] /= sigma[gmask]
f1_ratio = ratio[gmask]
f1_mask = mask[gmask]
f1_mask[f1_ratio >= uppercut] = PIXEL_HOT
f1_mask[f1_ratio <= -lowercut] = PIXEL_DEAD
mask[gmask] = f1_mask
return ratio, mask, sigma
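A minimal sketch of driving ccdmask with two synthetic flats; it assumes the module-level constants (PIXEL_VALID, PIXEL_HOT, PIXEL_DEAD) and helpers (max_blk_coverage, blk_nd_short, update_mask) used above are available, and the flat values are invented for illustration:
import numpy
rng = numpy.random.RandomState(0)
flat_short = rng.normal(10000.0, 100.0, size=(512, 512))
flat_long = rng.normal(20000.0, 140.0, size=(512, 512))
ratio, mask, sigma = ccdmask(flat_short, flat_long, siglev=1.0, mode='region')
# Count the pixels flagged by the sigma clipping.
n_hot = int((mask == PIXEL_HOT).sum())
n_dead = int((mask == PIXEL_DEAD).sum())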
def delete_dashboard(self, id, **kwargs): # noqa: E501
"""Delete a specific dashboard # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_dashboard(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerDashboard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_dashboard_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_dashboard_with_http_info(id, **kwargs) # noqa: E501
return data
def remove_from_category(self, category):
"""Removes this object from a given category.
:param Category category:
:return:
"""
ctype = ContentType.objects.get_for_model(self)
self.categories.model.objects.filter(category=category, content_type=ctype, object_id=self.id).delete()
def com_adobe_fonts_check_family_consistent_upm(ttFonts):
"""Fonts have consistent Units Per Em?"""
upm_set = set()
for ttFont in ttFonts:
upm_set.add(ttFont['head'].unitsPerEm)
if len(upm_set) > 1:
yield FAIL, ("Fonts have different units per em: {}."
).format(sorted(upm_set))
else:
yield PASS, "Fonts have consistent units per em."
def pypi(
click_ctx,
requirements,
index=None,
python_version=3,
exclude_packages=None,
output=None,
subgraph_check_api=None,
no_transitive=True,
no_pretty=False,
):
"""Manipulate with dependency requirements using PyPI."""
requirements = [requirement.strip() for requirement in requirements.split("\\n") if requirement]
if not requirements:
_LOG.error("No requirements specified, exiting")
sys.exit(1)
if not subgraph_check_api:
_LOG.info(
"No subgraph check API provided, no queries will be done for dependency subgraphs that should be avoided"
) # Ignore PycodestyleBear (E501)
result = resolve_python(
requirements,
index_urls=index.split(",") if index else ("https://pypi.org/simple",),
python_version=int(python_version),
transitive=not no_transitive,
exclude_packages=set(map(str.strip, (exclude_packages or "").split(","))),
subgraph_check_api=subgraph_check_api,
)
print_command_result(
click_ctx,
result,
analyzer=analyzer_name,
analyzer_version=analyzer_version,
output=output or "-",
pretty=not no_pretty,
)
def verify_signature(self,
signing_key,
message,
signature,
padding_method,
signing_algorithm=None,
hashing_algorithm=None,
digital_signature_algorithm=None):
"""
Verify a message signature.
Args:
signing_key (bytes): The bytes of the signing key to use for
signature verification. Required.
message (bytes): The bytes of the message that corresponds with
the signature. Required.
signature (bytes): The bytes of the signature to be verified.
Required.
padding_method (PaddingMethod): An enumeration specifying the
padding method to use during signature verification. Required.
signing_algorithm (CryptographicAlgorithm): An enumeration
specifying the cryptographic algorithm to use for signature
verification. Only RSA is supported. Optional, must match the
algorithm specified by the digital signature algorithm if both
are provided. Defaults to None.
hashing_algorithm (HashingAlgorithm): An enumeration specifying
the hashing algorithm to use with the cryptographic algorithm,
if needed. Optional, must match the algorithm specified by the
digital signature algorithm if both are provided. Defaults to
None.
digital_signature_algorithm (DigitalSignatureAlgorithm): An
enumeration specifying both the cryptographic and hashing
algorithms to use for signature verification. Optional, must
match the cryptographic and hashing algorithms if both are
provided. Defaults to None.
Returns:
boolean: the result of signature verification, True for valid
signatures, False for invalid signatures
Raises:
InvalidField: Raised when various settings or values are invalid.
CryptographicFailure: Raised when the signing key bytes cannot be
loaded, or when the signature verification process fails
unexpectedly.
"""
backend = default_backend()
hash_algorithm = None
dsa_hash_algorithm = None
dsa_signing_algorithm = None
if hashing_algorithm:
hash_algorithm = self._encryption_hash_algorithms.get(
hashing_algorithm
)
if digital_signature_algorithm:
algorithm_pair = self._digital_signature_algorithms.get(
digital_signature_algorithm
)
if algorithm_pair:
dsa_hash_algorithm = algorithm_pair[0]
dsa_signing_algorithm = algorithm_pair[1]
if dsa_hash_algorithm and dsa_signing_algorithm:
if hash_algorithm and (hash_algorithm != dsa_hash_algorithm):
raise exceptions.InvalidField(
"The hashing algorithm does not match the digital "
"signature algorithm."
)
if (signing_algorithm and
(signing_algorithm != dsa_signing_algorithm)):
raise exceptions.InvalidField(
"The signing algorithm does not match the digital "
"signature algorithm."
)
signing_algorithm = dsa_signing_algorithm
hash_algorithm = dsa_hash_algorithm
if signing_algorithm == enums.CryptographicAlgorithm.RSA:
if padding_method == enums.PaddingMethod.PSS:
if hash_algorithm:
padding = asymmetric_padding.PSS(
mgf=asymmetric_padding.MGF1(hash_algorithm()),
salt_length=asymmetric_padding.PSS.MAX_LENGTH
)
else:
raise exceptions.InvalidField(
"A hashing algorithm must be specified for PSS "
"padding."
)
elif padding_method == enums.PaddingMethod.PKCS1v15:
padding = asymmetric_padding.PKCS1v15()
else:
raise exceptions.InvalidField(
"The padding method '{0}' is not supported for signature "
"verification.".format(padding_method)
)
try:
public_key = backend.load_der_public_key(signing_key)
except Exception:
try:
public_key = backend.load_pem_public_key(signing_key)
except Exception:
raise exceptions.CryptographicFailure(
"The signing key bytes could not be loaded."
)
try:
public_key.verify(
signature,
message,
padding,
hash_algorithm()
)
return True
except errors.InvalidSignature:
return False
except Exception:
raise exceptions.CryptographicFailure(
"The signature verification process failed."
)
else:
raise exceptions.InvalidField(
"The signing algorithm '{0}' is not supported for "
"signature verification.".format(signing_algorithm)
)
def _setLearningMode(self, l4Learning = False, l2Learning=False):
"""
Sets the learning mode for L4 and L2.
"""
for column in self.L4Columns:
column.setParameter("learn", 0, l4Learning)
for column in self.L2Columns:
column.setParameter("learningMode", 0, l2Learning) | Sets the learning mode for L4 and L2. |
def format(self, status, headers, environ, bucket, delay):
"""
Formats a response entity. Returns a tuple of the desired
status code and the formatted entity. The default status code
is passed in, as is a dictionary of headers.
:param status: The default status code. Should be returned to
the caller, or an alternate selected. The
status code should include both the number and
the message, separated by a single space.
:param headers: A dictionary of headers for the response.
Should update the 'Content-Type' header at a
minimum.
:param environ: The WSGI environment for the request.
:param bucket: The bucket containing the data which caused the
delay decision to be made. This can be used to
obtain such information as the next time the
request can be made.
:param delay: The number of seconds by which the request
should be delayed.
"""
# This is a default response entity, which can be overridden
# by limit subclasses.
entity = ("This request was rate-limited. "
"Please retry your request after %s." %
time.strftime("%Y-%m-%dT%H:%M:%SZ",
time.gmtime(bucket.next)))
headers['Content-Type'] = 'text/plain'
return status, entity
def move(self, x, y):
"""Changes the overlay's position relative to the IFramebuffer.
in x of type int
in y of type int
"""
if not isinstance(x, baseinteger):
raise TypeError("x can only be an instance of type baseinteger")
if not isinstance(y, baseinteger):
raise TypeError("y can only be an instance of type baseinteger")
self._call("move",
in_p=[x, y])
def from_word2vec(fname, fvocab=None, binary=False):
"""
Load the input-hidden weight matrix from the original C word2vec-tool format.
Note that the information stored in the file is incomplete (the binary tree is missing),
so while you can query for word similarity etc., you cannot continue training
with a model loaded this way.
`binary` is a boolean indicating whether the data is in binary word2vec format.
Word counts are read from `fvocab` filename, if set (this is the file generated
by `-save-vocab` flag of the original C tool).
"""
vocabulary = None
if fvocab is not None:
logger.info("loading word counts from %s" % (fvocab))
vocabulary = Embedding.from_word2vec_vocab(fvocab)
logger.info("loading projection weights from %s" % (fname))
if binary:
words, vectors = Embedding._from_word2vec_binary(fname)
else:
words, vectors = Embedding._from_word2vec_text(fname)
if not vocabulary:
vocabulary = OrderedVocabulary(words=words)
return Embedding(vocabulary=vocabulary, vectors=vectors)
def append(self, item):
"""
See :meth:`list.append()` method
Calls observer ``self.observer(UpdateType.CREATED, item, index)`` where
**index** is *item position*
"""
self.real_list.append(item)
self.observer(UpdateType.CREATED, item, len(self.real_list) - 1)
def bilinear_sampling(input_layer, x, y, name=PROVIDED):
"""Performs bilinear sampling. This must be a rank 4 Tensor.
Implements the differentiable sampling mechanism with bilinear kernel
in https://arxiv.org/abs/1506.02025.
Given (x, y) coordinates for each output pixel, use bilinear sampling on
the input_layer to fill the output.
Args:
input_layer: The chainable object, supplied.
x: A tensor of size [batch_size, height, width, 1] representing the sampling
x coordinates normalized to range [-1,1].
y: A tensor of size [batch_size, height, width, 1] representing the
sampling y coordinates normalized to range [-1,1].
name: The name for this operation is also used to create/find the
parameter variables.
Returns:
Handle to this layer
"""
input_layer.get_shape().assert_has_rank(4)
return _interpolate(im=input_layer, x=x, y=y, name=name)
def validate(self, instance, value):
"""Checks that value is a complex number
Floats and Integers are coerced to complex numbers
"""
try:
compval = complex(value)
if not self.cast and (
abs(value.real - compval.real) > TOL or
abs(value.imag - compval.imag) > TOL
):
self.error(
instance=instance,
value=value,
extra='Not within tolerance range of {}.'.format(TOL),
)
except (TypeError, ValueError, AttributeError):
self.error(instance, value)
return compval
def stringify(self) :
"a pretty str version of getChain()"
l = []
h = self.head
while h :
l.append(str(h._key))
h = h.nextDoc
return "<->".join(l) | a pretty str version of getChain() |
def parse_version(str_):
"""
Parses the program's version from a python variable declaration.
"""
v = re.findall(r"\d+.\d+.\d+", str_)
if v:
return v[0]
else:
print("cannot parse string {}".format(str_))
raise KeyError
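A brief usage sketch (the version string below is hypothetical):
# Typical use: extract the version from a line such as `__version__ = "1.4.2"`.
assert parse_version('__version__ = "1.4.2"') == "1.4.2"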
def send_signal(self, backend, signal):
"""
Sends the `signal` signal to `backend`. Raises ValueError if `backend`
is not registered with the client. Returns the result.
"""
backend = self._expand_host(backend)
if backend in self.backends:
try:
return self._work(backend, self._package(signal), log=False)
except socket.error:
raise BackendNotAvailableError
else:
raise ValueError('No such backend!')
def wgs84togcj02(lng, lat):
"""
Convert WGS84 coordinates to GCJ-02 (the "Mars" coordinate system used in China).
:param lng: longitude in the WGS84 coordinate system
:param lat: latitude in the WGS84 coordinate system
:return:
"""
if out_of_china(lng, lat):  # check whether the point lies inside China
return lng, lat
dlat = transformlat(lng - 105.0, lat - 35.0)
dlng = transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * pi
magic = math.sin(radlat)
magic = 1 - ee * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
dlng = (dlng * 180.0) / (a / sqrtmagic * math.cos(radlat) * pi)
mglat = lat + dlat
mglng = lng + dlng
return [mglng, mglat]
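A usage sketch with an illustrative coordinate inside China; the exact offset depends on the module constants (a, ee) and the transformlat/transformlng helpers assumed by this function:
# A point near Beijing, in (lng, lat) order; values are illustrative.
mglng, mglat = wgs84togcj02(116.397, 39.909)
# The GCJ-02 result is shifted relative to WGS84; points outside China
# are returned unchanged.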
def _get_imported_module(self, module_name):
"""try to get imported module reference by its name"""
# if imported module on module_set add to list
imp_mod = self.by_name.get(module_name)
if imp_mod:
return imp_mod
# last part of import section might not be a module
# remove last section
no_obj = module_name.rsplit('.', 1)[0]
imp_mod2 = self.by_name.get(no_obj)
if imp_mod2:
return imp_mod2
# special case for __init__
if module_name in self.pkgs:
pkg_name = module_name + ".__init__"
return self.by_name[pkg_name]
if no_obj in self.pkgs:
pkg_name = no_obj + ".__init__"
return self.by_name[pkg_name]
def isect(list1, list2):
r"""
returns list1 elements that are also in list2. preserves order of list1
intersect_ordered
Args:
list1 (list):
list2 (list):
Returns:
list: new_list
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> list1 = ['featweight_rowid', 'feature_rowid', 'config_rowid', 'featweight_forground_weight']
>>> list2 = [u'featweight_rowid']
>>> result = intersect_ordered(list1, list2)
>>> print(result)
['featweight_rowid']
Timeit:
def timeit_func(func, *args):
niter = 10
times = []
for count in range(niter):
with ut.Timer(verbose=False) as t:
_ = func(*args)
times.append(t.ellapsed)
return sum(times) / niter
grid = {
'size1': [1000, 5000, 10000, 50000],
'size2': [1000, 5000, 10000, 50000],
#'overlap': [0, 1],
}
data = []
for kw in ut.all_dict_combinations(grid):
pool = np.arange(kw['size1'] * 2)
size2 = size1 = kw['size1']
size2 = kw['size2']
list1 = (np.random.rand(size1) * size1).astype(np.int32).tolist()
list1 = ut.random_sample(pool, size1).tolist()
list2 = ut.random_sample(pool, size2).tolist()
list1 = set(list1)
list2 = set(list2)
kw['ut'] = timeit_func(ut.isect, list1, list2)
#kw['np1'] = timeit_func(np.intersect1d, list1, list2)
#kw['py1'] = timeit_func(lambda a, b: set.intersection(set(a), set(b)), list1, list2)
kw['py2'] = timeit_func(lambda a, b: sorted(set.intersection(set(a), set(b))), list1, list2)
data.append(kw)
import pandas as pd
pd.options.display.max_rows = 1000
pd.options.display.width = 1000
df = pd.DataFrame.from_dict(data)
data_keys = list(grid.keys())
other_keys = ut.setdiff(df.columns, data_keys)
df = df.reindex_axis(data_keys + other_keys, axis=1)
df['abs_change'] = df['ut'] - df['py2']
df['pct_change'] = df['abs_change'] / df['ut'] * 100
#print(df.sort('abs_change', ascending=False))
print(str(df).split('\n')[0])
for row in df.values:
argmin = row[len(data_keys):len(data_keys) + len(other_keys)].argmin() + len(data_keys)
print(' ' + ', '.join([
'%6d' % (r) if x < len(data_keys) else (
ut.color_text('%8.6f' % (r,), 'blue')
if x == argmin else '%8.6f' % (r,))
for x, r in enumerate(row)
]))
%timeit ut.isect(list1, list2)
%timeit np.intersect1d(list1, list2, assume_unique=True)
%timeit set.intersection(set(list1), set(list2))
#def highlight_max(s):
# '''
# highlight the maximum in a Series yellow.
# '''
# is_max = s == s.max()
# return ['background-color: yellow' if v else '' for v in is_max]
#df.style.apply(highlight_max)
"""
set2 = set(list2)
return [item for item in list1 if item in set2]
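A small usage sketch of isect:
# isect preserves the order of list1 and keeps only items also present in list2.
assert isect([3, 1, 2, 5], [2, 3, 9]) == [3, 2]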
def start(self):
"""
Starts the dependency manager
"""
self._context.add_service_listener(
self, self.requirement.filter, self.requirement.specification
)
def getDelOps(self, buid):
'''
Get a list of storage operations to delete this property from the buid.
Args:
buid (bytes): The node buid.
Returns:
(tuple): The storage operations
'''
return (
('prop:del', (buid, self.form.name, self.name, self.storinfo)),
)
def from_css(Class, csstext, encoding=None, href=None, media=None, title=None, validate=None):
"""parse CSS text into a Styles object, using cssutils
"""
styles = Class()
cssStyleSheet = cssutils.parseString(csstext, encoding=encoding, href=href, media=media, title=title, validate=validate)
for rule in cssStyleSheet.cssRules:
if rule.type==cssutils.css.CSSRule.FONT_FACE_RULE:
if styles.get('@font-face') is None: styles['@font-face'] = []
styles['@font-face'].append(Class.styleProperties(rule.style))
elif rule.type==cssutils.css.CSSRule.IMPORT_RULE:
if styles.get('@import') is None: styles['@import'] = []
styles['@import'].append("url(%s)" % rule.href)
elif rule.type==cssutils.css.CSSRule.NAMESPACE_RULE:
if styles.get('@namespace') is None: styles['@namespace'] = {}
styles['@namespace'][rule.prefix] = rule.namespaceURI
elif rule.type==cssutils.css.CSSRule.MEDIA_RULE:
if styles.get('@media') is None: styles['@media'] = []
styles['@media'].append(rule.cssText)
elif rule.type==cssutils.css.CSSRule.PAGE_RULE:
if styles.get('@page') is None: styles['@page'] = []
styles['@page'].append(rule.cssText)
elif rule.type==cssutils.css.CSSRule.STYLE_RULE:
for selector in rule.selectorList:
sel = selector.selectorText
if sel not in styles:
styles[sel] = Class.styleProperties(rule.style)
elif rule.type==cssutils.css.CSSRule.CHARSET_RULE:
styles['@charset'] = rule.encoding
elif rule.type==cssutils.css.CSSRule.COMMENT: # comments are thrown away
pass
elif rule.type==cssutils.css.CSSRule.VARIABLES_RULE:
pass
else:
log.warning("Unknown rule type: %r" % rule.cssText)
return styles
def blob_handler(self, cmd):
"""Process a BlobCommand."""
# These never pass through directly. We buffer them and only
# output them if referenced by an interesting command.
self.blobs[cmd.id] = cmd
self.keep = False
def _algebraic_rules_scalar():
"""Set the default algebraic rules for scalars"""
a = wc("a", head=SCALAR_VAL_TYPES)
b = wc("b", head=SCALAR_VAL_TYPES)
x = wc("x", head=SCALAR_TYPES)
y = wc("y", head=SCALAR_TYPES)
z = wc("z", head=SCALAR_TYPES)
indranges__ = wc("indranges__", head=IndexRangeBase)
ScalarTimes._binary_rules.update(check_rules_dict([
('R001', (
pattern_head(a, b),
lambda a, b: a * b)),
('R002', (
pattern_head(x, x),
lambda x: x**2)),
('R003', (
pattern_head(Zero, x),
lambda x: Zero)),
('R004', (
pattern_head(x, Zero),
lambda x: Zero)),
('R005', (
pattern_head(
pattern(ScalarPower, x, y),
pattern(ScalarPower, x, z)),
lambda x, y, z: x**(y+z))),
('R006', (
pattern_head(x, pattern(ScalarPower, x, -1)),
lambda x: One)),
]))
ScalarPower._rules.update(check_rules_dict([
('R001', (
pattern_head(a, b),
lambda a, b: a**b)),
('R002', (
pattern_head(x, 0),
lambda x: One)),
('R003', (
pattern_head(x, 1),
lambda x: x)),
('R004', (
pattern_head(pattern(ScalarPower, x, y), z),
lambda x, y, z: x**(y*z))),
]))
def pull_constfactor_from_sum(x, y, indranges):
bound_symbols = set([r.index_symbol for r in indranges])
if len(x.free_symbols.intersection(bound_symbols)) == 0:
return x * ScalarIndexedSum.create(y, *indranges)
else:
raise CannotSimplify()
ScalarIndexedSum._rules.update(check_rules_dict([
('R001', ( # sum over zero -> zero
pattern_head(Zero, indranges__),
lambda indranges: Zero)),
('R002', ( # pull constant prefactor out of sum
pattern_head(pattern(ScalarTimes, x, y), indranges__),
lambda x, y, indranges:
pull_constfactor_from_sum(x, y, indranges))),
]))
def _unique_ordered_lines(line_numbers):
"""
Given a list of line numbers, return a list in which each line
number is included once and the lines are ordered sequentially.
"""
if len(line_numbers) == 0:
return []
# Ensure lines are unique by putting them in a set
line_set = set(line_numbers)
# Retrieve the list from the set, sort it, and return
return sorted([line for line in line_set])
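A short usage sketch:
# Duplicate line numbers collapse to one entry, sorted ascending.
assert _unique_ordered_lines([5, 3, 5, 1, 3]) == [1, 3, 5]
assert _unique_ordered_lines([]) == []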
def question_detail(request, topic_slug, slug):
"""
A detail view of a Question.
Simply redirects to a detail page for the related :model:`faq.Topic`
(:view:`faq.views.topic_detail`) with the addition of a fragment
identifier that links to the given :model:`faq.Question`.
E.g. ``/faq/topic-slug/#question-slug``.
"""
url = reverse('faq-topic-detail', kwargs={'slug': topic_slug})
return _fragmentify(Question, slug, url)
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
return _dict
def invert_pixel_mask(mask):
'''Invert pixel mask (0->1, 1(and greater)->0).
Parameters
----------
mask : array-like
Mask.
Returns
-------
inverted_mask : array-like
Inverted Mask.
'''
inverted_mask = np.ones(shape=(80, 336), dtype=np.dtype('>u1'))
inverted_mask[mask >= 1] = 0
return inverted_mask
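A usage sketch; note the function always allocates an 80x336 output, so the input mask is expected to have that shape:
import numpy as np
mask = np.zeros((80, 336), dtype=np.uint8)
mask[10, 20] = 1  # one masked pixel
inverted = invert_pixel_mask(mask)
assert inverted[10, 20] == 0 and inverted[0, 0] == 1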
def _validate_arguments(self):
"""method to sanitize model parameters
Parameters
---------
None
Returns
-------
None
"""
if self._has_terms():
[term._validate_arguments() for term in self._terms]
return self
def random_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
"""Sleep a random amount of time between wait_random_min and wait_random_max"""
return random.randint(self._wait_random_min, self._wait_random_max)
def safe_read_file(file_path: Path) -> str:
"""Read a text file. Several text encodings are tried until
the file content is correctly decoded.
:raise GuesslangError: when the file encoding is not supported
:param file_path: path to the input file
:return: text file content
"""
for encoding in FILE_ENCODINGS:
try:
return file_path.read_text(encoding=encoding)
except UnicodeError:
pass # Ignore encoding error
raise GuesslangError('Encoding not supported for {!s}'.format(file_path))
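A usage sketch, assuming FILE_ENCODINGS and GuesslangError are defined in the surrounding module and that the file below exists (the path is hypothetical):
from pathlib import Path
try:
    content = safe_read_file(Path('README.md'))
except GuesslangError as err:
    content = ''
    print(err)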
def get_fermi(self, c, T, rtol=0.01, nstep=50, step=0.1, precision=8):
"""
Finds the fermi level at which the doping concentration at the given
temperature (T) is equal to c. A greedy algorithm is used where the
relative error is minimized by calculating the doping at a grid which
becomes continuously finer.
Args:
c (float): doping concentration. c<0 represents n-type doping and
c>0 represents p-type doping (i.e. majority carriers are holes)
T (float): absolute temperature in Kelvin
rtol (float): maximum acceptable relative error
nstep (int): number of steps checked around a given fermi level
step (float): initial step in fermi level when searching
precision (int): essentially the decimal places of calculated fermi
Returns (float): the fermi level. Note that this is different from the
default dos.efermi.
"""
fermi = self.efermi # initialize target fermi
for _ in range(precision):
frange = np.arange(-nstep, nstep + 1) * step + fermi
calc_doping = np.array([self.get_doping(f, T) for f in frange])
relative_error = abs(calc_doping / c - 1.0)
fermi = frange[np.argmin(relative_error)]
step /= 10.0
if min(relative_error) > rtol:
raise ValueError('Could not find fermi within {}% of c={}'.format(
rtol * 100, c))
return fermi
def stl(A, b):
r"""Shortcut to ``solve_triangular(A, b, lower=True, check_finite=False)``.
Solve linear systems :math:`\mathrm A \mathbf x = \mathbf b` when
:math:`\mathrm A` is a lower-triangular matrix.
Args:
A (array_like): A lower-triangular matrix.
b (array_like): Ordinate values.
Returns:
:class:`numpy.ndarray`: Solution ``x``.
See Also
--------
scipy.linalg.solve_triangular: Solve triangular linear equations.
"""
from scipy.linalg import solve_triangular
A = asarray(A, float)
b = asarray(b, float)
return solve_triangular(A, b, lower=True, check_finite=False)
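A quick usage sketch, assuming `asarray` is imported from numpy in the surrounding module as this function expects:
import numpy as np
A = np.array([[2.0, 0.0], [1.0, 3.0]])  # lower-triangular
b = np.array([2.0, 7.0])
x = stl(A, b)
assert np.allclose(x, [1.0, 2.0])  # 2*1 = 2, 1*1 + 3*2 = 7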
def GetHostMemUsedMB(self):
'''Undocumented.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetHostMemUsedMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
def get_table_info(conn, tablename):
"""Returns TableInfo object"""
r = conn.execute("pragma table_info('{}')".format(tablename))
ret = TableInfo(((row["name"], row) for row in r))
return ret
def wait_for_element_not_present(self, locator):
"""
Synchronization helper to wait until some element is removed from the page
:raises: ElementVisiblityTimeout
"""
for i in range(timeout_seconds):
if self.driver.is_element_present(locator):
time.sleep(1)
else:
break
else:
raise ElementVisiblityTimeout("%s presence timed out" % locator)
return True
def autoset_id(self):
"""
If the :attr:`id_` already has a non-false (false is also the empty
string!) value, this method is a no-op.
Otherwise, the :attr:`id_` attribute is filled with
:data:`RANDOM_ID_BYTES` of random data, encoded by
:func:`aioxmpp.utils.to_nmtoken`.
.. note::
This method only works on subclasses of :class:`StanzaBase` which
define the :attr:`id_` attribute.
"""
try:
self.id_
except AttributeError:
pass
else:
if self.id_:
return
self.id_ = to_nmtoken(random.getrandbits(8*RANDOM_ID_BYTES))
def ip2long(ip):
"""Convert a dotted-quad ip address to a network byte order 32-bit
integer.
>>> ip2long('127.0.0.1')
2130706433
>>> ip2long('127.1')
2130706433
>>> ip2long('127')
2130706432
>>> ip2long('127.0.0.256') is None
True
:param ip: Dotted-quad ip address (eg. '127.0.0.1').
:type ip: str
:returns: Network byte order 32-bit integer or ``None`` if ip is invalid.
"""
if not validate_ip(ip):
return None
quads = ip.split('.')
if len(quads) == 1:
# only a network quad
quads = quads + [0, 0, 0]
elif len(quads) < 4:
# partial form, last supplied quad is host address, rest is network
host = quads[-1:]
quads = quads[:-1] + [0, ] * (4 - len(quads)) + host
lngip = 0
for q in quads:
lngip = (lngip << 8) | int(q)
return lngip
def generate(self):
"""
Generates and returns a numeric captcha image in base64 format.
Saves the correct answer in `session['captcha_answer']`
Use later as:
src = captcha.generate()
<img src="{{src}}">
"""
answer = self.rand.randrange(self.max)
answer = str(answer).zfill(self.digits)
image_data = self.image_generator.generate(answer)
base64_captcha = base64.b64encode(image_data.getvalue()).decode("ascii")
logging.debug('Generated captcha with answer: ' + answer)
session['captcha_answer'] = answer
return base64_captcha
def populate(self, priority, address, rtr, data):
"""
-DB1 last 2 bits = channel
-DB1 first 6 bits = pulses
-DB2-5 = pulse counter
-DB6-7 = ms/pulse
:return: None
"""
assert isinstance(data, bytes)
self.needs_no_rtr(rtr)
self.needs_data(data, 7)
self.set_attributes(priority, address, rtr)
self.channel = (data[0] & 0x03) +1
self.pulses = (data[0] >> 2) * 100
self.counter = (data[1] << 24) + (data[2] << 16) + (data[3] << 8) + data[4]
self.delay = (data[5] << 8) + data[6]
def apply_classifier(self, name, samples=None, subset=None):
"""
Apply a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier to apply.
subset : str
The subset of samples to apply the classifier to.
Returns
-------
name : str
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
c = self.classifiers[name]
labs = c.classifier.ulabels_
with self.pbar.set(total=len(samples), desc='Applying ' + name + ' classifier') as prog:
for s in samples:
d = self.data[s]
try:
f = c.predict(d.focus)
except ValueError:
# in case there's no data
f = np.array([-2] * len(d.Time))
for l in labs:
ind = f == l
d.filt.add(name=name + '_{:.0f}'.format(l),
filt=ind,
info=name + ' ' + c.method + ' classifier',
params=(c.analytes, c.method))
prog.update()
return name
def plot_ranges_from_cli(opts):
"""Parses the mins and maxs arguments from the `plot_posterior` option
group.
Parameters
----------
opts : ArgumentParser
The parsed arguments from the command line.
Returns
-------
mins : dict
Dictionary of parameter name -> specified mins. Only parameters that
were specified in the --mins option will be included; if no parameters
were provided, will return an empty dictionary.
maxs : dict
Dictionary of parameter name -> specified maxs. Only parameters that
were specified in the --maxs option will be included; if no parameters
were provided, will return an empty dictionary.
"""
mins = {}
for x in opts.mins:
x = x.split(':')
if len(x) != 2:
raise ValueError("option --mins not specified correctly; see help")
mins[x[0]] = float(x[1])
maxs = {}
for x in opts.maxs:
x = x.split(':')
if len(x) != 2:
raise ValueError("option --maxs not specified correctly; see help")
maxs[x[0]] = float(x[1])
return mins, maxs
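A usage sketch with a stand-in for the parsed arguments; SimpleNamespace and the parameter names below are purely illustrative, the real caller passes an argparse result:
from types import SimpleNamespace
opts = SimpleNamespace(mins=['mass1:10', 'mass2:10'], maxs=['mass1:80'])
mins, maxs = plot_ranges_from_cli(opts)
assert mins == {'mass1': 10.0, 'mass2': 10.0}
assert maxs == {'mass1': 80.0}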
def dist_iter(self, g_nums, ats_1, ats_2, invalid_error=False):
""" Iterator over selected interatomic distances.
Distances are in Bohrs as with :meth:`dist_single`.
See `above <toc-generators_>`_ for more information on
calling options.
Parameters
----------
g_nums
|int| or length-R iterable |int| or |None| --
Index/indices of the desired geometry/geometries
ats_1
|int| or iterable |int| or |None| --
Index/indices of the first atom(s)
ats_2
|int| or iterable |int| or |None| --
Index/indices of the second atom(s)
invalid_error
|bool|, optional --
If |False| (the default), |None| values are returned for
results corresponding to invalid indices. If |True|,
exceptions are raised per normal.
Yields
------
dist
|npfloat_| --
Interatomic distance in Bohrs between each atom pair of
`ats_1` and `ats_2` from the corresponding geometries
of `g_nums`.
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided.
~exceptions.ValueError
If all iterable objects are not the same length.
"""
# Imports
import numpy as np
from .utils import pack_tups
# Print the function inputs if debug mode is on
if _DEBUG: # pragma: no cover
print("g_nums = {0}".format(g_nums))
print("ats_1 = {0}".format(ats_1))
print("ats_2 = {0}".format(ats_2))
## end if
# Perform the None substitution
arglist = self._none_subst(g_nums, ats_1, ats_2)
# Expand/pack the tuples from the inputs
tups = pack_tups(*arglist)
# Dump the results if debug mode is on
if _DEBUG: # pragma: no cover
print(tups)
## end if
# Construct the generator using the packed tuples. If 'None' expansion
# was used, return None for any invalid indices instead of raising
# an exception.
for tup in tups:
yield self._iter_return(tup, self.dist_single, invalid_error) | Iterator over selected interatomic distances.
Distances are in Bohrs as with :meth:`dist_single`.
See `above <toc-generators_>`_ for more information on
calling options.
Parameters
----------
g_nums
|int| or length-R iterable |int| or |None| --
Index/indices of the desired geometry/geometries
ats_1
|int| or iterable |int| or |None| --
Index/indices of the first atom(s)
ats_2
|int| or iterable |int| or |None| --
Index/indices of the second atom(s)
invalid_error
|bool|, optional --
If |False| (the default), |None| values are returned for
results corresponding to invalid indices. If |True|,
exceptions are raised per normal.
Yields
------
dist
|npfloat_| --
Interatomic distance in Bohrs between each atom pair of
`ats_1` and `ats_2` from the corresponding geometries
of `g_nums`.
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided.
~exceptions.ValueError
If all iterable objects are not the same length. |
def add_contact(self, phone_number: str, first_name: str, last_name: str=None, on_success: callable=None):
"""
Add contact by phone number and name (last_name is optional).
:param phone_number: Valid phone number for contact.
:param first_name: First name to use.
:param last_name: Last name to use. Optional.
:param on_success: Callback to call when adding, will contain success status and the current contact list.
"""
pass | Add contact by phone number and name (last_name is optional).
:param phone_number: Valid phone number for contact.
:param first_name: First name to use.
:param last_name: Last name to use. Optional.
:param on_success: Callback to call when adding, will contain success status and the current contact list. |
def monthly_mean_at_each_ind(monthly_means, sub_monthly_timeseries):
"""Copy monthly mean over each time index in that month.
Parameters
----------
monthly_means : xarray.DataArray
array of monthly means
sub_monthly_timeseries : xarray.DataArray
array of a timeseries at sub-monthly time resolution
Returns
-------
xarray.DataArray with each monthly mean value from `monthly_means` repeated
at each time within that month from `sub_monthly_timeseries`
See Also
--------
monthly_mean_ts : Create timeseries of monthly mean values
"""
time = monthly_means[TIME_STR]
start = time.indexes[TIME_STR][0].replace(day=1, hour=0)
end = time.indexes[TIME_STR][-1]
new_indices = pd.date_range(start=start, end=end, freq='MS')
arr_new = monthly_means.reindex(time=new_indices, method='backfill')
return arr_new.reindex_like(sub_monthly_timeseries, method='pad') | Copy monthly mean over each time index in that month.
Parameters
----------
monthly_means : xarray.DataArray
array of monthly means
sub_monthly_timeseries : xarray.DataArray
array of a timeseries at sub-monthly time resolution
Returns
-------
xarray.DataArray with each monthly mean value from `monthly_means` repeated
at each time within that month from `sub_monthly_timeseries`
See Also
--------
monthly_mean_ts : Create timeseries of monthly mean values |
def matlab_compatible(name):
""" make a channel name compatible with Matlab variable naming
Parameters
----------
name : str
channel name
Returns
-------
compatible_name : str
channel name compatible with Matlab
"""
compatible_name = [ch if ch in ALLOWED_MATLAB_CHARS else "_" for ch in name]
compatible_name = "".join(compatible_name)
if compatible_name[0] not in string.ascii_letters:
compatible_name = "M_" + compatible_name
# max variable name is 63 and 3 chars are reserved
# for get_unique_name in case of multiple channel name occurrences
return compatible_name[:60] | make a channel name compatible with Matlab variable naming
Parameters
----------
name : str
channel name
Returns
-------
compatible_name : str
channel name compatible with Matlab |
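For illustration only, assuming ALLOWED_MATLAB_CHARS contains ASCII letters, digits and the underscore, the sanitisation behaves roughly like this:
import string

ALLOWED_MATLAB_CHARS = set(string.ascii_letters + string.digits + '_')   # assumption
name = '1. Speed [m/s]'
compatible = ''.join(ch if ch in ALLOWED_MATLAB_CHARS else '_' for ch in name)
if compatible[0] not in string.ascii_letters:
    compatible = 'M_' + compatible
print(compatible[:60])   # M_1__Speed__m_s_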
def get_mcu_definition(self, project_file):
""" Parse project file to get mcu definition """
# TODO: check the file extension here to confirm this is a valid IAR project, or
# at least check that the syntax is correct (something IAR defines) and return an error if not
project_file = join(getcwd(), project_file)
ewp_dic = xmltodict.parse(open(project_file), dict_constructor=dict)
mcu = MCU_TEMPLATE
try:
ewp_dic['project']['configuration']
except KeyError:
# validity check for iar project
logging.debug("The project_file %s seems to be not valid .ewp file.")
return mcu
# Fill in only must-have values, fpu will be added if defined for mcu
mcu['tool_specific'] = {
'iar' : {
# MCU selection
'OGChipSelectEditMenu' : {
'state' : [],
},
# we use mcu
'OGCoreOrChip' : {
'state' : [1],
},
}
}
# take configuration[0] when multiple configurations (debug, release, ...) exist,
# otherwise just 'configuration'; for mcu extraction the choice does not matter
try:
index_general = self._get_option(ewp_dic['project']['configuration'][0]['settings'], 'General')
configuration = ewp_dic['project']['configuration'][0]
except KeyError:
index_general = self._get_option(ewp_dic['project']['configuration']['settings'], 'General')
configuration = ewp_dic['project']['configuration']
index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'OGChipSelectEditMenu')
OGChipSelectEditMenu = configuration['settings'][index_general]['data']['option'][index_option]
mcu['tool_specific']['iar']['OGChipSelectEditMenu']['state'].append(OGChipSelectEditMenu['state'].replace('\t', ' ', 1))
# we keep this as the internal version. FPU - version 1, FPU2 version 2.
# TODO:We shall look at IAR versioning to get this right
fileVersion = 1
try:
if self._get_option(configuration['settings'][index_general]['data']['option'], 'FPU2'):
fileVersion = 2
except TypeError:
pass
index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'GBECoreSlave')
GBECoreSlave = configuration['settings'][index_general]['data']['option'][index_option]
mcu['tool_specific']['iar']['GBECoreSlave'] = { 'state': [int(GBECoreSlave['state'])] }
if fileVersion == 2:
index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'GFPUCoreSlave2')
GFPUCoreSlave2 = configuration['settings'][index_general]['data']['option'][index_option]
mcu['tool_specific']['iar']['GFPUCoreSlave2'] = { 'state': [int(GFPUCoreSlave2['state'])] }
index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'CoreVariant')
CoreVariant = configuration['settings'][index_general]['data']['option'][index_option]
mcu['tool_specific']['iar']['CoreVariant'] = { 'state': [int(CoreVariant['state'])] }
else:
index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'GFPUCoreSlave')
GFPUCoreSlave = configuration['settings'][index_general]['data']['option'][index_option]
mcu['tool_specific']['iar']['GFPUCoreSlave'] = { 'state': [int(GFPUCoreSlave['state'])] }
index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'Variant')
Variant = configuration['settings'][index_general]['data']['option'][index_option]
mcu['tool_specific']['iar']['Variant'] = { 'state': [int(Variant['state'])] }
return mcu | Parse project file to get mcu definition |
def _get_f2rx(self, C, r_x, r_1, r_2):
"""
Defines the f2 scaling coefficient defined in equation 10
"""
drx = (r_x - r_1) / (r_2 - r_1)
return self.CONSTS["h4"] + (C["h5"] * drx) + (C["h6"] * (drx ** 2.)) | Defines the f2 scaling coefficient defined in equation 10 |
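A tiny numeric sketch of the same quadratic scaling; the h4-h6 coefficients and distance terms below are invented purely for illustration:
h4, h5, h6 = 1.0, -0.5, 0.2          # hypothetical stand-ins for CONSTS['h4'], C['h5'], C['h6']
r_x, r_1, r_2 = 60.0, 40.0, 80.0     # hypothetical distance terms
drx = (r_x - r_1) / (r_2 - r_1)      # normalised distance, here 0.5
f2 = h4 + h5 * drx + h6 * drx ** 2.  # 1.0 - 0.25 + 0.05 = 0.8
print(f2)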
def warn_on_var_indirection(self) -> bool:
"""If True, warn when a Var reference cannot be direct linked (iff
use_var_indirection is False).."""
return not self.use_var_indirection and self._opts.entry(
WARN_ON_VAR_INDIRECTION, True
If True, warn when a Var reference cannot be directly linked (iff
use_var_indirection is False). |
def _handleCallInitiated(self, regexMatch, callId=None, callType=1):
""" Handler for "outgoing call initiated" event notification line """
if self._dialEvent:
if regexMatch:
groups = regexMatch.groups()
# Set self._dialResponse to (callId, callType)
if len(groups) >= 2:
self._dialResponse = (int(groups[0]) , int(groups[1]))
else:
self._dialResponse = (int(groups[0]), 1) # assume call type: VOICE
else:
self._dialResponse = callId, callType
self._dialEvent.set() | Handler for "outgoing call initiated" event notification line |
def process_raw_data(cls, raw_data):
"""Create a new model using raw API response."""
properties = raw_data["properties"]
raw_content = properties.get("addressSpace", None)
if raw_content is not None:
address_space = AddressSpace.from_raw_data(raw_content)
properties["addressSpace"] = address_space
raw_content = properties.get("dhcpOptions")
if raw_content is not None:
dhcp_options = DHCPOptions.from_raw_data(raw_content)
properties["dhcpOptions"] = dhcp_options
raw_content = properties.get("logicalNetwork", None)
if raw_content is not None:
properties["logicalNetwork"] = Resource.from_raw_data(raw_content)
subnetworks = []
for raw_subnet in properties.get("subnets", []):
raw_subnet["parentResourceID"] = raw_data["resourceId"]
subnetworks.append(SubNetworks.from_raw_data(raw_subnet))
properties["subnets"] = subnetworks
return super(VirtualNetworks, cls).process_raw_data(raw_data) | Create a new model using raw API response. |
def _trade(self, security, price=0, amount=0, volume=0, entrust_bs="buy"):
"""
Rebalance the portfolio position.
:param security:
:param price:
:param amount:
:param volume:
:param entrust_bs:
:return:
"""
stock = self._search_stock_info(security)
balance = self.get_balance()[0]
if stock is None:
raise exceptions.TradeError(u"没有查询要操作的股票信息")
if not volume:
volume = int(float(price) * amount)  # may need to round to an integer
if balance["current_balance"] < volume and entrust_bs == "buy":
raise exceptions.TradeError(u"没有足够的现金进行操作")
if stock["flag"] != 1:
raise exceptions.TradeError(u"未上市、停牌、涨跌停、退市的股票无法操作。")
if volume == 0:
raise exceptions.TradeError(u"操作金额不能为零")
# compute the weight of this rebalance
weight = volume / balance["asset_balance"] * 100
weight = round(weight, 2)
# fetch the existing position info
position_list = self._get_position()
# holdings after the adjustment
is_have = False
for position in position_list:
if position["stock_id"] == stock["stock_id"]:
is_have = True
position["proactive"] = True
old_weight = position["weight"]
if entrust_bs == "buy":
position["weight"] = weight + old_weight
else:
if weight > old_weight:
raise exceptions.TradeError(u"操作数量大于实际可卖出数量")
else:
position["weight"] = old_weight - weight
position["weight"] = round(position["weight"], 2)
if not is_have:
if entrust_bs == "buy":
position_list.append(
{
"code": stock["code"],
"name": stock["name"],
"enName": stock["enName"],
"hasexist": stock["hasexist"],
"flag": stock["flag"],
"type": stock["type"],
"current": stock["current"],
"chg": stock["chg"],
"percent": str(stock["percent"]),
"stock_id": stock["stock_id"],
"ind_id": stock["ind_id"],
"ind_name": stock["ind_name"],
"ind_color": stock["ind_color"],
"textname": stock["name"],
"segment_name": stock["ind_name"],
"weight": round(weight, 2),
"url": "/S/" + stock["code"],
"proactive": True,
"price": str(stock["current"]),
}
)
else:
raise exceptions.TradeError(u"没有持有要卖出的股票")
if entrust_bs == "buy":
cash = (
(balance["current_balance"] - volume)
/ balance["asset_balance"]
* 100
)
else:
cash = (
(balance["current_balance"] + volume)
/ balance["asset_balance"]
* 100
)
cash = round(cash, 2)
log.debug("weight:%f, cash:%f", weight, cash)
data = {
"cash": cash,
"holdings": str(json.dumps(position_list)),
"cube_symbol": str(self.account_config["portfolio_code"]),
"segment": 1,
"comment": "",
}
try:
resp = self.s.post(self.config["rebalance_url"], data=data)
# pylint: disable=broad-except
except Exception as e:
log.warning("调仓失败: %s ", e)
return None
else:
log.debug(
"调仓 %s%s: %d", entrust_bs, stock["name"], resp.status_code
)
resp_json = json.loads(resp.text)
if "error_description" in resp_json and resp.status_code != 200:
log.error("调仓错误: %s", resp_json["error_description"])
return [
{
"error_no": resp_json["error_code"],
"error_info": resp_json["error_description"],
}
]
return [
{
"entrust_no": resp_json["id"],
"init_date": self._time_strftime(resp_json["created_at"]),
"batch_no": "委托批号",
"report_no": "申报号",
"seat_no": "席位编号",
"entrust_time": self._time_strftime(
resp_json["updated_at"]
),
"entrust_price": price,
"entrust_amount": amount,
"stock_code": security,
"entrust_bs": "买入",
"entrust_type": "雪球虚拟委托",
"entrust_status": "-",
}
] | Rebalance the portfolio position.
:param security:
:param price:
:param amount:
:param volume:
:param entrust_bs:
:return: |
def configure(cls, name, config, prefix='depot.'):
"""Configures an application depot.
This configures the application wide depot from a settings dictionary.
The settings dictionary is usually loaded from an application configuration
file where all the depot options are specified with a given ``prefix``.
The default ``prefix`` is *depot.*; the minimum required setting
is ``depot.backend``, which specifies the required backend for file storage.
Additional options depend on the chosen backend.
"""
if name in cls._depots:
raise RuntimeError('Depot %s has already been configured' % (name,))
if cls._default_depot is None:
cls._default_depot = name
cls._depots[name] = cls.from_config(config, prefix)
return cls._depots[name] | Configures an application depot.
This configures the application wide depot from a settings dictionary.
The settings dictionary is usually loaded from an application configuration
file where all the depot options are specified with a given ``prefix``.
The default ``prefix`` is *depot.*; the minimum required setting
is ``depot.backend``, which specifies the required backend for file storage.
Additional options depend on the chosen backend. |
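A minimal usage sketch, assuming filedepot's local-filesystem backend; option keys other than ``depot.backend`` depend on the chosen backend:
from depot.manager import DepotManager

settings = {
    'depot.backend': 'depot.io.local.LocalFileStorage',
    'depot.storage_path': '/tmp/filedepot',   # backend-specific option
}
DepotManager.configure('default', settings)   # prefix defaults to 'depot.'
depot = DepotManager.get('default')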
def decrypt(self, data):
"""
Decrypts an encrypted (SK, 46) IKE payload using self.SK_er
:param data: Encrypted IKE payload including headers (payloads.SK())
:return: next_payload, data_containing_payloads
:raise IkeError: If packet is corrupted.
"""
next_payload, is_critical, payload_len = const.PAYLOAD_HEADER.unpack(data[:const.PAYLOAD_HEADER.size])
next_payload = payloads.Type(next_payload)
logger.debug("next payload: {!r}".format(next_payload))
try:
iv_len = 16
iv = bytes(data[const.PAYLOAD_HEADER.size:const.PAYLOAD_HEADER.size + iv_len])
ciphertext = bytes(data[const.PAYLOAD_HEADER.size + iv_len:payload_len]) # HMAC size
except IndexError:
raise IkeError('Unable to decrypt: Malformed packet')
logger.debug('IV: {}'.format(dump(iv)))
logger.debug('CIPHERTEXT: {}'.format(dump(ciphertext)))
# Decrypt
cipher = Camellia(self.SK_er, iv=iv)
decrypted = cipher.decrypt(ciphertext)
logger.debug("Decrypted packet from responder: {}".format(dump(decrypted)))
return next_payload, decrypted | Decrypts an encrypted (SK, 46) IKE payload using self.SK_er
:param data: Encrypted IKE payload including headers (payloads.SK())
:return: next_payload, data_containing_payloads
:raise IkeError: If packet is corrupted. |
def _get_options(ret=None):
'''
Returns options used for the MySQL connection.
'''
defaults = {'host': 'salt',
'user': 'salt',
'pass': 'salt',
'db': 'salt',
'port': 3306,
'ssl_ca': None,
'ssl_cert': None,
'ssl_key': None}
attrs = {'host': 'host',
'user': 'user',
'pass': 'pass',
'db': 'db',
'port': 'port',
'ssl_ca': 'ssl_ca',
'ssl_cert': 'ssl_cert',
'ssl_key': 'ssl_key'}
_options = salt.returners.get_returner_options(__virtualname__,
ret,
attrs,
__salt__=__salt__,
__opts__=__opts__,
defaults=defaults)
# post processing
for k, v in six.iteritems(_options):
if isinstance(v, six.string_types) and v.lower() == 'none':
# Ensure 'None' is rendered as None
_options[k] = None
if k == 'port':
# Ensure port is an int
_options[k] = int(v)
return _options | Returns options used for the MySQL connection. |
def setClockShowDate(kvalue, **kwargs):
'''
Set whether the date is visible in the clock
CLI Example:
.. code-block:: bash
salt '*' gnome.setClockShowDate <True|False> user=<username>
'''
if kvalue is not True and kvalue is not False:
return False
_gsession = _GSettings(user=kwargs.get('user'),
schema='org.gnome.desktop.interface',
key='clock-show-date')
return _gsession._set(kvalue) | Set whether the date is visible in the clock
CLI Example:
.. code-block:: bash
salt '*' gnome.setClockShowDate <True|False> user=<username> |
async def clear(self, using_db=None) -> None:
"""
Clears ALL relations.
"""
db = using_db if using_db else self.model._meta.db
through_table = Table(self.field.through)
query = (
db.query_class.from_(through_table)
.where(getattr(through_table, self.field.backward_key) == self.instance.id)
.delete()
)
await db.execute_query(str(query)) | Clears ALL relations. |
def _getArrays(items, attr, defaultValue):
"""Return arrays with equal size of item attributes from a list of sorted
"items" for fast and convenient data processing.
:param attr: list of item attributes that should be added to the returned
array.
:param defaultValue: if an item is missing an attribute, the "defaultValue"
is added to the array instead.
:returns: {'attribute1': numpy.array([attributeValue1, ...]), ...}
"""
arrays = dict([(key, []) for key in attr])
for item in items:
for key in attr:
arrays[key].append(getattr(item, key, defaultValue))
for key in [_ for _ in viewkeys(arrays)]:
arrays[key] = numpy.array(arrays[key])
return arrays | Return arrays with equal size of item attributes from a list of sorted
"items" for fast and convenient data processing.
:param attr: list of item attributes that should be added to the returned
array.
:param defaultValue: if an item is missing an attribute, the "defaultValue"
is added to the array instead.
:returns: {'attribute1': numpy.array([attributeValue1, ...]), ...} |
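A self-contained sketch of the attribute-to-array packing, with a simple namedtuple standing in for the real item objects:
from collections import namedtuple
import numpy

Item = namedtuple('Item', ['mz', 'intensity'])
items = [Item(100.5, 2000.0), Item(200.1, 500.0)]
arrays = {key: [] for key in ('mz', 'intensity', 'charge')}
for item in items:
    for key in arrays:
        arrays[key].append(getattr(item, key, 0))          # 0 as the defaultValue
arrays = {key: numpy.array(vals) for key, vals in arrays.items()}
print(arrays['mz'], arrays['charge'])                       # [100.5 200.1] [0 0]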
def merge_leaderboards(self, destination, keys, aggregate='SUM'):
'''
Merge leaderboards given by keys with this leaderboard into a named destination leaderboard.
@param destination [String] Destination leaderboard name.
@param keys [Array] Leaderboards to be merged with the current leaderboard.
@param options [Hash] Options for merging the leaderboards.
'''
keys.insert(0, self.leaderboard_name)
self.redis_connection.zunionstore(destination, keys, aggregate) | Merge leaderboards given by keys with this leaderboard into a named destination leaderboard.
@param destination [String] Destination leaderboard name.
@param keys [Array] Leaderboards to be merged with the current leaderboard.
@param options [Hash] Options for merging the leaderboards. |
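A usage sketch, assuming the Python ``leaderboard`` package with a running Redis instance behind it:
from leaderboard import Leaderboard

weekly = Leaderboard('weekly_scores')
# Union 'weekly_scores' with 'monthly_scores' into 'combined_scores',
# summing the scores of members present in both leaderboards.
weekly.merge_leaderboards('combined_scores', ['monthly_scores'], aggregate='SUM')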
def load_cml(cml_filename):
"""Load the molecules from a CML file
Argument:
| ``cml_filename`` -- The filename of a CML file.
Returns a list of molecule objects with optional molecular graph
attribute and extra attributes.
"""
parser = make_parser()
parser.setFeature(feature_namespaces, 0)
dh = CMLMoleculeLoader()
parser.setContentHandler(dh)
parser.parse(cml_filename)
return dh.molecules | Load the molecules from a CML file
Argument:
| ``cml_filename`` -- The filename of a CML file.
Returns a list of molecule objects with optional molecular graph
attribute and extra attributes. |
def set_uid(self):
"""Change the user of the running process"""
if self.user:
uid = getpwnam(self.user).pw_uid
try:
os.setuid(uid)
except Exception:
message = ('Unable to switch ownership to {0}:{1}. ' +
'Did you start the daemon as root?')
print(message.format(self.user, self.group))
sys.exit(1) | Change the user of the running process |
def options(self, **options):
"""Adds input options for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
for k in options:
self._jreader = self._jreader.option(k, to_str(options[k]))
return self | Adds input options for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone. |
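A usage sketch against PySpark's DataFrameReader; an active SparkSession is assumed and the option names shown are common CSV reader options:
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = (spark.read
      .options(header='true', inferSchema='true', timeZone='UTC')
      .csv('/tmp/example.csv'))   # hypothetical input path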
def copy(self, filename, id_=-1, pre_callback=None, post_callback=None):
"""Copy a package or script to all repos.
Determines appropriate location (for file shares) and type based
on file extension.
Args:
filename: String path to the local file to copy.
id_: Package or Script object ID to target. For use with JDS
and CDP DP's only. If uploading a package that does not
have a corresponding object, use id_ of -1, which is the
default.
pre_callback: Func to call before each distribution point
starts copying. Should accept a Repository connection
dictionary as a parameter. Will be called like:
`pre_callback(repo.connection)`
post_callback: Func to call after each distribution point
finishes copying. Should accept a Repository connection
dictionary as a parameter. Will be called like:
`post_callback(repo.connection)`
"""
for repo in self._children:
if is_package(filename):
copy_method = repo.copy_pkg
else:
# All other file types can go to scripts.
copy_method = repo.copy_script
if pre_callback:
pre_callback(repo.connection)
copy_method(filename, id_)
if post_callback:
post_callback(repo.connection) | Copy a package or script to all repos.
Determines appropriate location (for file shares) and type based
on file extension.
Args:
filename: String path to the local file to copy.
id_: Package or Script object ID to target. For use with JDS
and CDP DP's only. If uploading a package that does not
have a corresponding object, use id_ of -1, which is the
default.
pre_callback: Func to call before each distribution point
starts copying. Should accept a Repository connection
dictionary as a parameter. Will be called like:
`pre_callback(repo.connection)`
post_callback: Func to call after each distribution point
finishes copying. Should accept a Repository connection
dictionary as a parameter. Will be called like:
`post_callback(repo.connection)` |
def authenticate(cmd_args, endpoint='', force=False):
"""Returns an OAuth token that can be passed to the server for
identification. If FORCE is False, it will attempt to use a cached token
or refresh the OAuth token.
"""
server = server_url(cmd_args)
network.check_ssl()
access_token = None
try:
assert not force
access_token = refresh_local_token(server)
except Exception:
print('Performing authentication')
access_token = perform_oauth(get_code, cmd_args, endpoint)
email = display_student_email(cmd_args, access_token)
if not email:
log.warning('Could not get login email. Try logging in again.')
log.debug('Authenticated with access token={}'.format(access_token))
return access_token | Returns an OAuth token that can be passed to the server for
identification. If FORCE is False, it will attempt to use a cached token
or refresh the OAuth token. |
def gps_0(self):
"""
GPS position information (:py:class:`GPSInfo`).
"""
return GPSInfo(self._eph, self._epv, self._fix_type, self._satellites_visible) | GPS position information (:py:class:`GPSInfo`). |
def _next_dir_gen(self, root):
"""Generator for next directory element in the document.
Args:
root: root element in the XML tree.
Yields:
GCSFileStat for the next directory.
"""
for e in root.getiterator(common._T_COMMON_PREFIXES):
yield common.GCSFileStat(
self._path + '/' + e.find(common._T_PREFIX).text,
st_size=None, etag=None, st_ctime=None, is_dir=True)
e.clear()
yield None | Generator for next directory element in the document.
Args:
root: root element in the XML tree.
Yields:
GCSFileStat for the next directory. |
def cell(self, row_idx, col_idx):
"""Return cell at *row_idx*, *col_idx*.
Return value is an instance of |_Cell|. *row_idx* and *col_idx* are
zero-based, e.g. cell(0, 0) is the top, left cell in the table.
"""
return _Cell(self._tbl.tc(row_idx, col_idx), self) | Return cell at *row_idx*, *col_idx*.
Return value is an instance of |_Cell|. *row_idx* and *col_idx* are
zero-based, e.g. cell(0, 0) is the top, left cell in the table. |
def individuals(self, ind_ids=None):
"""Return information about individuals
Args:
ind_ids (list(str)): List of individual ids
Returns:
individuals (Iterable): Iterable with Individuals
"""
if ind_ids:
for ind_id in ind_ids:
for ind in self.individual_objs:
if ind.ind_id == ind_id:
yield ind
else:
for ind in self.individual_objs:
yield ind | Return information about individuals
Args:
ind_ids (list(str)): List of individual ids
Returns:
individuals (Iterable): Iterable with Individuals |
def write(self, handle):
'''Write metadata and point + analog frames to a file handle.
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle.
'''
if not self._frames:
return
def add(name, desc, bpe, format, bytes, *dimensions):
group.add_param(name,
desc=desc,
bytes_per_element=bpe,
bytes=struct.pack(format, bytes),
dimensions=list(dimensions))
def add_str(name, desc, bytes, *dimensions):
group.add_param(name,
desc=desc,
bytes_per_element=-1,
bytes=bytes.encode('utf-8'),
dimensions=list(dimensions))
def add_empty_array(name, desc, bpe):
group.add_param(name, desc=desc, bytes_per_element=bpe, dimensions=[0])
points, analog = self._frames[0]
ppf = len(points)
# POINT group
group = self.add_group(1, 'POINT', 'POINT group')
add('USED', 'Number of 3d markers', 2, '<H', ppf)
add('FRAMES', 'frame count', 2, '<H', min(65535, len(self._frames)))
add('DATA_START', 'data block number', 2, '<H', 0)
add('SCALE', '3d scale factor', 4, '<f', self._point_scale)
add('RATE', '3d data capture rate', 4, '<f', self._point_rate)
add_str('X_SCREEN', 'X_SCREEN parameter', '+X', 2)
add_str('Y_SCREEN', 'Y_SCREEN parameter', '+Y', 2)
add_str('UNITS', '3d data units', self._point_units, len(self._point_units))
add_str('LABELS', 'labels', ''.join('M%03d ' % i for i in range(ppf)), 5, ppf)
add_str('DESCRIPTIONS', 'descriptions', ' ' * 16 * ppf, 16, ppf)
# ANALOG group
group = self.add_group(2, 'ANALOG', 'ANALOG group')
add('USED', 'analog channel count', 2, '<H', analog.shape[0])
add('RATE', 'analog samples per 3d frame', 4, '<f', analog.shape[1])
add('GEN_SCALE', 'analog general scale factor', 4, '<f', self._gen_scale)
add_empty_array('SCALE', 'analog channel scale factors', 4)
add_empty_array('OFFSET', 'analog channel offsets', 2)
# TRIAL group
group = self.add_group(3, 'TRIAL', 'TRIAL group')
add('ACTUAL_START_FIELD', 'actual start frame', 2, '<I', 1, 2)
add('ACTUAL_END_FIELD', 'actual end frame', 2, '<I', len(self._frames), 2)
# sync parameter information to header.
blocks = self.parameter_blocks()
self.get('POINT:DATA_START').bytes = struct.pack('<H', 2 + blocks)
self.header.data_block = 2 + blocks
self.header.frame_rate = self._point_rate
self.header.last_frame = min(len(self._frames), 65535)
self.header.point_count = ppf
self.header.analog_count = np.prod(analog.shape)
self.header.analog_per_frame = analog.shape[0]
self.header.scale_factor = self._point_scale
self._write_metadata(handle)
self._write_frames(handle) | Write metadata and point + analog frames to a file handle.
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle. |
def run(**options):
"""
_run_
Run the dockerstache process to render templates
based on the options provided
If extend_context is passed as options it will be used to
extend the context with the contents of the dictionary provided
via context.update(extend_context)
"""
with Dotfile(options) as conf:
if conf['context'] is None:
msg = "No context file has been provided"
LOGGER.error(msg)
raise RuntimeError(msg)
if not os.path.exists(conf['context_path']):
msg = "Context file {} not found".format(conf['context_path'])
LOGGER.error(msg)
raise RuntimeError(msg)
LOGGER.info(
(
"{{dockerstache}}: In: {}\n"
"{{dockerstache}}: Out: {}\n"
"{{dockerstache}}: Context: {}\n"
"{{dockerstache}}: Defaults: {}\n"
).format(conf['input'], conf['output'], conf['context'], conf['defaults'])
)
context = Context(conf['context'], conf['defaults'])
context.load()
if 'extend_context' in options:
LOGGER.info("{{dockerstache}} Extended context provided")
context.update(options['extend_context'])
process_templates(
conf['input'],
conf['output'],
context
)
if conf['inclusive']:
process_copies(
conf['input'],
conf['output'],
conf['exclude']
)
return dict(conf) | _run_
Run the dockerstache process to render templates
based on the options provided
If extend_context is passed as options it will be used to
extend the context with the contents of the dictionary provided
via context.update(extend_context) |
async def get_status(self, filters=None, utc=False):
"""Return the status of the model.
:param str filters: Optional list of applications, units, or machines
to include, which can use wildcards ('*').
:param bool utc: Display time as UTC in RFC3339 format
"""
client_facade = client.ClientFacade.from_connection(self.connection())
return await client_facade.FullStatus(filters) | Return the status of the model.
:param str filters: Optional list of applications, units, or machines
to include, which can use wildcards ('*').
:param bool utc: Display time as UTC in RFC3339 format |
def pad_shape_right_with_ones(x, ndims):
"""Maybe add `ndims` ones to `x.shape` on the right.
If `ndims` is zero, this is a no-op; otherwise, we will create and return a
new `Tensor` whose shape is that of `x` with `ndims` ones concatenated on the
right side. If the shape of `x` is known statically, the shape of the return
value will be as well.
Args:
x: The `Tensor` we'll return a reshaping of.
ndims: Python `integer` number of ones to pad onto `x.shape`.
Returns:
If `ndims` is zero, `x`; otherwise, a `Tensor` whose shape is that of `x`
with `ndims` ones concatenated on the right side. If possible, returns a
`Tensor` whose shape is known statically.
Raises:
ValueError: if `ndims` is not a Python `integer` greater than or equal to
zero.
"""
if not (isinstance(ndims, int) and ndims >= 0):
raise ValueError(
'`ndims` must be a Python `integer` greater than or equal to zero. Got: {}'
.format(ndims))
if ndims == 0:
return x
x = tf.convert_to_tensor(value=x)
original_shape = x.shape
new_shape = distribution_util.pad(
tf.shape(input=x), axis=0, back=True, value=1, count=ndims)
x = tf.reshape(x, new_shape)
x.set_shape(original_shape.concatenate([1]*ndims))
return x | Maybe add `ndims` ones to `x.shape` on the right.
If `ndims` is zero, this is a no-op; otherwise, we will create and return a
new `Tensor` whose shape is that of `x` with `ndims` ones concatenated on the
right side. If the shape of `x` is known statically, the shape of the return
value will be as well.
Args:
x: The `Tensor` we'll return a reshaping of.
ndims: Python `integer` number of ones to pad onto `x.shape`.
Returns:
If `ndims` is zero, `x`; otherwise, a `Tensor` whose shape is that of `x`
with `ndims` ones concatenated on the right side. If possible, returns a
`Tensor` whose shape is known statically.
Raises:
ValueError: if `ndims` is not a Python `integer` greater than or equal to
zero. |
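A small sketch of the intended effect, written as a plain TensorFlow reshape rather than the helper itself: a tensor of shape [3, 4] padded with ndims=2 trailing ones becomes shape [3, 4, 1, 1]:
import tensorflow as tf

x = tf.zeros([3, 4])
ndims = 2
padded = tf.reshape(x, tf.concat([tf.shape(x), [1] * ndims], axis=0))
print(padded.shape)   # (3, 4, 1, 1) when run eagerly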
def get(self, sid):
"""
Constructs a CredentialListContext
:param sid: Fetch by unique credential list Sid
:returns: twilio.rest.api.v2010.account.sip.credential_list.CredentialListContext
:rtype: twilio.rest.api.v2010.account.sip.credential_list.CredentialListContext
"""
return CredentialListContext(self._version, account_sid=self._solution['account_sid'], sid=sid, ) | Constructs a CredentialListContext
:param sid: Fetch by unique credential list Sid
:returns: twilio.rest.api.v2010.account.sip.credential_list.CredentialListContext
:rtype: twilio.rest.api.v2010.account.sip.credential_list.CredentialListContext |
def fields2jsonschema(self, fields, schema=None, use_refs=True, dump=True, name=None):
"""Return the JSON Schema Object for a given marshmallow
:class:`Schema <marshmallow.Schema>`. Schema may optionally provide the ``title`` and
``description`` class Meta options.
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#schemaObject
Example: ::
class UserSchema(Schema):
_id = fields.Int()
email = fields.Email(description='email address of the user')
name = fields.Str()
class Meta:
title = 'User'
description = 'A registered user'
OpenAPI.schema2jsonschema(UserSchema)
# {
# 'title': 'User', 'description': 'A registered user',
# 'properties': {
# 'name': {'required': False,
# 'description': '',
# 'type': 'string'},
# '_id': {'format': 'int32',
# 'required': False,
# 'description': '',
# 'type': 'integer'},
# 'email': {'format': 'email',
# 'required': False,
# 'description': 'email address of the user',
# 'type': 'string'}
# }
# }
:param Schema schema: A marshmallow Schema instance or a class object
:rtype: dict, a JSON Schema Object
"""
Meta = getattr(schema, 'Meta', None)
if getattr(Meta, 'additional', None):
declared_fields = set(schema._declared_fields.keys())
if set(getattr(Meta, 'additional', set())) > declared_fields:
import warnings
warnings.warn(
'Only explicitly-declared fields will be included in the Schema Object. '
'Fields defined in Meta.fields or Meta.additional are ignored.',
)
jsonschema = {
'type': 'object',
'properties': (OrderedLazyDict() if getattr(Meta, 'ordered', None)
else LazyDict()),
}
exclude = set(getattr(Meta, 'exclude', []))
for field_name, field_obj in iteritems(fields):
if field_name in exclude or (field_obj.dump_only and not dump):
continue
observed_field_name = self._observed_name(field_obj, field_name)
prop_func = lambda field_obj=field_obj: self.field2property( # flake8: noqa
field_obj, use_refs=use_refs, dump=dump, name=name,
)
jsonschema['properties'][observed_field_name] = prop_func
partial = getattr(schema, 'partial', None)
if field_obj.required:
if not partial or (is_collection(partial) and field_name not in partial):
jsonschema.setdefault('required', []).append(observed_field_name)
if 'required' in jsonschema:
jsonschema['required'].sort()
if Meta is not None:
if hasattr(Meta, 'title'):
jsonschema['title'] = Meta.title
if hasattr(Meta, 'description'):
jsonschema['description'] = Meta.description
if getattr(schema, 'many', False):
jsonschema = {
'type': 'array',
'items': jsonschema,
}
return jsonschema | Return the JSON Schema Object for a given marshmallow
:class:`Schema <marshmallow.Schema>`. Schema may optionally provide the ``title`` and
``description`` class Meta options.
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#schemaObject
Example: ::
class UserSchema(Schema):
_id = fields.Int()
email = fields.Email(description='email address of the user')
name = fields.Str()
class Meta:
title = 'User'
description = 'A registered user'
OpenAPI.schema2jsonschema(UserSchema)
# {
# 'title': 'User', 'description': 'A registered user',
# 'properties': {
# 'name': {'required': False,
# 'description': '',
# 'type': 'string'},
# '_id': {'format': 'int32',
# 'required': False,
# 'description': '',
# 'type': 'integer'},
# 'email': {'format': 'email',
# 'required': False,
# 'description': 'email address of the user',
# 'type': 'string'}
# }
# }
:param Schema schema: A marshmallow Schema instance or a class object
:rtype: dict, a JSON Schema Object |
def parser():
"""Return search query parser."""
query_parser = current_app.config['COLLECTIONS_QUERY_PARSER']
if isinstance(query_parser, six.string_types):
query_parser = import_string(query_parser)
return query_parser | Return search query parser. |
def _really_start_hb(self):
"""callback for delayed heartbeat start
Only start the hb loop if we haven't been closed during the wait.
"""
if self._beating and not self.hb_stream.closed():
self._hb_periodic_callback.start() | callback for delayed heartbeat start
Only start the hb loop if we haven't been closed during the wait. |
def find_permission_view_menu(self, permission_name, view_menu_name):
"""
Finds and returns a PermissionView by names
"""
permission = self.find_permission(permission_name)
view_menu = self.find_view_menu(view_menu_name)
if permission and view_menu:
return self.permissionview_model.objects(
permission=permission, view_menu=view_menu
).first() | Finds and returns a PermissionView by names |
def format(self, exclude_class=False):
"""Format this exception as a string including class name.
Args:
exclude_class (bool): Whether to exclude the exception class
name when formatting this exception
Returns:
string: a multiline string with the message, class name and
key value parameters passed to create the exception.
"""
if exclude_class:
msg = self.msg
else:
msg = "%s: %s" % (self.__class__.__name__, self.msg)
if len(self.params) != 0:
paramstring = "\n".join([str(key) + ": " + str(val) for key, val in self.params.items()])
msg += "\nAdditional Information:\n" + paramstring
return msg | Format this exception as a string including class name.
Args:
exclude_class (bool): Whether to exclude the exception class
name when formatting this exception
Returns:
string: a multiline string with the message, class name and
key value parameters passed to create the exception. |
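A rough sketch of the resulting string for a hypothetical exception class named HardwareError with msg='timeout' and params={'port': 8080}:
msg = 'timeout'
params = {'port': 8080}
formatted = '%s: %s' % ('HardwareError', msg)            # class name included
if params:
    formatted += '\nAdditional Information:\n' + '\n'.join(
        str(key) + ': ' + str(val) for key, val in params.items())
print(formatted)
# HardwareError: timeout
# Additional Information:
# port: 8080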
def _load_data(self, group, record_offset=0, record_count=None):
""" get group's data block bytes"""
has_yielded = False
offset = 0
_count = record_count
channel_group = group.channel_group
if group.data_location == v23c.LOCATION_ORIGINAL_FILE:
# go to the first data block of the current data group
stream = self._file
else:
stream = self._tempfile
record_offset *= channel_group.samples_byte_nr
# go to the first data block of the current data group
if group.sorted:
samples_size = channel_group.samples_byte_nr
if not samples_size:
yield b"", 0, _count
has_yielded = True
else:
if self._read_fragment_size:
split_size = self._read_fragment_size // samples_size
split_size *= samples_size
else:
channels_nr = len(group.channels)
y_axis = CONVERT
idx = searchsorted(CHANNEL_COUNT, channels_nr, side="right") - 1
if idx < 0:
idx = 0
split_size = y_axis[idx]
split_size = split_size // samples_size
split_size *= samples_size
if split_size == 0:
split_size = samples_size
blocks = iter(group.data_blocks)
cur_size = 0
data = []
while True:
try:
info = next(blocks)
address, size = info.address, info.size
current_address = address
except StopIteration:
break
if offset + size < record_offset + 1:
offset += size
continue
stream.seek(address)
if offset < record_offset:
delta = record_offset - offset
stream.read(delta)
current_address += delta
size -= delta
offset = record_offset
while size >= split_size - cur_size:
stream.seek(current_address)
if data:
data.append(stream.read(split_size - cur_size))
yield b"".join(data), offset, _count
has_yielded = True
current_address += split_size - cur_size
else:
yield stream.read(split_size), offset, _count
has_yielded = True
current_address += split_size
offset += split_size
size -= split_size - cur_size
data = []
cur_size = 0
if size:
stream.seek(current_address)
data.append(stream.read(size))
cur_size += size
offset += size
if data:
yield b"".join(data), offset, _count
has_yielded = True
elif not offset:
yield b"", 0, _count
has_yielded = True
if not has_yielded:
yield b"", 0, _count
else:
record_id = group.channel_group.record_id
cg_size = group.record_size
if group.data_group.record_id_len <= 2:
record_id_nr = group.data_group.record_id_len
else:
record_id_nr = 0
cg_data = []
blocks = group.data_blocks
for info in blocks:
address, size = info.address, info.size
stream.seek(address)
data = stream.read(size)
i = 0
while i < size:
rec_id = data[i]
# skip record id
i += 1
rec_size = cg_size[rec_id]
if rec_id == record_id:
rec_data = data[i : i + rec_size]
cg_data.append(rec_data)
# consider the second record ID if it exists
if record_id_nr == 2:
i += rec_size + 1
else:
i += rec_size
cg_data = b"".join(cg_data)
size = len(cg_data)
if size:
if offset + size < record_offset + 1:
offset += size
continue
if offset < record_offset:
delta = record_offset - offset
size -= delta
offset = record_offset
yield cg_data, offset, _count
has_yielded = True
offset += size
if not has_yielded:
yield b"", 0, _count | get group's data block bytes |
def detect_phantomjs(version='2.1'):
''' Detect if PhantomJS is available in PATH, at a minimum version.
Args:
version (str, optional) :
Required minimum version for PhantomJS (mostly for testing)
Returns:
str, path to PhantomJS
'''
if settings.phantomjs_path() is not None:
phantomjs_path = settings.phantomjs_path()
else:
if hasattr(shutil, "which"):
phantomjs_path = shutil.which("phantomjs") or "phantomjs"
else:
# Python 2 has no shutil.which; rely on the PATH environment variable and attempt to run it directly
phantomjs_path = "phantomjs"
try:
proc = Popen([phantomjs_path, "--version"], stdout=PIPE, stderr=PIPE)
proc.wait()
out = proc.communicate()
if len(out[1]) > 0:
raise RuntimeError('Error encountered in PhantomJS detection: %r' % out[1].decode('utf8'))
required = V(version)
installed = V(out[0].decode('utf8'))
if installed < required:
raise RuntimeError('PhantomJS version too old. Version>=%s required, installed: %s' % (required, installed))
except OSError:
raise RuntimeError('PhantomJS is not present in PATH or BOKEH_PHANTOMJS_PATH. Try "conda install phantomjs" or \
"npm install -g phantomjs-prebuilt"')
return phantomjs_path | Detect if PhantomJS is avaiable in PATH, at a minimum version.
Args:
version (str, optional) :
Required minimum version for PhantomJS (mostly for testing)
Returns:
str, path to PhantomJS |
def variable_map_items(variable_map):
"""Yields an iterator over (string, variable) pairs in the variable map.
In general, variable maps map variable names to either a `tf.Variable`, or
list of `tf.Variable`s (in case of sliced variables).
Args:
variable_map: dict, variable map over which to iterate.
Yields:
(string, tf.Variable) pairs.
"""
for key, var_or_vars in six.iteritems(variable_map):
if isinstance(var_or_vars, (list, tuple)):
for variable in var_or_vars:
yield key, variable
else:
yield key, var_or_vars | Yields an iterator over (string, variable) pairs in the variable map.
In general, variable maps map variable names to either a `tf.Variable`, or
list of `tf.Variable`s (in case of sliced variables).
Args:
variable_map: dict, variable map over which to iterate.
Yields:
(string, tf.Variable) pairs. |
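A self-contained sketch of the flattening behaviour, using plain strings in place of tf.Variable objects and dict.items() instead of six:
variable_map = {'w': 'w_var', 'b': ['b_slice_0', 'b_slice_1']}   # 'b' is a sliced variable

def flatten(variable_map):
    for key, var_or_vars in variable_map.items():
        if isinstance(var_or_vars, (list, tuple)):
            for variable in var_or_vars:
                yield key, variable
        else:
            yield key, var_or_vars

print(sorted(flatten(variable_map)))
# [('b', 'b_slice_0'), ('b', 'b_slice_1'), ('w', 'w_var')]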
def _add_mac_token(self, uri, http_method='GET', body=None,
headers=None, token_placement=AUTH_HEADER, ext=None, **kwargs):
"""Add a MAC token to the request authorization header.
Warning: MAC token support is experimental as the spec is not yet stable.
"""
if token_placement != AUTH_HEADER:
raise ValueError("Invalid token placement.")
headers = tokens.prepare_mac_header(self.access_token, uri,
self.mac_key, http_method, headers=headers, body=body, ext=ext,
hash_algorithm=self.mac_algorithm, **kwargs)
return uri, headers, body | Add a MAC token to the request authorization header.
Warning: MAC token support is experimental as the spec is not yet stable. |
def do_one_iteration(self):
"""step eventloop just once"""
if self.control_stream:
self.control_stream.flush()
for stream in self.shell_streams:
# handle at most one request per iteration
stream.flush(zmq.POLLIN, 1)
stream.flush(zmq.POLLOUT) | step eventloop just once |
def list_installed():
'''
Return a list of all installed kernels.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.list_installed
'''
result = __salt__['pkg.version'](_package_name(), versions_as_list=True)
if result is None:
return []
if six.PY2:
return sorted(result, cmp=_cmp_version)
else:
return sorted(result, key=functools.cmp_to_key(_cmp_version)) | Return a list of all installed kernels.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.list_installed |
def check_cv(cv=3, y=None, classifier=False):
"""Dask aware version of ``sklearn.model_selection.check_cv``
Same as the scikit-learn version, but works if ``y`` is a dask object.
"""
if cv is None:
cv = 3
# If ``cv`` is not an integer, the scikit-learn implementation doesn't
# touch the ``y`` object, so passing on a dask object is fine
if not is_dask_collection(y) or not isinstance(cv, numbers.Integral):
return model_selection.check_cv(cv, y, classifier)
if classifier:
# ``y`` is a dask object. We need to compute the target type
target_type = delayed(type_of_target, pure=True)(y).compute()
if target_type in ("binary", "multiclass"):
return StratifiedKFold(cv)
return KFold(cv) | Dask aware version of ``sklearn.model_selection.check_cv``
Same as the scikit-learn version, but works if ``y`` is a dask object. |
def is_period_arraylike(arr):
"""
Check whether an array-like is a periodical array-like or PeriodIndex.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a periodical array-like or
PeriodIndex instance.
Examples
--------
>>> is_period_arraylike([1, 2, 3])
False
>>> is_period_arraylike(pd.Index([1, 2, 3]))
False
>>> is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D"))
True
"""
if isinstance(arr, (ABCPeriodIndex, ABCPeriodArray)):
return True
elif isinstance(arr, (np.ndarray, ABCSeries)):
return is_period_dtype(arr.dtype)
return getattr(arr, 'inferred_type', None) == 'period' | Check whether an array-like is a periodical array-like or PeriodIndex.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a periodical array-like or
PeriodIndex instance.
Examples
--------
>>> is_period_arraylike([1, 2, 3])
False
>>> is_period_arraylike(pd.Index([1, 2, 3]))
False
>>> is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D"))
True |
def identify_protocol(method, value):
# type: (str, Union[str, RequestType]) -> str
"""
Loop through protocols, import the protocol module and try to identify the id or request.
"""
for protocol_name in PROTOCOLS:
protocol = importlib.import_module(f"federation.protocols.{protocol_name}.protocol")
if getattr(protocol, f"identify_{method}")(value):
return protocol
else:
raise NoSuitableProtocolFoundError() | Loop through protocols, import the protocol module and try to identify the id or request. |
def blackbox_and_coarse_grain(blackbox, coarse_grain):
"""Validate that a coarse-graining properly combines the outputs of a
blackboxing.
"""
if blackbox is None:
return
for box in blackbox.partition:
# Outputs of the box
outputs = set(box) & set(blackbox.output_indices)
if coarse_grain is None and len(outputs) > 1:
raise ValueError(
'A blackboxing with multiple outputs per box must be '
'coarse-grained.')
if (coarse_grain and not any(outputs.issubset(part)
for part in coarse_grain.partition)):
raise ValueError(
'Multiple outputs from a blackbox must be partitioned into '
'the same macro-element of the coarse-graining') | Validate that a coarse-graining properly combines the outputs of a
blackboxing. |
def _handle_utf8_payload(body, properties):
"""Update the Body and Properties to the appropriate encoding.
:param bytes|str|unicode body: Message payload
:param dict properties: Message properties
:return:
"""
if 'content_encoding' not in properties:
properties['content_encoding'] = 'utf-8'
encoding = properties['content_encoding']
if compatibility.is_unicode(body):
body = body.encode(encoding)
elif compatibility.PYTHON3 and isinstance(body, str):
body = bytes(body, encoding=encoding)
return body | Update the Body and Properties to the appropriate encoding.
:param bytes|str|unicode body: Message payload
:param dict properties: Message properties
:return: |
def schur_complement(mat, row, col):
""" compute the schur complement of the matrix block mat[row:,col:] of the matrix mat """
a = mat[:row, :col]
b = mat[:row, col:]
c = mat[row:, :col]
d = mat[row:, col:]
return a - b.dot(d.I).dot(c) | compute the schur complement of the matrix block mat[row:,col:] of the matrix mat |
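A worked example with numpy (note that the ``.I`` inverse attribute requires ``numpy.matrix`` rather than a plain ndarray):
import numpy as np

mat = np.matrix([[4.0, 1.0],
                 [2.0, 3.0]])
# split at row=1, col=1: a=[[4]], b=[[1]], c=[[2]], d=[[3]]
a, b, c, d = mat[:1, :1], mat[:1, 1:], mat[1:, :1], mat[1:, 1:]
print(a - b.dot(d.I).dot(c))   # [[3.33333333]] i.e. 4 - 1*(1/3)*2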
def append_op(self, operation):
"""Append an :class:`Operation <stellar_base.operation.Operation>` to
the list of operations.
Add the operation specified if it doesn't already exist in the list of
operations of this :class:`Builder` instance.
:param operation: The operation to append to the list of operations.
:type operation: :class:`Operation`
:return: This builder instance.
"""
if operation not in self.ops:
self.ops.append(operation)
return self | Append an :class:`Operation <stellar_base.operation.Operation>` to
the list of operations.
Add the operation specified if it doesn't already exist in the list of
operations of this :class:`Builder` instance.
:param operation: The operation to append to the list of operations.
:type operation: :class:`Operation`
:return: This builder instance. |
def count_names_by_namespace(graph, namespace):
"""Get the set of all of the names in a given namespace that are in the graph.
:param pybel.BELGraph graph: A BEL graph
:param str namespace: A namespace keyword
:return: A counter from {name: frequency}
:rtype: collections.Counter
:raises IndexError: if the namespace is not defined in the graph.
"""
if namespace not in graph.defined_namespace_keywords:
raise IndexError('{} is not defined in {}'.format(namespace, graph))
return Counter(_namespace_filtered_iterator(graph, namespace)) | Get the set of all of the names in a given namespace that are in the graph.
:param pybel.BELGraph graph: A BEL graph
:param str namespace: A namespace keyword
:return: A counter from {name: frequency}
:rtype: collections.Counter
:raises IndexError: if the namespace is not defined in the graph. |
def deleteThreads(self, thread_ids):
"""
Deletes threads
:param thread_ids: Thread IDs to delete. See :ref:`intro_threads`
:return: Whether the request was successful
:raises: FBchatException if request failed
"""
thread_ids = require_list(thread_ids)
data_unpin = dict()
data_delete = dict()
for i, thread_id in enumerate(thread_ids):
data_unpin["ids[{}]".format(thread_id)] = "false"
data_delete["ids[{}]".format(i)] = thread_id
r_unpin = self._post(self.req_url.PINNED_STATUS, data_unpin)
r_delete = self._post(self.req_url.DELETE_THREAD, data_delete)
return r_unpin.ok and r_delete.ok | Deletes threads
:param thread_ids: Thread IDs to delete. See :ref:`intro_threads`
:return: Whether the request was successful
:raises: FBchatException if request failed |
def logout(self):
""" Logout and remove vid """
response = None
try:
response = requests.delete(
urls.login(),
headers={
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response) | Logout and remove vid |