id | original | modified |
---|---|---|
34,961 | def test_fp16_to_fp32():
if tvm.target.codegen.llvm_version_major() < 6:
print(
"Skipping due to LLVM version being {} < 6".format(
tvm.target.codegen.llvm_version_major()
)
)
return
import platform
machine = platform.machine()
if machine != "x86_64" and machine != "i386" and machine != "AMD64":
print("Skipping test because the platform is: {} ".format(machine))
return
def fp16_to_fp32(target, width, match=None, not_match=None):
elements = 64
n = tvm.runtime.convert(elements)
A = te.placeholder((n, width), dtype="float16", name="A")
B = te.compute(A.shape, lambda *i: A(*i).astype("float32"), name="B")
s = te.create_schedule(B.op)
s[B].vectorize(s[B].op.axis[1])
f = tvm.build(s, [A, B], target)
assembly = f.get_source("asm").splitlines()
if match:
matches = [l for l in assembly if re.search(match, l)]
assert matches
if not_match:
not_matches = [l for l in assembly if re.search(not_match, l)]
assert not not_matches
fp16_to_fp32(
"llvm -mcpu=skylake-avx512", 15, match="vcvtph2ps.*ymm", not_match="vcvtph2ps.*zmm"
)
fp16_to_fp32("llvm -mcpu=skylake-avx512", 16, match="vcvtph2ps.*zmm")
fp16_to_fp32("llvm -mcpu=skylake-avx512", 17, match="vcvtph2ps.*zmm")
fp16_to_fp32("llvm -mcpu=skylake-avx512", 49, match="vcvtph2ps.*zmm")
fp16_to_fp32(
"llvm -mcpu=skylake-avx512 -mattr=-avx512f",
49,
match="vcvtph2ps.*ymm",
not_match="vcvtph2ps.*zmm",
)
fp16_to_fp32("llvm -mcpu=skylake-avx512 -mattr=-f16c,-avx512f", 49, not_match="vcvtph2ps")
fp16_to_fp32("llvm -mcpu=core-avx2", 8, match="vcvtph2ps.*ymm")
fp16_to_fp32("llvm -mcpu=core-avx2", 9, match="vcvtph2ps.*ymm")
fp16_to_fp32("llvm", 9, not_match="vcvtph2ps")
| def test_fp16_to_fp32():
if tvm.target.codegen.llvm_version_major() < 6:
print(
"Skipping due to LLVM version being {} < 6".format(
tvm.target.codegen.llvm_version_major()
)
)
return
import platform
machine = platform.machine()
if machine not in ["x86_64", "i386", "AMD64"]:
print("Skipping test because the platform is: {} ".format(machine))
return
def fp16_to_fp32(target, width, match=None, not_match=None):
elements = 64
n = tvm.runtime.convert(elements)
A = te.placeholder((n, width), dtype="float16", name="A")
B = te.compute(A.shape, lambda *i: A(*i).astype("float32"), name="B")
s = te.create_schedule(B.op)
s[B].vectorize(s[B].op.axis[1])
f = tvm.build(s, [A, B], target)
assembly = f.get_source("asm").splitlines()
if match:
matches = [l for l in assembly if re.search(match, l)]
assert matches
if not_match:
not_matches = [l for l in assembly if re.search(not_match, l)]
assert not not_matches
fp16_to_fp32(
"llvm -mcpu=skylake-avx512", 15, match="vcvtph2ps.*ymm", not_match="vcvtph2ps.*zmm"
)
fp16_to_fp32("llvm -mcpu=skylake-avx512", 16, match="vcvtph2ps.*zmm")
fp16_to_fp32("llvm -mcpu=skylake-avx512", 17, match="vcvtph2ps.*zmm")
fp16_to_fp32("llvm -mcpu=skylake-avx512", 49, match="vcvtph2ps.*zmm")
fp16_to_fp32(
"llvm -mcpu=skylake-avx512 -mattr=-avx512f",
49,
match="vcvtph2ps.*ymm",
not_match="vcvtph2ps.*zmm",
)
fp16_to_fp32("llvm -mcpu=skylake-avx512 -mattr=-f16c,-avx512f", 49, not_match="vcvtph2ps")
fp16_to_fp32("llvm -mcpu=core-avx2", 8, match="vcvtph2ps.*ymm")
fp16_to_fp32("llvm -mcpu=core-avx2", 9, match="vcvtph2ps.*ymm")
fp16_to_fp32("llvm", 9, not_match="vcvtph2ps")
|
46,252 | def _le(y):
"""create a new function that accepts a single value x, returns x < y."""
return lambda x: x <= y
| def _le(y):
"""create a new function that accepts a single value x, returns x <= y."""
return lambda x: x <= y
|
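The row above only corrects the docstring so that it matches the returned predicate (`x <= y`, not `x < y`). A minimal sketch of how the closure behaves, with arbitrary example values, to show why the boundary case matters:

```python
def _le(y):
    """Create a new function that accepts a single value x and returns x <= y."""
    return lambda x: x <= y

# Arbitrary usage: the returned predicate captures y by closure.
at_most_5 = _le(5)
assert at_most_5(5) is True   # the boundary value is included, hence "<=" in the docstring
assert at_most_5(6) is False
```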
30,275 | def get_spf(auth, spf):
spf_context = {}
if auth is None:
spf_context["Validation-Result"] = spf.split(" ")[0].lower()
sender_ip = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", spf)
else:
result = re.search(r"spf=(\w+)", auth)
if result is not None:
spf_context["Validation-Result"] = result.group(1).lower()
sender_ip = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", auth)
if sender_ip != []:
spf_context["Sender-IP"] = sender_ip[0]
if spf is not None:
spf_context["Reason"] = re.findall(r"[(](.+)[)]", spf)[0]
return spf_context
| def get_spf(auth, spf):
spf_context = {}
if auth is None:
spf_context["Validation-Result"] = spf.split(" ")[0].lower()
sender_ip = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", spf)
else:
result = re.search(r"spf=(\w+)", auth)
if result is not None:
spf_context["Validation-Result"] = result.group(1).lower()
sender_ip = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", auth)
if sender_ip != []:
spf_context["Sender-IP"] = sender_ip[0]
if spf is not None:
spf_context["Reason"] = re.findall(r"\((.+)\)", spf)[0]
return spf_context
|
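The only change in the row above is the regex for the parenthesised reason: character classes `[(]...[)]` become escaped parentheses `\(...\)`. Both match the same text; a standalone sketch with a made-up `Received-SPF` value:

```python
import re

# Hypothetical SPF header value, for illustration only.
spf = "pass (example.com: domain of bounce@example.com designates 192.0.2.1 as permitted sender)"

old = re.findall(r"[(](.+)[)]", spf)   # character classes around the parens
new = re.findall(r"\((.+)\)", spf)     # escaped parens, same match
assert old == new
print(new[0])  # the text between the parentheses
```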
43,214 | def count_resume_run_attempts(instance: DagsterInstance, run_id: str):
events = instance.all_logs(run_id, of_type=DagsterEventType.ENGINE_EVENT)
return len([event for event in events if event.message == RESUME_RUN_LOG_MESSAGE])
| def count_resume_run_attempts(instance: DagsterInstance, run_id: str):
events = instance.all_logs(run_id, of_type=DagsterEventType.ENGINE_EVENT)
return sum(1 for event in events if event.message == RESUME_RUN_LOG_MESSAGE)
|
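`len()` only accepts sized containers, so counting over a generator expression has to go through `sum(1 for ...)`; `len(<generator>)` raises `TypeError`. A standalone sketch of both counting forms with placeholder data:

```python
RESUME_RUN_LOG_MESSAGE = "resume run"  # placeholder constant for illustration

messages = ["resume run", "start", "resume run"]

# A list comprehension materialises a list, so len() works on it ...
count_list = len([m for m in messages if m == RESUME_RUN_LOG_MESSAGE])

# ... while a generator expression is counted with sum(), avoiding the
# intermediate list.
count_gen = sum(1 for m in messages if m == RESUME_RUN_LOG_MESSAGE)

assert count_list == count_gen == 2
```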
22,748 | def _build_snap(target, archs, status, lock):
status[target] = {arch: '...' for arch in archs}
if target == 'certbot':
workspace = CERTBOT_DIR
else:
workspace = join(CERTBOT_DIR, target)
subprocess.check_output(
('"{0}" tools/strip_hashes.py letsencrypt-auto-source/pieces/dependency-requirements.txt '
'| grep -v python-augeas > "{1}/snap-constraints.txt"').format(sys.executable, workspace),
shell=True, cwd=CERTBOT_DIR)
retry = 3
while retry:
exit_code, process_output = _execute_build(target, archs, status, workspace)
print(f'Build {target} for {",".join(archs)} (attempt {4-retry}/3) ended with exit code {exit_code}.')
sys.stdout.flush()
with lock:
failed_archs = [arch for arch in archs if status[target][arch] == 'Failed to build']
if exit_code == 0 and not failed_archs:
# We expect to have all target snaps available, or something bad happened.
snaps_list = glob.glob(join(workspace, '*.snap'))
if not len(snaps_list) == len(archs):
print(f'Some of the expected snaps for a successful build are missing (current list: {snaps_list}).')
print('Dumping snapcraft remote-build output build:')
print('\n'.join(process_output))
else:
break
if failed_archs:
# We expect for each failed builds to have a build output, or something bad happened.
missing_outputs = False
for arch in failed_archs:
if not exists(join(workspace, f'{target}_{failed_archs}.txt')):
missing_outputs = True
print(f'Missing output on a failed build {target} for {arch}.')
if missing_outputs:
print('Dumping snapcraft remote-build output build:')
print('\n'.join(process_output))
# Retry the remote build if it has been interrupted (non zero status code) or if some builds have failed.
retry = retry - 1
return {target: workspace}
| def _build_snap(target, archs, status, lock):
status[target] = {arch: '...' for arch in archs}
if target == 'certbot':
workspace = CERTBOT_DIR
else:
workspace = join(CERTBOT_DIR, target)
subprocess.check_output(
('"{0}" tools/strip_hashes.py letsencrypt-auto-source/pieces/dependency-requirements.txt '
'| grep -v python-augeas > "{1}/snap-constraints.txt"').format(sys.executable, workspace),
shell=True, cwd=CERTBOT_DIR)
retry = 3
while retry:
exit_code, process_output = _execute_build(target, archs, status, workspace)
print(f'Build {target} for {",".join(archs)} (attempt {4-retry}/3) ended with exit code {exit_code}.')
sys.stdout.flush()
with lock:
failed_archs = [arch for arch in archs if status[target][arch] == 'Failed to build']
if exit_code == 0 and not failed_archs:
# We expect to have all target snaps available, or something bad happened.
snaps_list = glob.glob(join(workspace, '*.snap'))
if not len(snaps_list) == len(archs):
print(f'Some of the expected snaps for a successful build are missing (current list: {snaps_list}).')
print('Dumping snapcraft remote-build output build:')
print('\n'.join(process_output))
else:
break
if failed_archs:
# We expect for each failed builds to have a build output, or something bad happened.
missing_outputs = False
for arch in failed_archs:
if not exists(join(workspace, f'{target}_{arch}.txt')):
missing_outputs = True
print(f'Missing output on a failed build {target} for {arch}.')
if missing_outputs:
print('Dumping snapcraft remote-build output build:')
print('\n'.join(process_output))
# Retry the remote build if it has been interrupted (non zero status code) or if some builds have failed.
retry = retry - 1
return {target: workspace}
|
59 | def add_cover(cover_url, ekey, account=None):
"""
Adds a cover to coverstore and returns the cover id.
:param str cover_url: URL of cover image
:param str ekey: Edition key /book/OL..M
:rtype: int or None
:return: Cover id, or None if upload did not succeed
"""
olid = ekey.split('/')[-1]
coverstore_url = config.get('coverstore_url').rstrip('/')
upload_url = coverstore_url + '/b/upload2'
if upload_url.startswith('//'):
upload_url = '{}:{}'.format(web.ctx.get('protocol', 'http'), upload_url)
user = account or accounts.get_current_user()
if not user:
raise RuntimeError("accounts.get_current_user() failed")
params = {
'author': user.get('key') or user.get('_key'),
'data': None,
'source_url': cover_url,
'olid': olid,
'ip': web.ctx.ip,
}
reply = None
for attempt in range(10):
try:
payload = requests.compat.urlencode(params).encode('utf-8')
response = requests.post(upload_url, data=payload)
except requests.HTTPError:
sleep(2)
continue
body = response.text
if response.getcode() == 500:
raise CoverNotSaved(body)
if body not in ['', 'None']:
reply = response.json()
if response.getcode() == 200 and 'id' in reply:
break
sleep(2)
if not reply or reply.get('message') == 'Invalid URL':
return
cover_id = int(reply['id'])
return cover_id
| def add_cover(cover_url, ekey, account=None):
"""
Adds a cover to coverstore and returns the cover id.
:param str cover_url: URL of cover image
:param str ekey: Edition key /book/OL..M
:rtype: int or None
:return: Cover id, or None if upload did not succeed
"""
olid = ekey.split('/')[-1]
coverstore_url = config.get('coverstore_url').rstrip('/')
upload_url = coverstore_url + '/b/upload2'
if upload_url.startswith('//'):
upload_url = '{}:{}'.format(web.ctx.get('protocol', 'http'), upload_url)
user = account or accounts.get_current_user()
if not user:
raise RuntimeError("accounts.get_current_user() failed")
params = {
'author': user.get('key') or user.get('_key'),
'data': None,
'source_url': cover_url,
'olid': olid,
'ip': web.ctx.ip,
}
reply = None
for attempt in range(10):
try:
payload = requests.compat.urlencode(params).encode('utf-8')
response = requests.post(upload_url, data=payload)
except requests.HTTPError:
sleep(2)
continue
body = response.text
if response.status_code == 500:
raise CoverNotSaved(body)
if body not in ['', 'None']:
reply = response.json()
if response.status_code == 200 and 'id' in reply:
break
sleep(2)
if not reply or reply.get('message') == 'Invalid URL':
return
cover_id = int(reply['id'])
return cover_id
|
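The edit in the row above moves one check from `response.getcode()` (the urllib response API) to `response.status_code`, which is how `requests.Response` exposes the HTTP status. A minimal sketch of the requests-style check; the URL and parameters below are placeholders, not the real coverstore endpoint:

```python
from typing import Optional

import requests


def upload_cover(upload_url: str, params: dict) -> Optional[dict]:
    """POST url-encoded params and inspect the result via the requests API.

    requests.Response has no getcode(); the status code lives in .status_code.
    """
    response = requests.post(upload_url, data=params)  # requests url-encodes dicts itself
    if response.status_code == 500:
        raise RuntimeError(response.text)
    if response.status_code == 200 and response.text not in ("", "None"):
        return response.json()
    return None


# Hypothetical call (not executed); the URL is a placeholder:
# upload_cover("https://coverstore.example.org/b/upload2", {"olid": "OL1M", "source_url": "https://..."})
```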
13,938 | def _google_oauth2_session(request, additional_scopes=None, with_scope=True, **kwargs):
scopes = _get_default_scopes()
if additional_scopes:
scopes = set(scopes).union(set(additional_scopes))
if with_scope:
kwargs['scope'] = sorted(scopes)
original_url = f"{request.scheme}://{request.META['HTTP_HOST']}"
# Use hardcoded uri for oauth flow to avoid having to set redirect_urls
# for every single new app version.
url = OAUTH2_REDIRECT_URL if OAUTH2_REDIRECT_URL else original_url
kwargs['redirect_uri'] = f"{url}{reverse('googleauth_oauth2callback')}"
logging.info('Create google oauth2 session with redirect uri: %s', kwargs['redirect_uri'])
client_id = getattr(settings, _CLIENT_ID_SETTING)
assert client_id
return OAuth2Session(client_id, **kwargs)
| def _google_oauth2_session(request, additional_scopes=None, with_scope=True, **kwargs):
scopes = _get_default_scopes()
if additional_scopes:
scopes = set(scopes).union(set(additional_scopes))
if with_scope:
kwargs['scope'] = sorted(scopes)
original_url = f"{request.scheme}://{request.META['HTTP_HOST']}"
# Use hardcoded uri for oauth flow to avoid having to set redirect_urls
# for every single new app version.
url = OAUTH2_REDIRECT_URL or original_url
kwargs['redirect_uri'] = f"{url}{reverse('googleauth_oauth2callback')}"
logging.info('Create google oauth2 session with redirect uri: %s', kwargs['redirect_uri'])
client_id = getattr(settings, _CLIENT_ID_SETTING)
assert client_id
return OAuth2Session(client_id, **kwargs)
|
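The row above replaces `X if X else Y` with `X or Y`; the two are equivalent because `or` returns its left operand when it is truthy and the right operand otherwise. A tiny sketch with made-up settings values:

```python
OAUTH2_REDIRECT_URL = ""                      # hypothetical setting; empty string means "not configured"
original_url = "https://app.example.com"

verbose = OAUTH2_REDIRECT_URL if OAUTH2_REDIRECT_URL else original_url
concise = OAUTH2_REDIRECT_URL or original_url

# `x or y` picks the fallback whenever x is falsy -- exactly what the
# conditional expression spells out.
assert verbose == concise == "https://app.example.com"
```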
5,667 | def pearsonr(x, y, *, alternative='two-sided', confidence_level=0.95):
r"""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient [1]_ measures the linear relationship
between two datasets. The calculation of the p-value relies on the
assumption that each dataset is normally distributed. (See Kowalski [3]_
for a discussion of the effects of non-normality of the input on the
distribution of the correlation coefficient.) Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear relationship.
Positive correlations imply that as x increases, so does y. Negative
correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets.
Parameters
----------
x : (N,) array_like
Input array.
y : (N,) array_like
Input array.
alternative : {'two-sided', 'greater', 'less'}, optional
Indicates the alternative hypothesis. The default value is
'two-sided'.
confdence_level : float, optional
The confidence level for the calculation of the confidence
interval for the correlation coefficent. Default is 0.95.
Returns
-------
result : PearsonRResult
An object with the following attributes:
* `r` : float
Pearson product-moment correlation coefficent
* `pvalue` : float
P-value.
* `fisher_ci` : ConfidenceInterval, namedtuple
The confidence interval for `r`, stored in a ``namedtuple``
with fields `low` and `high`.
Warns
-----
PearsonRConstantInputWarning
Raised if an input is a constant array. The correlation coefficient
is not defined in this case, so ``np.nan`` is returned.
PearsonRNearConstantInputWarning
Raised if an input is "nearly" constant. The array ``x`` is considered
nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.
Numerical errors in the calculation ``x - mean(x)`` in this case might
result in an inaccurate calculation of r.
See Also
--------
spearmanr : Spearman rank-order correlation coefficient.
kendalltau : Kendall's tau, a correlation measure for ordinal data.
Notes
-----
The correlation coefficient is calculated as follows:
.. math::
r = \frac{\sum (x - m_x) (y - m_y)}
{\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}}
where :math:`m_x` is the mean of the vector :math:`x` and :math:`m_y` is
the mean of the vector :math:`y`.
Under the assumption that x and y are drawn from independent normal
distributions (so the population correlation coefficient is 0), the
probability density function of the sample correlation coefficient r
is ([1]_, [2]_)::
(1 - r**2)**(n/2 - 2)
f(r) = ---------------------
B(1/2, n/2 - 1)
where n is the number of samples, and B is the beta function. This
is sometimes referred to as the exact distribution of r. This is
the distribution that is used in `pearsonr` to compute the p-value.
The distribution is a beta distribution on the interval [-1, 1],
with equal shape parameters a = b = n/2 - 1. In terms of SciPy's
implementation of the beta distribution, the distribution of r is::
dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
The p-value returned by `pearsonr` is a two-sided p-value. For a
given sample with correlation coefficient r, the p-value is
the probability that abs(r') of a random sample x' and y' drawn from
the population with zero correlation would be greater than or equal
to abs(r). In terms of the object ``dist`` shown above, the p-value
for a given r and length n can be computed as::
p = 2*dist.cdf(-abs(r))
When n is 2, the above continuous distribution is not well-defined.
One can interpret the limit of the beta distribution as the shape
parameters a and b approach a = b = 0 as a discrete distribution with
equal probability masses at r = 1 and r = -1. More directly, one
can observe that, given the data x = [x1, x2] and y = [y1, y2], and
assuming x1 != x2 and y1 != y2, the only possible values for r are 1
and -1. Because abs(r') for any sample x' and y' with length 2 will
be 1, the two-sided p-value for a sample of length 2 is always 1.
For backwards compatibility, the object that is returned behaves
like a tuple of length two that holds ``r`` and the p-value.
References
----------
.. [1] "Pearson correlation coefficient", Wikipedia,
https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
.. [2] Student, "Probable error of a correlation coefficient",
Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.
.. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution
of the Sample Product-Moment Correlation Coefficient"
Journal of the Royal Statistical Society. Series C (Applied
Statistics), Vol. 21, No. 1 (1972), pp. 1-12.
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pearsonr(a, b)
(0.8660254037844386, 0.011724811003954649)
>>> stats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
(-0.7426106572325057, 0.1505558088534455)
"""
n = len(x)
if n != len(y):
raise ValueError('x and y must have the same length.')
if n < 2:
raise ValueError('x and y must have length at least 2.')
x = np.asarray(x)
y = np.asarray(y)
# If an input is constant, the correlation coefficient is not defined.
if (x == x[0]).all() or (y == y[0]).all():
warnings.warn(PearsonRConstantInputWarning())
result = PearsonRResult(r=np.nan, pvalue=np.nan,
fishers_ci=ConfidenceInterval(low=np.nan,
high=np.nan))
return result
# dtype is the data type for the calculations. This expression ensures
# that the data type is at least 64 bit floating point. It might have
# more precision if the input is, for example, np.longdouble.
dtype = type(1.0 + x[0] + y[0])
if n == 2:
r = dtype(np.sign(x[1] - x[0])*np.sign(y[1] - y[0]))
result = PearsonRResult(r=r, pvalue=1.0,
fishers_ci=ConfidenceInterval(low=np.nan,
high=np.nan))
return result
xmean = x.mean(dtype=dtype)
ymean = y.mean(dtype=dtype)
# By using `astype(dtype)`, we ensure that the intermediate calculations
# use at least 64 bit floating point.
xm = x.astype(dtype) - xmean
ym = y.astype(dtype) - ymean
# Unlike np.linalg.norm or the expression sqrt((xm*xm).sum()),
# scipy.linalg.norm(xm) does not overflow if xm is, for example,
# [-5e210, 5e210, 3e200, -3e200]
normxm = linalg.norm(xm)
normym = linalg.norm(ym)
threshold = 1e-13
if normxm < threshold*abs(xmean) or normym < threshold*abs(ymean):
# If all the values in x (likewise y) are very close to the mean,
# the loss of precision that occurs in the subtraction xm = x - xmean
# might result in large errors in r.
warnings.warn(PearsonRNearConstantInputWarning())
r = np.dot(xm/normxm, ym/normym)
# Presumably, if abs(r) > 1, then it is only some small artifact of
# floating point arithmetic.
r = max(min(r, 1.0), -1.0)
# As explained in the docstring, the p-value can be computed as
# p = 2*dist.cdf(-abs(r))
# where dist is the beta distribution on [-1, 1] with shape parameters
# a = b = n/2 - 1. `special.btdtr` is the CDF for the beta distribution
# on [0, 1]. To use it, we make the transformation x = (r + 1)/2; the
# shape parameters do not change. Then -abs(r) used in `cdf(-abs(r))`
# becomes x = (-abs(r) + 1)/2 = 0.5*(1 - abs(r)). (r is cast to float64
# to avoid a TypeError raised by btdtr when r is higher precision.)
ab = n/2 - 1
if alternative == 'two-sided':
prob = 2*special.btdtr(ab, ab, 0.5*(1 - abs(np.float64(r))))
elif alternative == 'less':
prob = 1 - special.btdtr(ab, ab, 0.5*(1 - abs(np.float64(r))))
elif alternative == 'greater':
prob = special.btdtr(ab, ab, 0.5*(1 - abs(np.float64(r))))
else:
raise ValueError('alternative must be one of '
'["two-sided", "less", "greater"]')
if n > 3:
rlow, rhigh = _pearsonr_fishers_ci(r, n, confidence_level,
alternative)
else:
rlow = np.nan
rhigh = np.nan
result = PearsonRResult(r=r, pvalue=prob,
fishers_ci=ConfidenceInterval(low=rlow,
high=rhigh))
return result
| def pearsonr(x, y, *, alternative='two-sided', confidence_level=0.95):
r"""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient [1]_ measures the linear relationship
between two datasets. The calculation of the p-value relies on the
assumption that each dataset is normally distributed. (See Kowalski [3]_
for a discussion of the effects of non-normality of the input on the
distribution of the correlation coefficient.) Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear relationship.
Positive correlations imply that as x increases, so does y. Negative
correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets.
Parameters
----------
x : (N,) array_like
Input array.
y : (N,) array_like
Input array.
alternative : {'two-sided', 'greater', 'less'}, optional
Indicates the alternative hypothesis. The default value is
'two-sided'.
confidence_level : float, optional
The confidence level for the calculation of the confidence
interval for the correlation coefficient. Default is 0.95.
Returns
-------
result : PearsonRResult
An object with the following attributes:
* `r` : float
Pearson product-moment correlation coefficient
* `pvalue` : float
P-value.
* `fisher_ci` : ConfidenceInterval, namedtuple
The confidence interval for `r`, stored in a ``namedtuple``
with fields `low` and `high`.
Warns
-----
PearsonRConstantInputWarning
Raised if an input is a constant array. The correlation coefficient
is not defined in this case, so ``np.nan`` is returned.
PearsonRNearConstantInputWarning
Raised if an input is "nearly" constant. The array ``x`` is considered
nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.
Numerical errors in the calculation ``x - mean(x)`` in this case might
result in an inaccurate calculation of r.
See Also
--------
spearmanr : Spearman rank-order correlation coefficient.
kendalltau : Kendall's tau, a correlation measure for ordinal data.
Notes
-----
The correlation coefficient is calculated as follows:
.. math::
r = \frac{\sum (x - m_x) (y - m_y)}
{\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}}
where :math:`m_x` is the mean of the vector :math:`x` and :math:`m_y` is
the mean of the vector :math:`y`.
Under the assumption that x and y are drawn from independent normal
distributions (so the population correlation coefficient is 0), the
probability density function of the sample correlation coefficient r
is ([1]_, [2]_)::
(1 - r**2)**(n/2 - 2)
f(r) = ---------------------
B(1/2, n/2 - 1)
where n is the number of samples, and B is the beta function. This
is sometimes referred to as the exact distribution of r. This is
the distribution that is used in `pearsonr` to compute the p-value.
The distribution is a beta distribution on the interval [-1, 1],
with equal shape parameters a = b = n/2 - 1. In terms of SciPy's
implementation of the beta distribution, the distribution of r is::
dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
The p-value returned by `pearsonr` is a two-sided p-value. For a
given sample with correlation coefficient r, the p-value is
the probability that abs(r') of a random sample x' and y' drawn from
the population with zero correlation would be greater than or equal
to abs(r). In terms of the object ``dist`` shown above, the p-value
for a given r and length n can be computed as::
p = 2*dist.cdf(-abs(r))
When n is 2, the above continuous distribution is not well-defined.
One can interpret the limit of the beta distribution as the shape
parameters a and b approach a = b = 0 as a discrete distribution with
equal probability masses at r = 1 and r = -1. More directly, one
can observe that, given the data x = [x1, x2] and y = [y1, y2], and
assuming x1 != x2 and y1 != y2, the only possible values for r are 1
and -1. Because abs(r') for any sample x' and y' with length 2 will
be 1, the two-sided p-value for a sample of length 2 is always 1.
For backwards compatibility, the object that is returned behaves
like a tuple of length two that holds ``r`` and the p-value.
References
----------
.. [1] "Pearson correlation coefficient", Wikipedia,
https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
.. [2] Student, "Probable error of a correlation coefficient",
Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.
.. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution
of the Sample Product-Moment Correlation Coefficient"
Journal of the Royal Statistical Society. Series C (Applied
Statistics), Vol. 21, No. 1 (1972), pp. 1-12.
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pearsonr(a, b)
(0.8660254037844386, 0.011724811003954649)
>>> stats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
(-0.7426106572325057, 0.1505558088534455)
"""
n = len(x)
if n != len(y):
raise ValueError('x and y must have the same length.')
if n < 2:
raise ValueError('x and y must have length at least 2.')
x = np.asarray(x)
y = np.asarray(y)
# If an input is constant, the correlation coefficient is not defined.
if (x == x[0]).all() or (y == y[0]).all():
warnings.warn(PearsonRConstantInputWarning())
result = PearsonRResult(r=np.nan, pvalue=np.nan,
fishers_ci=ConfidenceInterval(low=np.nan,
high=np.nan))
return result
# dtype is the data type for the calculations. This expression ensures
# that the data type is at least 64 bit floating point. It might have
# more precision if the input is, for example, np.longdouble.
dtype = type(1.0 + x[0] + y[0])
if n == 2:
r = dtype(np.sign(x[1] - x[0])*np.sign(y[1] - y[0]))
result = PearsonRResult(r=r, pvalue=1.0,
fishers_ci=ConfidenceInterval(low=np.nan,
high=np.nan))
return result
xmean = x.mean(dtype=dtype)
ymean = y.mean(dtype=dtype)
# By using `astype(dtype)`, we ensure that the intermediate calculations
# use at least 64 bit floating point.
xm = x.astype(dtype) - xmean
ym = y.astype(dtype) - ymean
# Unlike np.linalg.norm or the expression sqrt((xm*xm).sum()),
# scipy.linalg.norm(xm) does not overflow if xm is, for example,
# [-5e210, 5e210, 3e200, -3e200]
normxm = linalg.norm(xm)
normym = linalg.norm(ym)
threshold = 1e-13
if normxm < threshold*abs(xmean) or normym < threshold*abs(ymean):
# If all the values in x (likewise y) are very close to the mean,
# the loss of precision that occurs in the subtraction xm = x - xmean
# might result in large errors in r.
warnings.warn(PearsonRNearConstantInputWarning())
r = np.dot(xm/normxm, ym/normym)
# Presumably, if abs(r) > 1, then it is only some small artifact of
# floating point arithmetic.
r = max(min(r, 1.0), -1.0)
# As explained in the docstring, the p-value can be computed as
# p = 2*dist.cdf(-abs(r))
# where dist is the beta distribution on [-1, 1] with shape parameters
# a = b = n/2 - 1. `special.btdtr` is the CDF for the beta distribution
# on [0, 1]. To use it, we make the transformation x = (r + 1)/2; the
# shape parameters do not change. Then -abs(r) used in `cdf(-abs(r))`
# becomes x = (-abs(r) + 1)/2 = 0.5*(1 - abs(r)). (r is cast to float64
# to avoid a TypeError raised by btdtr when r is higher precision.)
ab = n/2 - 1
if alternative == 'two-sided':
prob = 2*special.btdtr(ab, ab, 0.5*(1 - abs(np.float64(r))))
elif alternative == 'less':
prob = 1 - special.btdtr(ab, ab, 0.5*(1 - abs(np.float64(r))))
elif alternative == 'greater':
prob = special.btdtr(ab, ab, 0.5*(1 - abs(np.float64(r))))
else:
raise ValueError('alternative must be one of '
'["two-sided", "less", "greater"]')
if n > 3:
rlow, rhigh = _pearsonr_fishers_ci(r, n, confidence_level,
alternative)
else:
rlow = np.nan
rhigh = np.nan
result = PearsonRResult(r=r, pvalue=prob,
fishers_ci=ConfidenceInterval(low=rlow,
high=rhigh))
return result
|
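The docstring above states that the two-sided p-value equals `2*dist.cdf(-abs(r))` for a beta distribution on [-1, 1] with shape parameters `a = b = n/2 - 1`. A short numerical check of that relation, reusing the docstring's own example data:

```python
import numpy as np
from scipy import stats

a = np.array([0, 0, 0, 1, 1, 1, 1], dtype=float)
b = np.arange(7, dtype=float)

r, p = stats.pearsonr(a, b)

# Beta distribution on [-1, 1] with shape parameters a = b = n/2 - 1,
# as described in the docstring.
n = len(a)
dist = stats.beta(n / 2 - 1, n / 2 - 1, loc=-1, scale=2)
p_manual = 2 * dist.cdf(-abs(r))

assert np.isclose(p, p_manual)
print(r, p, p_manual)  # ~0.8660, ~0.01172, ~0.01172
```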
33,438 | def load_inline(path: Path) -> ModuleType | None:
# nox uses here the importlib.machinery.SourceFileLoader but I consider this similarly good, and we can keep any
# name for the tox file, it's content will always be loaded in the this module from a system point of view
for name in ("toxfile", "☣"):
candidate = path.parent / f"{name}.py"
if candidate.exists():
return _load_plugin(candidate)
return None
| def load_inline(path: Path) -> ModuleType | None:
# nox uses importlib.machinery.SourceFileLoader here, but this approach works similarly well, and we can keep any
# name for the tox file; its content will always be loaded into this module from a system point of view
for name in ("toxfile", "☣"):
candidate = path.parent / f"{name}.py"
if candidate.exists():
return _load_plugin(candidate)
return None
|
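The helper above delegates to `_load_plugin` to import a module from an arbitrary file path. A self-contained sketch of that kind of path-based loading using only the standard library (`importlib.util`); the module name and file path are made up for illustration:

```python
import importlib.util
import sys
from pathlib import Path
from types import ModuleType


def load_module_from_path(path: Path, name: str = "toxfile") -> ModuleType:
    """Import a Python file as a module, regardless of its file name."""
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module   # register before exec so the module can reference itself
    spec.loader.exec_module(module)
    return module


# Hypothetical usage, assuming a toxfile.py exists next to the current file:
# plugin = load_module_from_path(Path("toxfile.py"))
```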
26,661 | def _get_message_attribute(o):
if isinstance(o, bytes):
return {'DataType': 'Binary', 'BinaryValue': o}
elif isinstance(o, str):
return {'DataType': 'String', 'StringValue': o}
elif isinstance(o, (int, float)):
return {'DataType': 'Number', 'StringValue': str(o)}
elif hasattr(o, '__iter__'):
return {'DataType': 'String.Array', 'StringValue': json.dumps(o)}
else:
raise TypeError('Values in MessageAttributes must be one of bytes, str, int, float, or iterable; '
f'got {type(o)}')
| def _get_message_attribute(o):
if isinstance(o, bytes):
return {'DataType': 'Binary', 'BinaryValue': o}
elif isinstance(o, str):
return {'DataType': 'String', 'StringValue': o}
elif isinstance(o, (int, float)):
return {'DataType': 'Number', 'StringValue': str(o)}
if hasattr(o, '__iter__'):
return {'DataType': 'String.Array', 'StringValue': json.dumps(o)}
else:
raise TypeError('Values in MessageAttributes must be one of bytes, str, int, float, or iterable; '
f'got {type(o)}')
|
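The dispatcher above maps Python values onto the attribute-dict shape used by SNS/SQS `MessageAttributes`. A quick sketch of what it returns for a few arbitrary inputs; the function is reproduced verbatim so the example is self-contained:

```python
import json


def _get_message_attribute(o):
    if isinstance(o, bytes):
        return {'DataType': 'Binary', 'BinaryValue': o}
    if isinstance(o, str):
        return {'DataType': 'String', 'StringValue': o}
    if isinstance(o, (int, float)):
        return {'DataType': 'Number', 'StringValue': str(o)}
    if hasattr(o, '__iter__'):
        return {'DataType': 'String.Array', 'StringValue': json.dumps(o)}
    raise TypeError('Values in MessageAttributes must be one of bytes, str, int, float, or iterable; '
                    f'got {type(o)}')


# Arbitrary sample values:
assert _get_message_attribute('env')['DataType'] == 'String'
assert _get_message_attribute(3)['StringValue'] == '3'
assert _get_message_attribute(['a', 'b'])['DataType'] == 'String.Array'
```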
1,666 | def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : array-like or sparse matrix of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray of shape (n_samples,)
Target values
Xy : array-like, default=None
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float, default=1.0
The elastic net mixing parameter, with ``0 < l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. (currently not
supported) ``For l1_ratio = 1`` it is an L1 penalty. For
``0 < l1_ratio <1``, the penalty is a combination of L1 and L2.
eps : float, default=1e-3
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, default=100
Number of alphas along the regularization path
fit_intercept : bool, default=True
Whether to fit an intercept or not
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
"""
if l1_ratio == 0:
raise ValueError("Automatic alpha grid generation is not supported for"
" l1_ratio=0. Please supply a grid by providing "
"your estimator with the appropriate `alphas=` "
"argument.")
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize,
return_mean=True)
mean_dot = X_offset * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_scale[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
| def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray of shape (n_samples,)
Target values
Xy : array-like, default=None
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float, default=1.0
The elastic net mixing parameter, with ``0 < l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. (currently not
supported) ``For l1_ratio = 1`` it is an L1 penalty. For
``0 < l1_ratio <1``, the penalty is a combination of L1 and L2.
eps : float, default=1e-3
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, default=100
Number of alphas along the regularization path
fit_intercept : bool, default=True
Whether to fit an intercept or not
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
"""
if l1_ratio == 0:
raise ValueError("Automatic alpha grid generation is not supported for"
" l1_ratio=0. Please supply a grid by providing "
"your estimator with the appropriate `alphas=` "
"argument.")
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize,
return_mean=True)
mean_dot = X_offset * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_scale[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
|
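For a dense, centred, single-target problem the `alpha_max` computed above reduces to `max |X^T y| / (n_samples * l1_ratio)`, and the grid is log-spaced down to `alpha_max * eps`. A NumPy-only sketch of that quantity under those assumptions (random data, manual centring standing in for `fit_intercept=True`, no normalisation):

```python
import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((50, 8))
y = rng.standard_normal(50)
l1_ratio, eps, n_alphas = 1.0, 1e-3, 100

# Centre X and y, which is what fit_intercept=True does for dense input.
Xc = X - X.mean(axis=0)
yc = y - y.mean()

Xy = Xc.T @ yc
alpha_max = np.max(np.abs(Xy)) / (len(y) * l1_ratio)

# Log-spaced grid from alpha_max down to alpha_max * eps, largest first.
alphas = np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max), num=n_alphas)[::-1]
assert np.isclose(alphas[0], alpha_max) and np.isclose(alphas[-1], alpha_max * eps)
```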
31,095 | def test_module(client: Client, args: dict) -> str:
"""Tests API connectivity and authentication'
:type client: ``dict``
:param args: contains the test modules arguments.
:type client: ``Client``
:param client: Tripwire client to use
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
if args.get('isFetch'):
params, fetch_filter, last_fetch = prepare_fetch(args, args.get('first_fetch', ''))
fetch_incidents(client=client, last_fetch=last_fetch, fetch_filter=fetch_filter, max_results=1)
client.get_nodes("")
return 'ok'
| def test_module(client: Client, params: dict) -> str:
"""Tests API connectivity and authentication'
:type client: ``dict``
:param args: contains the test modules arguments.
:type client: ``Client``
:param client: Tripwire client to use
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
if params.get('isFetch'):
_, fetch_filter, last_fetch = prepare_fetch(params, params.get('first_fetch', ''))
fetch_incidents(client=client, last_fetch=last_fetch, fetch_filter=fetch_filter, max_results=1)
client.get_nodes("")
return 'ok'
|
46,633 | def create_vanilla_bq_loop_with_rbf_kernel(X: np.ndarray, Y: np.ndarray,
integral_bounds: Union[None, List[Tuple[float, float]]],
measure: Union[None, IntegrationMeasure],
rbf_lengthscale: float=1.0, rbf_variance: float=1.0) \
-> VanillaBayesianQuadratureLoop:
"""
:param X: initial training point locations, shape (n_points, input_dim)
:param Y: initial training point function values, shape (n_points, 1)
:param integral_bounds: List of input_dim tuples, where input_dim is the dimensionality of the integral
and the tuples contain the lower and upper bounds of the integral i.e.,
[(lb_1, ub_1), (lb_2, ub_2), ..., (lb_D, ub_D)]. None means infinite bounds.
:param measure: the integration measure. None means no measure.
:param rbf_lengthscale: the lengthscale of the rbf kernel, defaults to 1.
:param rbf_variance: the variance of the rbf kernel, defaults to 1.
:return: The vanilla BQ loop
"""
if integral_bounds is not None:
if not len(integral_bounds) == X.shape[1]:
D_bounds = len(integral_bounds)
input_dim = X.shape[1]
raise ValueError("number of integral bounds ", D_bounds, " provided does not match the input "
"dimension ", input_dim, ".")
if rbf_lengthscale <= 0:
raise ValueError("rbf lengthscale must be positive. The current value is ", rbf_lengthscale, ".")
if rbf_variance <= 0:
raise ValueError("rbf variance must be positive. The current value is ", rbf_variance, ".")
gpy_model = GPy.models.GPRegression(X=X, Y=Y, kernel=GPy.kern.RBF(input_dim=X.shape[1],
lengthscale=rbf_lengthscale,
variance=rbf_variance))
emukit_model = convert_gpy_model_to_emukit_model(gpy_model=gpy_model,
integral_bounds=integral_bounds,
measure=measure)
emukit_method = VanillaBayesianQuadrature(base_gp=emukit_model, X=X, Y=Y)
emukit_loop = VanillaBayesianQuadratureLoop(model=emukit_method)
return emukit_loop
| def create_vanilla_bq_loop_with_rbf_kernel(X: np.ndarray, Y: np.ndarray,
integral_bounds: Union[None, List[Tuple[float, float]]] = None,
measure: Union[None, IntegrationMeasure] = None,
rbf_lengthscale: float=1.0, rbf_variance: float=1.0) \
-> VanillaBayesianQuadratureLoop:
"""
:param X: initial training point locations, shape (n_points, input_dim)
:param Y: initial training point function values, shape (n_points, 1)
:param integral_bounds: List of input_dim tuples, where input_dim is the dimensionality of the integral
and the tuples contain the lower and upper bounds of the integral i.e.,
[(lb_1, ub_1), (lb_2, ub_2), ..., (lb_D, ub_D)]. None means infinite bounds.
:param measure: the integration measure. None means no measure.
:param rbf_lengthscale: the lengthscale of the rbf kernel, defaults to 1.
:param rbf_variance: the variance of the rbf kernel, defaults to 1.
:return: The vanilla BQ loop
"""
if integral_bounds is not None:
if not len(integral_bounds) == X.shape[1]:
D_bounds = len(integral_bounds)
input_dim = X.shape[1]
raise ValueError("number of integral bounds ", D_bounds, " provided does not match the input "
"dimension ", input_dim, ".")
if rbf_lengthscale <= 0:
raise ValueError("rbf lengthscale must be positive. The current value is ", rbf_lengthscale, ".")
if rbf_variance <= 0:
raise ValueError("rbf variance must be positive. The current value is ", rbf_variance, ".")
gpy_model = GPy.models.GPRegression(X=X, Y=Y, kernel=GPy.kern.RBF(input_dim=X.shape[1],
lengthscale=rbf_lengthscale,
variance=rbf_variance))
emukit_model = convert_gpy_model_to_emukit_model(gpy_model=gpy_model,
integral_bounds=integral_bounds,
measure=measure)
emukit_method = VanillaBayesianQuadrature(base_gp=emukit_model, X=X, Y=Y)
emukit_loop = VanillaBayesianQuadratureLoop(model=emukit_method)
return emukit_loop
|
57,281 | def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print(f"unable to find command, tried {commands}")
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
| def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except OSError as e:
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print(f"unable to find command, tried {commands}")
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
|
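The only change in the row above is the exception-handling idiom: `except OSError as e` binds the exception directly instead of retrieving it from `sys.exc_info()`, which was a Python 2 compatibility pattern. A tiny sketch of the modern form; the command name below is deliberately fake:

```python
import errno
import subprocess


def try_run(cmd):
    try:
        return subprocess.Popen(cmd, stdout=subprocess.PIPE)
    except OSError as e:                 # modern form: bind the exception directly
        if e.errno == errno.ENOENT:      # command not found
            return None
        raise

# Legacy equivalent of the binding, as in the pre-edit code:
#     except OSError:
#         e = sys.exc_info()[1]

# Hypothetical usage: try_run(["definitely-not-a-real-command"]) returns None.
```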
17,304 | def proxies_from_env() -> Dict[str, ProxyInfo]:
proxy_urls = {k: URL(v) for k, v in getproxies().items()
if k in ('http', 'https', 'ws', 'wss')}
netrc_obj = netrc_from_env()
stripped = {k: strip_auth_from_url(v) for k, v in proxy_urls.items()}
ret = {}
for proto, val in stripped.items():
proxy, auth = val
if proxy.scheme in ('https', 'ws', 'wss'):
client_logger.warning(
"%s proxies %s are not supported, ignoring",
proxy.scheme.upper(), proxy)
continue
if netrc_obj and auth is None:
auth_from_netrc = None
if proxy.host is not None:
auth_from_netrc = netrc_obj.authenticators(proxy.host)
if auth_from_netrc is not None:
# auth_from_netrc is a (`user`, `account`, `password`) tuple,
# `user` and `account` both can be username,
# if `user` is None, use `account`
*logins, password = auth_from_netrc
login = logins[0] if logins[0] else logins[-1]
auth = BasicAuth(cast(str, login), cast(str, password))
ret[proto] = ProxyInfo(proxy, auth)
return ret
| def proxies_from_env() -> Dict[str, ProxyInfo]:
proxy_urls = {k: URL(v) for k, v in getproxies().items()
if k in ('http', 'https', 'ws', 'wss')}
netrc_obj = netrc_from_env()
stripped = {k: strip_auth_from_url(v) for k, v in proxy_urls.items()}
ret = {}
for proto, val in stripped.items():
proxy, auth = val
if proxy.scheme in ('https', 'wss'):
client_logger.warning(
"%s proxies %s are not supported, ignoring",
proxy.scheme.upper(), proxy)
continue
if netrc_obj and auth is None:
auth_from_netrc = None
if proxy.host is not None:
auth_from_netrc = netrc_obj.authenticators(proxy.host)
if auth_from_netrc is not None:
# auth_from_netrc is a (`user`, `account`, `password`) tuple,
# `user` and `account` both can be username,
# if `user` is None, use `account`
*logins, password = auth_from_netrc
login = logins[0] if logins[0] else logins[-1]
auth = BasicAuth(cast(str, login), cast(str, password))
ret[proto] = ProxyInfo(proxy, auth)
return ret
|
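The comment in the row above explains that `netrc.authenticators()` returns a `(login, account, password)` tuple and that the code falls back to `account` when `login` is empty. A standalone sketch of that star-unpacking fallback, using made-up credential tuples:

```python
# netrc.authenticators(host) returns (login, account, password); either of the
# first two fields may hold the username, so fall back to `account` when
# `login` is empty. The tuples below are made up for illustration.
for auth_from_netrc in [("alice", None, "s3cret"), ("", "bob", "hunter2")]:
    *logins, password = auth_from_netrc
    login = logins[0] if logins[0] else logins[-1]
    print(login, password)   # -> alice s3cret, then bob hunter2
```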
30,579 | def main():
try:
handle_proxy()
active_command = demisto.command()
if active_command == 'test-module':
test_module()
demisto.results('ok')
elif active_command == 'fetch-incidents':
fetch_incidents_command()
elif active_command == 'ip':
ip_command()
elif active_command == 'domain':
domain_command()
elif active_command == 'certificate':
certificate_command()
elif active_command == 'behavior':
behavior_command()
# Log exceptions
except Exception as e:
demisto.error(str(e) + "\n\nTrace:\n" + traceback.format_exc())
return_error(str(e))
| def main():
try:
handle_proxy()
active_command = demisto.command()
if active_command == 'test-module':
test_module()
demisto.results('ok')
elif active_command == 'fetch-incidents':
fetch_incidents_command()
elif active_command == 'ip':
ip_command()
elif active_command == 'domain':
domain_command()
elif active_command == 'certificate':
certificate_command()
elif active_command == 'expanse-get-behavior':
behavior_command()
# Log exceptions
except Exception as e:
demisto.error(str(e) + "\n\nTrace:\n" + traceback.format_exc())
return_error(str(e))
|
46,456 | def autoregressive_timeseries(coef: Sequence[float],
start_values: Optional[Sequence[float]] = None,
start: Optional[Union[pd.Timestamp, int]] = pd.Timestamp('2000-01-01'),
end: Optional[Union[pd.Timestamp, int]] = None,
length: Optional[int] = None,
freq: str = 'D',
column_name: Optional[str] = 'autoregressive') -> TimeSeries:
"""
Creates a univariate, autoregressive TimeSeries whose values are calculated using specified coefficients `coef` and
starting values `start_values`.
Parameters
----------
coef : list of float
The autoregressive coefficients used for calculating the next time step.
series[t] = coef[-1] * series[t-1] + coef[-2] * series[t-2] + ... + coef[0] * series[t-len(coef)]
start_values : list of float, default: np.ones(len(coef))
The starting values used for calculating the first few values for which no lags exist yet.
series[0] = coef[-1] * starting_values[-1] + coef[-2] * starting_values[-2] + ... + coef[0] * starting_values[0]
start : pd.Timestamp or int, optional
The start of the returned TimeSeries' index. If a pandas Timestamp is passed, the TimeSeries will have a pandas
DatetimeIndex. If an integer is passed, the TimeSeries will have a pandas Int64Index index. Works only with
either `length` or `end`.
end : pd.Timestamp or int, default: pd.Timestamp('2000-01-01')
Optionally, the end of the returned index. Works only with either `start` or `length`. If `start` is
set, `end` must be of same type as `start`. Else, it can be either a pandas Timestamp or an integer.
length : int, optional
Optionally, the length of the returned index. Works only with either `start` or `end`.
freq : str, default: 'D'
The time difference between two adjacent entries in the returned TimeSeries. Only effective if `start` is a
pandas Timestamp. A DateOffset alias is expected; see
`docs <https://pandas.pydata.org/pandas-docs/stable/user_guide/TimeSeries.html#dateoffset-objects>`_.
column_name : str, default: 'autoregressive'
Optionally, the name of the value column for the returned TimeSeries
Returns
-------
TimeSeries
An autoregressive TimeSeries created as indicated above.
"""
# if no start values specified default to a list of 1s
if start_values is None:
start_values = np.ones(len(coef))
else:
raise_if_not(len(start_values) == len(coef), "start_values must have same length as coef.")
index = _generate_index(start=start, end=end, freq=freq, length=length)
values = np.empty(len(coef) + len(index))
values[:len(coef)] = start_values
for i in range(len(coef), len(coef) + len(index)):
# calculate next time step as dot product of coefs with previous len(coef) time steps
values[i] = np.dot(values[i - len(coef):i], coef)
return TimeSeries.from_times_and_values(index, values[len(coef):], freq=freq, columns=pd.Index([column_name]))
| def autoregressive_timeseries(coef: Sequence[float],
start_values: Optional[Sequence[float]] = None,
start: Optional[Union[pd.Timestamp, int]] = pd.Timestamp('2000-01-01'),
end: Optional[Union[pd.Timestamp, int]] = None,
length: Optional[int] = None,
freq: str = 'D',
column_name: Optional[str] = 'autoregressive') -> TimeSeries:
"""
Creates a univariate, autoregressive TimeSeries whose values are calculated using specified coefficients `coef` and
starting values `start_values`.
Parameters
----------
coef : Sequence of float
The autoregressive coefficients used for calculating the next time step.
series[t] = coef[-1] * series[t-1] + coef[-2] * series[t-2] + ... + coef[0] * series[t-len(coef)]
start_values : list of float, default: np.ones(len(coef))
The starting values used for calculating the first few values for which no lags exist yet.
series[0] = coef[-1] * starting_values[-1] + coef[-2] * starting_values[-2] + ... + coef[0] * starting_values[0]
start : pd.Timestamp or int, optional
The start of the returned TimeSeries' index. If a pandas Timestamp is passed, the TimeSeries will have a pandas
DatetimeIndex. If an integer is passed, the TimeSeries will have a pandas Int64Index index. Works only with
either `length` or `end`.
end : pd.Timestamp or int, default: pd.Timestamp('2000-01-01')
Optionally, the end of the returned index. Works only with either `start` or `length`. If `start` is
set, `end` must be of same type as `start`. Else, it can be either a pandas Timestamp or an integer.
length : int, optional
Optionally, the length of the returned index. Works only with either `start` or `end`.
freq : str, default: 'D'
The time difference between two adjacent entries in the returned TimeSeries. Only effective if `start` is a
pandas Timestamp. A DateOffset alias is expected; see
`docs <https://pandas.pydata.org/pandas-docs/stable/user_guide/TimeSeries.html#dateoffset-objects>`_.
column_name : str, default: 'autoregressive'
Optionally, the name of the value column for the returned TimeSeries
Returns
-------
TimeSeries
An autoregressive TimeSeries created as indicated above.
"""
# if no start values specified default to a list of 1s
if start_values is None:
start_values = np.ones(len(coef))
else:
raise_if_not(len(start_values) == len(coef), "start_values must have same length as coef.")
index = _generate_index(start=start, end=end, freq=freq, length=length)
values = np.empty(len(coef) + len(index))
values[:len(coef)] = start_values
for i in range(len(coef), len(coef) + len(index)):
# calculate next time step as dot product of coefs with previous len(coef) time steps
values[i] = np.dot(values[i - len(coef):i], coef)
return TimeSeries.from_times_and_values(index, values[len(coef):], freq=freq, columns=pd.Index([column_name]))
|
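The recurrence in the docstring above, `series[t] = coef[-1]*series[t-1] + ... + coef[0]*series[t-len(coef)]`, is just a dot product of the coefficients with the previous `len(coef)` values. A NumPy-only sketch of the same update loop without the TimeSeries wrapper; the coefficients and length are arbitrary:

```python
import numpy as np

coef = [0.5, 0.3]                      # arbitrary AR(2) coefficients
start_values = np.ones(len(coef))      # default starting values, as in the function
length = 5

values = np.empty(len(coef) + length)
values[:len(coef)] = start_values
for i in range(len(coef), len(values)):
    # next step = dot product of coef with the previous len(coef) values
    values[i] = np.dot(values[i - len(coef):i], coef)

print(values[len(coef):])  # the generated autoregressive series
```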
1,321 | def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True,
n_jobs=None, method='lars', iter_offset=0,
random_state=None, return_inner_stats=False,
inner_stats=None, return_n_iter=False,
positive_dict=False, positive_code=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback : callable or None, optional (default: None)
callable that gets invoked every five iterations
batch_size : int,
The number of samples to take in each batch.
verbose : bool, optional (default: False)
To control the verbosity of the procedure.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int or None, optional (default=None)
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
avoid loosing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
positive_dict : bool
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
positive_code : bool
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
if method == 'lars' and positive_code:
raise ValueError(
"Positive constraint not supported for \"lars\" coding method."
)
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
dictionary = np.require(dictionary, requirements='W')
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs,
check_input=False,
positive=positive_code).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state,
positive=positive_dict)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs, check_input=False,
positive=positive_code)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
| def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True,
n_jobs=None, method='lars', iter_offset=0,
random_state=None, return_inner_stats=False,
inner_stats=None, return_n_iter=False,
positive_dict=False, positive_code=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback : callable or None, optional (default: None)
callable that gets invoked every five iterations
batch_size : int,
The number of samples to take in each batch.
verbose : bool, optional (default: False)
To control the verbosity of the procedure.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int or None, optional (default=None)
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
positive_dict : bool
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
positive_code : bool
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
if method == 'lars' and positive_code:
raise ValueError(
"Positive constraint not supported for 'lars' coding method."
)
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
dictionary = np.require(dictionary, requirements='W')
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs,
check_input=False,
positive=positive_code).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state,
positive=positive_dict)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs, check_input=False,
positive=positive_code)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
|
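A minimal usage sketch for the function recorded above, assuming it is scikit-learn's sklearn.decomposition.dict_learning_online with the signature shown; the data and parameter values below are made up.

import numpy as np
from sklearn.decomposition import dict_learning_online

rng = np.random.RandomState(0)
X = rng.randn(50, 8)                                  # 50 samples, 8 features

# return_code=True (the default) returns both the sparse code U and the dictionary V
code, dictionary = dict_learning_online(X, n_components=4, alpha=1, n_iter=20,
                                        batch_size=5, random_state=0)
print(code.shape)        # (50, 4)
print(dictionary.shape)  # (4, 8)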
78 | def _process_data(data):
"""Convert keys from /a/, /b/, /l/ and /user/ to /authors/, /books/, /languages/ and /people/ respectively.
"""
if isinstance(data, list):
return [_process_data(d) for d in data]
elif isinstance(data, dict):
if 'key' in data:
data['key'] = _process_key(data['key'])
# convert date to ISO format
if data.get('type', '') == '/type/datetime':
data['value'] = data['value'].replace(' ', 'T')
return dict((k, _process_data(v)) for k, v in data.items())
else:
return data
| def _process_data(data):
"""Convert keys from /a/, /b/, /l/ and /user/ to /authors/, /books/, /languages/ and /people/ respectively.
"""
if isinstance(data, list):
return [_process_data(d) for d in data]
elif isinstance(data, dict):
if 'key' in data:
data['key'] = _process_key(data['key'])
# convert date to ISO format
if data.get('type') == '/type/datetime':
data['value'] = data['value'].replace(' ', 'T')
return dict((k, _process_data(v)) for k, v in data.items())
else:
return data
|
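For illustration only: a hypothetical _process_key stand-in (not part of the record) showing the prefix mapping described in the docstring, so the behaviour of _process_data can be traced end to end.

# Hypothetical helper; the real _process_key lives elsewhere in the project.
def _process_key(key):
    mapping = {'/a/': '/authors/', '/b/': '/books/',
               '/l/': '/languages/', '/user/': '/people/'}
    for old, new in mapping.items():
        if key.startswith(old):
            return new + key[len(old):]
    return key

data = {
    'key': '/b/OL1M',
    'created': {'type': '/type/datetime', 'value': '2008-04-01 03:28:50'},
}
# With the record's _process_data this would yield:
# {'key': '/books/OL1M', 'created': {'type': '/type/datetime', 'value': '2008-04-01T03:28:50'}}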
1,632 | def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : int, RandomState instance, default=None
Determines random subset of of samples.
        Use an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<https://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
if sample_size is not None:
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
| def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : int, RandomState instance, default=None
Determines random subset of samples.
        Use an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<https://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
if sample_size is not None:
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
|
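A short usage sketch, assuming the function above is sklearn.metrics.silhouette_score; the toy data below are made up.

import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(20, 2), rng.randn(20, 2) + 5])             # two well-separated blobs
labels = KMeans(n_clusters=2, random_state=0).fit_predict(X)

print(silhouette_score(X, labels))                                  # close to 1
print(silhouette_score(X, labels, sample_size=20, random_state=0))  # subsampled estimate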
36,052 | def validate_instructions(instructions, ctx): # pylint: disable=inconsistent-return-statements, unused-argument
"""Check that the instructions dict contains the necessary keywords"""
instructions_dict = instructions.get_dict()
retrieve_files = instructions_dict.get('retrieve_files', None)
if retrieve_files is None:
errmsg = (
'\n\n'
'no indication of what to do in the instruction node:\n > {}\n'
'(to store the files in the repository set retrieve_files=True,\n'
'to copy them to the specified folder on the remote computer,\n'
'set it to False)\n'
)
return errmsg.format(instructions.uuid)
if not isinstance(retrieve_files, bool):
errmsg = (
'entry for retrieve files inside of instruction node {} must be\n'
'either True or False; instead, it is: {}'
)
return errmsg.format(instructions.uuid, retrieve_files)
local_files = instructions_dict.get('local_files', None)
remote_files = instructions_dict.get('remote_files', None)
symlink_files = instructions_dict.get('symlink_files', None)
if not any([local_files, remote_files, symlink_files]):
errmsg = (
'no indication of which files to copy were found in the instruction node {}.\n'
'Please include at least one of `local_files`, `remote_files`, or `symlink_files`.\n'
'These should be lists containing tuples following the pattern:\n'
'[ ... (source_node_key, source_relpath, target_relpath) ... ] \n'
)
return errmsg.format(instructions.uuid)
| def validate_instructions(instructions, ctx): # pylint: disable=inconsistent-return-statements, unused-argument
"""Check that the instructions dict contains the necessary keywords"""
instructions_dict = instructions.get_dict()
retrieve_files = instructions_dict.get('retrieve_files', None)
if retrieve_files is None:
errmsg = (
'\n\n'
'no indication of what to do in the instruction node:\n > {}\n'
'(to store the files in the repository set retrieve_files=True,\n'
'to copy them to the specified folder on the remote computer,\n'
'set it to False)\n'
)
return errmsg.format(instructions.uuid)
if not isinstance(retrieve_files, bool):
errmsg = (
'entry for retrieve files inside of instruction node {} must be\n'
'either True or False; instead, it is: {}'
)
return errmsg.format(instructions.uuid, retrieve_files)
local_files = instructions_dict.get('local_files', None)
remote_files = instructions_dict.get('remote_files', None)
symlink_files = instructions_dict.get('symlink_files', None)
if not any([local_files, remote_files, symlink_files]):
errmsg = (
f'no indication of which files to copy was found in the instructions node: {instructions}.\n'
'Please include at least one of `local_files`, `remote_files`, or `symlink_files`.\n'
'These should be lists containing tuples following the pattern:\n'
'[ ... (source_node_key, source_relpath, target_relpath) ... ] \n'
)
return errmsg.format(instructions.uuid)
|
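A rough sketch of the dictionary shape the validator above expects; FakeNode is a hypothetical stand-in that only mimics the get_dict()/uuid interface used by the record (the real code presumably receives an AiiDA Dict node).

class FakeNode:
    uuid = '00000000-0000-0000-0000-000000000000'
    def __init__(self, payload):
        self._payload = payload
    def get_dict(self):
        return self._payload

good = FakeNode({
    'retrieve_files': True,
    'local_files': [('source_node_key', 'relpath/in/source', 'relpath/in/target')],
})
bad = FakeNode({'retrieve_files': 'yes'})                 # not a bool

assert validate_instructions(good, ctx=None) is None      # no error message returned
assert 'either True or False' in validate_instructions(bad, ctx=None)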
40,006 | def main():
ep = """
use --nspath to validate against an extension. If --ns is not specified,
validate against all namespaces in namespace file.
"""
parser = ArgumentParser(description="Validate an NWB file", epilog=ep)
parser.add_argument("paths", type=str, nargs='+', help="NWB file paths")
parser.add_argument('-p', '--nspath', type=str, help="the path to the namespace YAML file")
parser.add_argument("-n", "--ns", type=str, help="the namespace to validate against")
parser.add_argument("-lns", "--list-namespaces", dest="list_namespaces",
action='store_true', help="List the available namespaces and exit.")
feature_parser = parser.add_mutually_exclusive_group(required=False)
feature_parser.add_argument("--cached-namespace", dest="cached_namespace", action='store_true',
help="Use the cached namespace (default: %(default)s).", default=True)
feature_parser.add_argument('--no-cached-namespace', dest="cached_namespace", action='store_false',
help="Don't use the cached namespace.")
feature_parser.add_argument('--severity', dest="severity", type=int,
help="Report anything with the given severity or higher as error (default: %(default)s).",
default=10, choices=range(0, 11))
args = parser.parse_args()
ret = 0
if args.nspath:
if not os.path.isfile(args.nspath):
print("The namespace file {} is not a valid file.".format(args.nspath), file=sys.stderr)
sys.exit(1)
if args.cached_namespace:
print("Turning off validation against cached namespace information "
"as --nspath was passed.", file=sys.stderr)
args.cached_namespace = False
for path in args.paths:
if not os.path.isfile(path):
print("The file {} does not exist.".format(path), file=sys.stderr)
ret = 1
continue
if args.cached_namespace:
catalog = NamespaceCatalog(NWBGroupSpec, NWBDatasetSpec, NWBNamespace)
ns_deps = NWBHDF5IO.load_namespaces(catalog, path)
s = set(ns_deps.keys()) # determine which namespaces are the most
for k in ns_deps: # specific (i.e. extensions) and validate
s -= ns_deps[k].keys() # against those
namespaces = list(sorted(s))
if len(namespaces) > 0:
tm = TypeMap(catalog)
manager = BuildManager(tm)
specloc = "cached namespace information"
else:
manager = None
namespaces = [CORE_NAMESPACE]
specloc = "pynwb namespace information"
print("The file {} has no cached namespace information. "
"Falling back to {}.".format(path, specloc), file=sys.stderr)
elif args.nspath:
catalog = NamespaceCatalog(NWBGroupSpec, NWBDatasetSpec, NWBNamespace)
namespaces = catalog.load_namespaces(args.nspath)
if len(namespaces) == 0:
print("Could not load namespaces from file {}.".format(args.nspath), file=sys.stderr)
sys.exit(1)
tm = TypeMap(catalog)
manager = BuildManager(tm)
specloc = "--nspath namespace information"
else:
manager = None
namespaces = [CORE_NAMESPACE]
specloc = "pynwb namespace information"
if args.list_namespaces:
print("\n".join(namespaces))
ret = 0
continue
if args.ns:
if args.ns in namespaces:
namespaces = [args.ns]
else:
print("The namespace {} could not be found in {} as only {} is present.".format(
args.ns, specloc, namespaces), file=sys.stderr)
ret = 1
continue
with NWBHDF5IO(path, mode='r', manager=manager) as io:
for ns in namespaces:
print("Validating {} against {} using namespace {}.".format(path, specloc, ns))
ret = ret or _validate_helper(io=io, namespace=ns, severity=args.severity)
sys.exit(ret)
| def main():
ep = """
use --nspath to validate against an extension. If --ns is not specified,
validate against all namespaces in namespace file.
"""
parser = ArgumentParser(description="Validate an NWB file", epilog=ep)
parser.add_argument("paths", type=str, nargs='+', help="NWB file paths")
parser.add_argument('-p', '--nspath', type=str, help="the path to the namespace YAML file")
parser.add_argument("-n", "--ns", type=str, help="the namespace to validate against")
parser.add_argument("-lns", "--list-namespaces", dest="list_namespaces",
action='store_true', help="List the available namespaces and exit.")
feature_parser = parser.add_mutually_exclusive_group(required=False)
feature_parser.add_argument("--cached-namespace", dest="cached_namespace", action='store_true',
help="Use the cached namespace (default: %(default)).", default=True)
feature_parser.add_argument('--no-cached-namespace', dest="cached_namespace", action='store_false',
help="Don't use the cached namespace.")
feature_parser.add_argument('--severity', dest="severity", type=int,
help="Report anything with the given severity or higher as error (default: %(default)s).",
default=10, choices=range(0, 11))
args = parser.parse_args()
ret = 0
if args.nspath:
if not os.path.isfile(args.nspath):
print("The namespace file {} is not a valid file.".format(args.nspath), file=sys.stderr)
sys.exit(1)
if args.cached_namespace:
print("Turning off validation against cached namespace information "
"as --nspath was passed.", file=sys.stderr)
args.cached_namespace = False
for path in args.paths:
if not os.path.isfile(path):
print("The file {} does not exist.".format(path), file=sys.stderr)
ret = 1
continue
if args.cached_namespace:
catalog = NamespaceCatalog(NWBGroupSpec, NWBDatasetSpec, NWBNamespace)
ns_deps = NWBHDF5IO.load_namespaces(catalog, path)
s = set(ns_deps.keys()) # determine which namespaces are the most
for k in ns_deps: # specific (i.e. extensions) and validate
s -= ns_deps[k].keys() # against those
namespaces = list(sorted(s))
if len(namespaces) > 0:
tm = TypeMap(catalog)
manager = BuildManager(tm)
specloc = "cached namespace information"
else:
manager = None
namespaces = [CORE_NAMESPACE]
specloc = "pynwb namespace information"
print("The file {} has no cached namespace information. "
"Falling back to {}.".format(path, specloc), file=sys.stderr)
elif args.nspath:
catalog = NamespaceCatalog(NWBGroupSpec, NWBDatasetSpec, NWBNamespace)
namespaces = catalog.load_namespaces(args.nspath)
if len(namespaces) == 0:
print("Could not load namespaces from file {}.".format(args.nspath), file=sys.stderr)
sys.exit(1)
tm = TypeMap(catalog)
manager = BuildManager(tm)
specloc = "--nspath namespace information"
else:
manager = None
namespaces = [CORE_NAMESPACE]
specloc = "pynwb namespace information"
if args.list_namespaces:
print("\n".join(namespaces))
ret = 0
continue
if args.ns:
if args.ns in namespaces:
namespaces = [args.ns]
else:
print("The namespace {} could not be found in {} as only {} is present.".format(
args.ns, specloc, namespaces), file=sys.stderr)
ret = 1
continue
with NWBHDF5IO(path, mode='r', manager=manager) as io:
for ns in namespaces:
print("Validating {} against {} using namespace {}.".format(path, specloc, ns))
ret = ret or _validate_helper(io=io, namespace=ns, severity=args.severity)
sys.exit(ret)
|
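The namespace-selection step above keeps only the most specific cached namespaces (those no other namespace depends on); a tiny standalone illustration of that set arithmetic, with made-up names and versions.

ns_deps = {
    'core': {},
    'my-extension': {'core': '2.2.5', 'hdmf-common': '1.1.3'},
}
s = set(ns_deps.keys())      # start from every cached namespace
for k in ns_deps:            # drop anything another namespace depends on
    s -= ns_deps[k].keys()
print(sorted(s))             # ['my-extension'] -- only the most specific namespace is validated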
24,894 | def find_pylintrc() -> Optional[str]:
"""search the pylint rc file and return its path if it finds it, else None"""
for config_file in find_default_config_files():
if config_file.endswith("pylintrc"):
return config_file
return None
| def find_pylintrc() -> Optional[str]:
"""Search the pylint rc file and return its path if it finds it, else return None"""
for config_file in find_default_config_files():
if config_file.endswith("pylintrc"):
return config_file
return None
|
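A trivial usage sketch of the helper above (it simply returns the first default config file ending in 'pylintrc', or None).

path = find_pylintrc()
print(path if path is not None else 'no pylintrc found')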
25,761 | def ilopf(n, snapshots=None, msq_threshold=0.05, min_iterations=1,
max_iterations=100, track_iterations=False, **kwargs):
'''
Iterative linear optimization updating the line parameters for passive
AC and DC lines. This is helpful when line expansion is enabled. After each
    successful solving, line impedances and line resistance are recalculated
    based on the optimization result. If warmstart is possible, it uses the
    result from the previous iteration to speed up the optimization.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
msq_threshold: float, default 0.05
Maximal mean square difference between optimized line capacity of
the current and the previous iteration. As soon as this threshold is
undercut, and the number of iterations is bigger than 'min_iterations'
the iterative optimization stops
min_iterations : integer, default 1
        Minimal number of iterations to run regardless of whether the msq_threshold
is already undercut
max_iterations : integer, default 100
        Maximal number of iterations to run regardless of whether msq_threshold
is already undercut
track_iterations: bool, default False
If True, the intermediate branch capacity steps and values of the
objective function are recorded for each iteration. The values of
iteration 0 stand for the starting point.
**kwargs
Keyword arguments of the lopf function which runs at each iteration
'''
n.lines['carrier'] = n.lines.bus0.map(n.buses.carrier)
ext_i = get_extendable_i(n, 'Line')
typed_i = n.lines.query('type != ""').index
ext_untyped_i = ext_i.difference(typed_i)
ext_typed_i = ext_i & typed_i
base_s_nom = (np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom))
n.lines.loc[ext_typed_i, 'num_parallel'] = (n.lines.s_nom/base_s_nom)[ext_typed_i]
def update_line_params(n, s_nom_prev):
factor = n.lines.s_nom_opt / s_nom_prev
for attr, carrier in (('x', 'AC'), ('r', 'DC')):
ln_i = (n.lines.query('carrier == @carrier').index & ext_untyped_i)
n.lines.loc[ln_i, attr] /= factor[ln_i]
ln_i = ext_i & typed_i
n.lines.loc[ln_i, 'num_parallel'] = (n.lines.s_nom_opt/base_s_nom)[ln_i]
def msq_diff(n, s_nom_prev):
lines_err = np.sqrt((s_nom_prev - n.lines.s_nom_opt).pow(2).mean()) / \
n.lines['s_nom_opt'].mean()
logger.info(f"Mean square difference after iteration {iteration} is "
f"{lines_err}")
return lines_err
def save_optimal_capacities(n, iteration, status):
for c, attr in pd.Series(nominal_attrs)[n.branch_components].items():
n.df(c)[f'{attr}_opt_{iteration}'] = n.df(c)[f'{attr}_opt']
setattr(n, f"status_{iteration}", status)
setattr(n, f"objective_{iteration}", n.objective)
n.iteration = iteration
if track_iterations:
for c, attr in pd.Series(nominal_attrs)[n.branch_components].items():
n.df(c)[f'{attr}_opt_0'] = n.df(c)[f'{attr}']
iteration = 1
kwargs['store_basis'] = True
diff = msq_threshold
while diff >= msq_threshold or iteration < min_iterations:
if iteration > max_iterations:
logger.info(f'Iteration {iteration} beyond max_iterations '
f'{max_iterations}. Stopping ...')
break
s_nom_prev = n.lines.s_nom_opt if iteration else n.lines.s_nom
kwargs['warmstart'] = bool(iteration and ('basis_fn' in n.__dir__()))
status, termination_condition = network_lopf(n, snapshots, **kwargs)
        assert status == 'ok', (f'Optimization failed with status {status} '
                                f'and termination {termination_condition}')
if track_iterations:
save_optimal_capacities(n, iteration, status)
update_line_params(n, s_nom_prev)
diff = msq_diff(n, s_nom_prev)
iteration += 1
logger.info('Running last lopf with fixed branches, overwrite p_nom '
'for links and s_nom for lines')
ext_links_i = get_extendable_i(n, 'Link')
n.lines[['s_nom', 's_nom_extendable']] = n.lines['s_nom_opt'], False
n.links[['p_nom', 'p_nom_extendable']] = n.links['p_nom_opt'], False
network_lopf(n, snapshots, **kwargs)
n.lines.loc[ext_i, 's_nom_extendable'] = True
n.links.loc[ext_links_i, 'p_nom_extendable'] = True
| def ilopf(n, snapshots=None, msq_threshold=0.05, min_iterations=1,
max_iterations=100, track_iterations=False, **kwargs):
'''
Iterative linear optimization updating the line parameters for passive
AC and DC lines. This is helpful when line expansion is enabled. After each
    successful solving, line impedances and line resistance are recalculated
    based on the optimization result. If warmstart is possible, it uses the
    result from the previous iteration to speed up the optimization.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
msq_threshold: float, default 0.05
Maximal mean square difference between optimized line capacity of
the current and the previous iteration. As soon as this threshold is
undercut, and the number of iterations is bigger than 'min_iterations'
the iterative optimization stops
min_iterations : integer, default 1
        Minimal number of iterations to run regardless of whether the msq_threshold
is already undercut
max_iterations : integer, default 100
        Maximal number of iterations to run regardless of whether msq_threshold
is already undercut
track_iterations: bool, default False
If True, the intermediate branch capacities and values of the
objective function are recorded for each iteration. The values of
iteration 0 stand for the starting point.
**kwargs
Keyword arguments of the lopf function which runs at each iteration
'''
n.lines['carrier'] = n.lines.bus0.map(n.buses.carrier)
ext_i = get_extendable_i(n, 'Line')
typed_i = n.lines.query('type != ""').index
ext_untyped_i = ext_i.difference(typed_i)
ext_typed_i = ext_i & typed_i
base_s_nom = (np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom))
n.lines.loc[ext_typed_i, 'num_parallel'] = (n.lines.s_nom/base_s_nom)[ext_typed_i]
def update_line_params(n, s_nom_prev):
factor = n.lines.s_nom_opt / s_nom_prev
for attr, carrier in (('x', 'AC'), ('r', 'DC')):
ln_i = (n.lines.query('carrier == @carrier').index & ext_untyped_i)
n.lines.loc[ln_i, attr] /= factor[ln_i]
ln_i = ext_i & typed_i
n.lines.loc[ln_i, 'num_parallel'] = (n.lines.s_nom_opt/base_s_nom)[ln_i]
def msq_diff(n, s_nom_prev):
lines_err = np.sqrt((s_nom_prev - n.lines.s_nom_opt).pow(2).mean()) / \
n.lines['s_nom_opt'].mean()
logger.info(f"Mean square difference after iteration {iteration} is "
f"{lines_err}")
return lines_err
def save_optimal_capacities(n, iteration, status):
for c, attr in pd.Series(nominal_attrs)[n.branch_components].items():
n.df(c)[f'{attr}_opt_{iteration}'] = n.df(c)[f'{attr}_opt']
setattr(n, f"status_{iteration}", status)
setattr(n, f"objective_{iteration}", n.objective)
n.iteration = iteration
if track_iterations:
for c, attr in pd.Series(nominal_attrs)[n.branch_components].items():
n.df(c)[f'{attr}_opt_0'] = n.df(c)[f'{attr}']
iteration = 1
kwargs['store_basis'] = True
diff = msq_threshold
while diff >= msq_threshold or iteration < min_iterations:
if iteration > max_iterations:
logger.info(f'Iteration {iteration} beyond max_iterations '
f'{max_iterations}. Stopping ...')
break
s_nom_prev = n.lines.s_nom_opt if iteration else n.lines.s_nom
kwargs['warmstart'] = bool(iteration and ('basis_fn' in n.__dir__()))
status, termination_condition = network_lopf(n, snapshots, **kwargs)
        assert status == 'ok', (f'Optimization failed with status {status} '
                                f'and termination {termination_condition}')
if track_iterations:
save_optimal_capacities(n, iteration, status)
update_line_params(n, s_nom_prev)
diff = msq_diff(n, s_nom_prev)
iteration += 1
logger.info('Running last lopf with fixed branches, overwrite p_nom '
'for links and s_nom for lines')
ext_links_i = get_extendable_i(n, 'Link')
n.lines[['s_nom', 's_nom_extendable']] = n.lines['s_nom_opt'], False
n.links[['p_nom', 'p_nom_extendable']] = n.links['p_nom_opt'], False
network_lopf(n, snapshots, **kwargs)
n.lines.loc[ext_i, 's_nom_extendable'] = True
n.links.loc[ext_links_i, 'p_nom_extendable'] = True
|
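A hedged usage sketch, assuming the function above is pypsa.linopf.ilopf; the network file name and solver choice below are assumptions.

import pypsa

n = pypsa.Network('my_network.nc')        # hypothetical network with extendable lines
ilopf(n, snapshots=n.snapshots, msq_threshold=0.05, min_iterations=1,
      max_iterations=10, track_iterations=True, solver_name='glpk')
# With track_iterations=True the intermediate capacities are stored in columns
# such as n.lines['s_nom_opt_1'], n.lines['s_nom_opt_2'], ...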
30,024 | def _check_box_obs(observation_space: Box, key: str = ""):
"""Check that the observation space is correctly formatted when dealing with a ``Box()`` space.
In particular, it checks:
- that the dimensions are big enough when it is an image, and that the type matches
- that the observation has an expected shape (warn the user if not)
Args:
observation_space: Checks if the Box observation space
key: The observation key
"""
# If image, check the low and high values, the type and the number of channels
# and the shape (minimal value)
if len(observation_space.shape) == 3:
_check_image_input(observation_space)
if len(observation_space.shape) not in [1, 3]:
logger.warn(
f"Your observation {key} has an unconventional shape (neither an image, nor a 1D vector). "
"We recommend you to flatten the observation "
"to have only a 1D vector or use a custom policy to properly process the data."
)
if np.any(np.equal(observation_space.low, -np.inf)):
logger.warn(
"Agent's minimum observation space value is -infinity. This is probably too low."
)
if np.any(np.equal(observation_space.high, np.inf)):
logger.warn(
"Agent's maxmimum observation space value is infinity. This is probably too high"
)
if np.any(np.equal(observation_space.low, observation_space.high)):
logger.warn("Agent's maximum and minimum observation space values are equal")
if np.any(np.greater(observation_space.low, observation_space.high)):
assert False, "Agent's minimum observation value is greater than it's maximum"
if observation_space.low.shape != observation_space.shape:
assert (
False
), "Agent's observation_space.low and observation_space have different shapes"
if observation_space.high.shape != observation_space.shape:
assert (
False
), "Agent's observation_space.high and observation_space have different shapes"
| def _check_box_obs(observation_space: Box, key: str = ""):
"""Check that the observation space is correctly formatted when dealing with a :class:`Box` space.
In particular, it checks:
- that the dimensions are big enough when it is an image, and that the type matches
- that the observation has an expected shape (warn the user if not)
Args:
observation_space: Checks if the Box observation space
key: The observation key
"""
# If image, check the low and high values, the type and the number of channels
# and the shape (minimal value)
if len(observation_space.shape) == 3:
_check_image_input(observation_space)
if len(observation_space.shape) not in [1, 3]:
logger.warn(
f"Your observation {key} has an unconventional shape (neither an image, nor a 1D vector). "
"We recommend you to flatten the observation "
"to have only a 1D vector or use a custom policy to properly process the data."
)
if np.any(np.equal(observation_space.low, -np.inf)):
logger.warn(
"Agent's minimum observation space value is -infinity. This is probably too low."
)
if np.any(np.equal(observation_space.high, np.inf)):
logger.warn(
"Agent's maxmimum observation space value is infinity. This is probably too high"
)
if np.any(np.equal(observation_space.low, observation_space.high)):
logger.warn("Agent's maximum and minimum observation space values are equal")
if np.any(np.greater(observation_space.low, observation_space.high)):
assert False, "Agent's minimum observation value is greater than it's maximum"
if observation_space.low.shape != observation_space.shape:
assert (
False
), "Agent's observation_space.low and observation_space have different shapes"
if observation_space.high.shape != observation_space.shape:
assert (
False
), "Agent's observation_space.high and observation_space have different shapes"
|
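A small sketch of observation spaces that would pass or trigger the warnings above, assuming Gym's Box space.

import numpy as np
from gym.spaces import Box

ok_obs = Box(low=-1.0, high=1.0, shape=(4,), dtype=np.float32)           # bounded 1D vector: fine
odd_shape = Box(low=0.0, high=1.0, shape=(2, 3), dtype=np.float32)       # warns: neither image nor 1D
unbounded = Box(low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32)  # warns about +/- infinity

for space in (ok_obs, odd_shape, unbounded):
    _check_box_obs(space, key='observation')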
3,418 | def _transform_conditions(
conditions: ConditionGroup,
) -> Tuple[ConditionGroup, StatusFilter]:
"""Split conditions into metrics conditions and a filter on session.status"""
if not conditions:
return conditions, None
where, status_filters = zip(*map(_transform_single_condition, conditions))
where = [condition for condition in where if condition is not None]
status_filters = [f for f in status_filters if f is not None]
if status_filters:
status_filters = frozenset.intersection(*status_filters)
else:
status_filters = None
return where, status_filters
| def _transform_into_metrics_conditions(
conditions: ConditionGroup,
) -> Tuple[ConditionGroup, StatusFilter]:
"""Split conditions into metrics conditions and a filter on session.status"""
if not conditions:
return conditions, None
where, status_filters = zip(*map(_transform_single_condition, conditions))
where = [condition for condition in where if condition is not None]
status_filters = [f for f in status_filters if f is not None]
if status_filters:
status_filters = frozenset.intersection(*status_filters)
else:
status_filters = None
return where, status_filters
|
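The zip/filter/intersection pattern used above, shown on hypothetical stand-ins (this is not the project's real _transform_single_condition).

def fake_transform_single_condition(condition):
    # Purely illustrative: a session.status condition becomes a status filter,
    # everything else passes through as a metrics condition.
    if condition == ('session.status', '=', 'errored'):
        return None, frozenset({'errored'})
    return condition, None

conditions = [('release', '=', '1.0'), ('session.status', '=', 'errored')]
where, status_filters = zip(*map(fake_transform_single_condition, conditions))
where = [c for c in where if c is not None]
status_filters = [f for f in status_filters if f is not None]
status_filters = frozenset.intersection(*status_filters) if status_filters else None
print(where)           # [('release', '=', '1.0')]
print(status_filters)  # frozenset({'errored'})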
8,861 | def add_common_arguments(parser):
"""Add common and configuration-related arguments to a ``parser``.
:param parser: Argument parser (or subparser)
:type parser: argparse.ArgumentParser
    This function adds the common arguments for Sopel's command line tools.
It adds the following arguments:
* ``-c``/``--config``: the name of the Sopel config, or its absolute path
* ``--config-dir``: the directory to scan for config files
This can be used on an argument parser, or an argument subparser, to handle
these cases::
[sopel-command] -c [filename]
[sopel-command] [action] -c [filename]
[sopel-command] --config-dir [directory] -c [name]
Then, when the parser parses the command line arguments, it will expose
``config`` and ``configdir`` options that can be used to find and load
Sopel's settings.
    The default value for ``config`` is either the value of the environment
variable ``SOPEL_CONFIG``, or the string ``default``.
.. seealso::
The :func:`sopel.cli.utils.load_settings` function uses an ``options``
object from a parser configured with such arguments.
"""
parser.add_argument(
'-c', '--config',
default=os.environ.get('SOPEL_CONFIG') or 'default',
metavar='filename',
dest='config',
help=inspect.cleandoc("""
Use a specific configuration file.
A config name can be given and the configuration file will be
found in Sopel's homedir (defaults to ``~/.sopel/default.cfg``).
An absolute pathname can be provided instead to use an
arbitrary location.
When the ``SOPEL_CONFIG`` environement variable is set and not
empty, it is used as the default value.
"""))
parser.add_argument(
'--config-dir',
default=config.DEFAULT_HOMEDIR,
dest='configdir',
help='Look for configuration files in this directory.')
| def add_common_arguments(parser):
"""Add common and configuration-related arguments to a ``parser``.
:param parser: Argument parser (or subparser)
:type parser: argparse.ArgumentParser
    This function adds the common arguments for Sopel's command line tools.
It adds the following arguments:
* ``-c``/``--config``: the name of the Sopel config, or its absolute path
* ``--config-dir``: the directory to scan for config files
This can be used on an argument parser, or an argument subparser, to handle
these cases::
[sopel-command] -c [filename]
[sopel-command] [action] -c [filename]
[sopel-command] --config-dir [directory] -c [name]
Then, when the parser parses the command line arguments, it will expose
``config`` and ``configdir`` options that can be used to find and load
Sopel's settings.
    The default value for ``config`` is either the value of the environment
variable ``SOPEL_CONFIG``, or the string ``default``.
.. seealso::
The :func:`sopel.cli.utils.load_settings` function uses an ``options``
object from a parser configured with such arguments.
"""
parser.add_argument(
'-c', '--config',
default=os.environ.get('SOPEL_CONFIG') or 'default',
metavar='filename',
dest='config',
help=inspect.cleandoc("""
Use a specific configuration file.
A config name can be given and the configuration file will be
found in Sopel's homedir (defaults to ``~/.sopel/default.cfg``).
An absolute pathname can be provided instead to use an
arbitrary location.
When the ``SOPEL_CONFIG`` environment variable is set and not
empty, it is used as the default value.
"""))
parser.add_argument(
'--config-dir',
default=config.DEFAULT_HOMEDIR,
dest='configdir',
help='Look for configuration files in this directory.')
|
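Usage sketch, assuming the function above is sopel.cli.utils.add_common_arguments; the config name and directory below are made up.

from argparse import ArgumentParser
from sopel.cli.utils import add_common_arguments

parser = ArgumentParser(description='my sopel tool')
add_common_arguments(parser)

options = parser.parse_args(['-c', 'mybot', '--config-dir', '/tmp/sopel-configs'])
print(options.config)     # 'mybot'
print(options.configdir)  # '/tmp/sopel-configs'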
35,151 | def gels(a, b):
"""Solves over/well/under-determined linear systems.
    Computes the least-squares solution to the equation ``ax = b`` by QR factorization
using cusolverDn<t>geqrf().
Args:
a (cupy.ndarray): The matrix with dimension ``(M, N)``.
b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``.
Returns:
cupy.ndarray:
The matrix with dimension ``(N)`` or ``(N, K)``.
"""
if a.ndim != 2:
raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim))
if b.ndim not in (1, 2):
raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim))
if a.shape[0] != b.shape[0]:
raise ValueError('shape mismatch (a: {}, b: {}).'.
format(a.shape, b.shape))
dtype = numpy.promote_types(a.dtype.char, 'f')
if dtype == 'f':
t = 's'
elif dtype == 'd':
t = 'd'
elif dtype == 'F':
t = 'c'
elif dtype == 'D':
t = 'z'
else:
raise ValueError('unsupported dtype (actual:{})'.format(a.dtype))
geqrf_helper = getattr(cusolver, t + 'geqrf_bufferSize')
geqrf = getattr(cusolver, t + 'geqrf')
trsm = getattr(cublas, t + 'trsm')
if t in 'sd':
ormqr_helper = getattr(cusolver, t + 'ormqr_bufferSize')
ormqr = getattr(cusolver, t + 'ormqr')
else:
ormqr_helper = getattr(cusolver, t + 'unmqr_bufferSize')
ormqr = getattr(cusolver, t + 'unmqr')
no_trans = cublas.CUBLAS_OP_N
if dtype.char in 'fd':
trans = cublas.CUBLAS_OP_T
else:
trans = cublas.CUBLAS_OP_C
m, n = a.shape
mn_min = min(m, n)
nrhs = b.shape[1] if b.ndim == 2 else 1
dev_info = cupy.empty(1, dtype=numpy.int32)
tau = cupy.empty(mn_min, dtype=dtype)
cusolver_handle = device.get_cusolver_handle()
cublas_handle = device.get_cublas_handle()
a_data_ptr = a.data.ptr
b_data_ptr = b.data.ptr
a = cupy.asfortranarray(a, dtype=dtype)
b = cupy.asfortranarray(b, dtype=dtype)
if a.data.ptr == a_data_ptr:
a = a.copy()
if b.data.ptr == b_data_ptr:
b = b.copy()
if m >= n: # over/well-determined systems
# geqrf (QR decomposition, A = Q * R)
ws_size = geqrf_helper(cusolver_handle, m, n, a.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, m, n, a.data.ptr, m, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# ormqr (Computes Q^T * B)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
# trsm (Solves R * X = (Q^T * B))
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, no_trans,
cublas.CUBLAS_DIAG_NON_UNIT, mn_min, nrhs, 1, a.data.ptr, m,
b.data.ptr, m)
if b.ndim == 1:
return b[:n]
else:
return b[:n, :]
else: # under-determined systems
a = cupy.asfortranarray(a.conj().T)
if b.ndim == 1:
bb = cupy.empty((n,), dtype=dtype, order='F')
bb[:m] = b
else:
bb = cupy.empty((n, nrhs), dtype=dtype, order='F')
bb[:m, :] = b
b = bb
# geqrf (QR decomposition, A^T = Q * R)
ws_size = geqrf_helper(cusolver_handle, n, m, a.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, n, m, a.data.ptr, n, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# trsm (Solves R^T * Z = B)
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, trans,
cublas.CUBLAS_DIAG_NON_UNIT, m, nrhs, 1, a.data.ptr, n,
b.data.ptr, n)
# ormqr (Computes Q * Z)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
return b
| def gels(a, b):
"""Solves over/well/under-determined linear systems.
    Computes the least-squares solution to the equation ``ax = b`` by QR factorization
using cusolverDn<t>geqrf().
Args:
a (cupy.ndarray): The matrix with dimension ``(M, N)``.
b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``.
Returns:
cupy.ndarray:
The matrix with dimension ``(N)`` or ``(N, K)``.
"""
if a.ndim != 2:
raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim))
if b.ndim not in (1, 2):
raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim))
if a.shape[0] != b.shape[0]:
raise ValueError('shape mismatch (a: {}, b: {}).'.
format(a.shape, b.shape))
dtype = numpy.promote_types(a.dtype, 'f')
if dtype == 'f':
t = 's'
elif dtype == 'd':
t = 'd'
elif dtype == 'F':
t = 'c'
elif dtype == 'D':
t = 'z'
else:
raise ValueError('unsupported dtype (actual:{})'.format(a.dtype))
geqrf_helper = getattr(cusolver, t + 'geqrf_bufferSize')
geqrf = getattr(cusolver, t + 'geqrf')
trsm = getattr(cublas, t + 'trsm')
if t in 'sd':
ormqr_helper = getattr(cusolver, t + 'ormqr_bufferSize')
ormqr = getattr(cusolver, t + 'ormqr')
else:
ormqr_helper = getattr(cusolver, t + 'unmqr_bufferSize')
ormqr = getattr(cusolver, t + 'unmqr')
no_trans = cublas.CUBLAS_OP_N
if dtype.char in 'fd':
trans = cublas.CUBLAS_OP_T
else:
trans = cublas.CUBLAS_OP_C
m, n = a.shape
mn_min = min(m, n)
nrhs = b.shape[1] if b.ndim == 2 else 1
dev_info = cupy.empty(1, dtype=numpy.int32)
tau = cupy.empty(mn_min, dtype=dtype)
cusolver_handle = device.get_cusolver_handle()
cublas_handle = device.get_cublas_handle()
a_data_ptr = a.data.ptr
b_data_ptr = b.data.ptr
a = cupy.asfortranarray(a, dtype=dtype)
b = cupy.asfortranarray(b, dtype=dtype)
if a.data.ptr == a_data_ptr:
a = a.copy()
if b.data.ptr == b_data_ptr:
b = b.copy()
if m >= n: # over/well-determined systems
# geqrf (QR decomposition, A = Q * R)
ws_size = geqrf_helper(cusolver_handle, m, n, a.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, m, n, a.data.ptr, m, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# ormqr (Computes Q^T * B)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
# trsm (Solves R * X = (Q^T * B))
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, no_trans,
cublas.CUBLAS_DIAG_NON_UNIT, mn_min, nrhs, 1, a.data.ptr, m,
b.data.ptr, m)
if b.ndim == 1:
return b[:n]
else:
return b[:n, :]
else: # under-determined systems
a = cupy.asfortranarray(a.conj().T)
if b.ndim == 1:
bb = cupy.empty((n,), dtype=dtype, order='F')
bb[:m] = b
else:
bb = cupy.empty((n, nrhs), dtype=dtype, order='F')
bb[:m, :] = b
b = bb
# geqrf (QR decomposition, A^T = Q * R)
ws_size = geqrf_helper(cusolver_handle, n, m, a.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, n, m, a.data.ptr, n, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# trsm (Solves R^T * Z = B)
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, trans,
cublas.CUBLAS_DIAG_NON_UNIT, m, nrhs, 1, a.data.ptr, n,
b.data.ptr, n)
# ormqr (Computes Q * Z)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
return b
|
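A hedged usage sketch for the least-squares solver above; it requires CuPy and a CUDA device, and the test matrix below is made up.

import numpy
import cupy

a = cupy.asarray(numpy.random.randn(6, 3).astype(numpy.float32))   # over-determined: M=6 > N=3
x_true = cupy.asarray(numpy.array([1.0, -2.0, 0.5], dtype=numpy.float32))
b = a @ x_true

x = gels(a, b)                                # shape (3,)
print(cupy.allclose(x, x_true, atol=1e-4))    # True, since b lies exactly in the range of a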
20,534 | def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_global_loglevel(verbose=verbose)
# Deal with task
if arguments.list_tasks:
deepseg.models.display_list_tasks()
if arguments.install_task is not None:
for name_model in deepseg.models.TASKS[arguments.install_task]['models']:
deepseg.models.install_model(name_model)
exit(0)
# Deal with input/output
for file in arguments.i:
if not os.path.isfile(file):
parser.error("This file does not exist: {}".format(file))
# Check if at least a model or task has been specified
if arguments.task is None:
parser.error("You need to specify a task.")
# Verify if the task is custom
if len(arguments.task) == 1 and arguments.task[0] in deepseg.models.TASKS:
# Check if all input images are provided
required_contrasts = deepseg.models.get_required_contrasts(arguments.task[0])
n_contrasts = len(required_contrasts)
# Get pipeline model names
name_models = deepseg.models.TASKS[arguments.task[0]]['models']
else:
n_contrasts = len(arguments.i)
name_models = arguments.task
if len(arguments.i) != n_contrasts:
parser.error(
"{} input files found. Please provide all required input files for the task {}, i.e. contrasts: {}."
.format(len(arguments.i), arguments.task, ', '.join(required_contrasts)))
# Check modality order
if len(arguments.i) > 1 and arguments.c is None:
parser.error(
"Please specify the order in which you put the contrasts in the input images (-i) with flag -c, e.g., "
"-c t1 t2")
# Run pipeline by iterating through the models
fname_prior = None
output_filenames = None
for name_model in name_models:
# Check if this is an official model
if name_model in list(deepseg.models.MODELS.keys()):
# If it is, check if it is installed
path_model = deepseg.models.folder(name_model)
if not deepseg.models.is_valid(path_model):
printv("Model {} is not installed. Installing it now...".format(name_model))
deepseg.models.install_model(name_model)
# If it is not, check if this is a path to a valid model
else:
path_model = os.path.abspath(name_model)
if not deepseg.models.is_valid(path_model):
parser.error("The input model is invalid: {}".format(path_model))
# Order input images
if arguments.c is not None:
input_filenames = []
for required_contrast in deepseg.models.MODELS[name_model]['contrasts']:
for provided_contrast, input_filename in zip(arguments.c, arguments.i):
if required_contrast == provided_contrast:
input_filenames.append(input_filename)
else:
input_filenames = arguments.i
# Call segment_nifti
options = {**vars(arguments), "fname_prior": fname_prior}
nii_lst, target_lst = imed_inference.segment_volume(path_model, input_filenames, options=options)
# Delete intermediate outputs
if fname_prior and os.path.isfile(fname_prior) and arguments.r:
logger.info("Remove temporary files...")
os.remove(fname_prior)
output_filenames = []
# Save output seg
for nii_seg, target in zip(nii_lst, target_lst):
if 'o' in options and options['o'] is not None:
# To support if the user adds the extension or not
extension = ".nii.gz" if ".nii.gz" in options['o'] else ".nii" if ".nii" in options['o'] else ""
if extension == "":
fname_seg = options['o'] + target if len(target_lst) > 1 else options['o']
else:
fname_seg = options['o'].replace(extension, target + extension) if len(target_lst) > 1 \
else options['o']
else:
fname_seg = ''.join([sct.image.splitext(input_filenames[0])[0], target + '.nii.gz'])
# If output folder does not exist, create it
path_out = os.path.dirname(fname_seg)
if not (path_out == '' or os.path.exists(path_out)):
os.makedirs(path_out)
nib.save(nii_seg, fname_seg)
output_filenames.append(fname_seg)
# Use the result of the current model as additional input of the next model
fname_prior = fname_seg
for output_filename in output_filenames:
display_viewer_syntax([arguments.i[0], output_filename], colormaps=['gray', 'red'], opacities=['', '0.7'])
| def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_global_loglevel(verbose=verbose)
# Deal with task
if arguments.list_tasks:
deepseg.models.display_list_tasks()
if arguments.install_task is not None:
for name_model in deepseg.models.TASKS[arguments.install_task]['models']:
deepseg.models.install_model(name_model)
exit(0)
# Deal with input/output
for file in arguments.i:
if not os.path.isfile(file):
parser.error("This file does not exist: {}".format(file))
# Check if at least a model or task has been specified
if arguments.task is None:
parser.error("You need to specify a task.")
# Verify if the task is part of the "official" tasks, or if it is pointing to paths containing custom models
if len(arguments.task) == 1 and arguments.task[0] in deepseg.models.TASKS:
# Check if all input images are provided
required_contrasts = deepseg.models.get_required_contrasts(arguments.task[0])
n_contrasts = len(required_contrasts)
# Get pipeline model names
name_models = deepseg.models.TASKS[arguments.task[0]]['models']
else:
n_contrasts = len(arguments.i)
name_models = arguments.task
if len(arguments.i) != n_contrasts:
parser.error(
"{} input files found. Please provide all required input files for the task {}, i.e. contrasts: {}."
.format(len(arguments.i), arguments.task, ', '.join(required_contrasts)))
# Check modality order
if len(arguments.i) > 1 and arguments.c is None:
parser.error(
"Please specify the order in which you put the contrasts in the input images (-i) with flag -c, e.g., "
"-c t1 t2")
# Run pipeline by iterating through the models
fname_prior = None
output_filenames = None
for name_model in name_models:
# Check if this is an official model
if name_model in list(deepseg.models.MODELS.keys()):
# If it is, check if it is installed
path_model = deepseg.models.folder(name_model)
if not deepseg.models.is_valid(path_model):
printv("Model {} is not installed. Installing it now...".format(name_model))
deepseg.models.install_model(name_model)
# If it is not, check if this is a path to a valid model
else:
path_model = os.path.abspath(name_model)
if not deepseg.models.is_valid(path_model):
parser.error("The input model is invalid: {}".format(path_model))
# Order input images
if arguments.c is not None:
input_filenames = []
for required_contrast in deepseg.models.MODELS[name_model]['contrasts']:
for provided_contrast, input_filename in zip(arguments.c, arguments.i):
if required_contrast == provided_contrast:
input_filenames.append(input_filename)
else:
input_filenames = arguments.i
# Call segment_nifti
options = {**vars(arguments), "fname_prior": fname_prior}
nii_lst, target_lst = imed_inference.segment_volume(path_model, input_filenames, options=options)
# Delete intermediate outputs
if fname_prior and os.path.isfile(fname_prior) and arguments.r:
logger.info("Remove temporary files...")
os.remove(fname_prior)
output_filenames = []
# Save output seg
for nii_seg, target in zip(nii_lst, target_lst):
if 'o' in options and options['o'] is not None:
# To support if the user adds the extension or not
extension = ".nii.gz" if ".nii.gz" in options['o'] else ".nii" if ".nii" in options['o'] else ""
if extension == "":
fname_seg = options['o'] + target if len(target_lst) > 1 else options['o']
else:
fname_seg = options['o'].replace(extension, target + extension) if len(target_lst) > 1 \
else options['o']
else:
fname_seg = ''.join([sct.image.splitext(input_filenames[0])[0], target + '.nii.gz'])
# If output folder does not exist, create it
path_out = os.path.dirname(fname_seg)
if not (path_out == '' or os.path.exists(path_out)):
os.makedirs(path_out)
nib.save(nii_seg, fname_seg)
output_filenames.append(fname_seg)
# Use the result of the current model as additional input of the next model
fname_prior = fname_seg
for output_filename in output_filenames:
display_viewer_syntax([arguments.i[0], output_filename], colormaps=['gray', 'red'], opacities=['', '0.7'])
|
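
The output-naming branch near the end of the record above (an optional '-o' value given with or without a '.nii'/'.nii.gz' extension, plus a per-target suffix when a model returns several segmentations) is easy to misread. The standalone sketch below reproduces that logic in isolation; the helper name build_output_filename and the plain string-based extension handling are illustrative assumptions, not the toolbox's API.

import os

def build_output_filename(o_arg, target, n_targets, first_input):
    # Illustrative re-implementation of the naming rules used above (assumed
    # helper, not part of sct_deepseg itself).
    if o_arg is not None:
        # Honour '-o' given with or without a NIfTI extension.
        ext = ".nii.gz" if o_arg.endswith(".nii.gz") else ".nii" if o_arg.endswith(".nii") else ""
        if ext == "":
            return o_arg + target if n_targets > 1 else o_arg
        return o_arg.replace(ext, target + ext) if n_targets > 1 else o_arg
    # Default: derive the name from the first input image, keeping '.nii.gz' whole.
    base = first_input[:-7] if first_input.endswith(".nii.gz") else os.path.splitext(first_input)[0]
    return base + target + ".nii.gz"

print(build_output_filename("seg.nii.gz", "_lesion", 2, "t2.nii.gz"))   # seg_lesion.nii.gz
print(build_output_filename(None, "_seg", 1, "sub-01_T2w.nii.gz"))      # sub-01_T2w_seg.nii.gz
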
23,293 | def get_page_tree(topdir,proj_copy_subdir,md,parent=None):
# look for files within topdir
filelist = sorted(os.listdir(topdir))
if 'index.md' in filelist:
# process index.md
try:
node = PageNode(md,os.path.join(topdir,'index.md'),proj_copy_subdir,parent)
except Exception as e:
print("Warning: Error parsing {}.\n\t{}".format(os.path.relpath(os.path.join(topdir,'index.md')),e.args[0]))
return None
filelist.remove('index.md')
else:
print('Warning: No index.md file in directory {}'.format(topdir))
return None
for name in filelist:
if name[0] != '.' and name[-1] != '~':
if os.path.isdir(os.path.join(topdir,name)):
# recurse into subdirectories
traversedir = True
if not parent==None:
traversedir = not name in parent.copy_subdir
if traversedir:
subnode = get_page_tree( os.path.join(topdir,name),
proj_copy_subdir, md, node )
if subnode: node.subpages.append(subnode)
elif name[-3:] == '.md':
# process subpages
try:
node.subpages.append(PageNode(md,os.path.join(topdir,name),proj_copy_subdir,node))
except Exception as e:
print("Warning: Error parsing {}.\n\t{}".format(os.path.relpath(os.path.join(topdir,name)),e.args[0]))
continue
else:
node.files.append(name)
return node
| def get_page_tree(topdir,proj_copy_subdir,md,parent=None):
# look for files within topdir
filelist = sorted(os.listdir(topdir))
if 'index.md' in filelist:
# process index.md
try:
node = PageNode(md,os.path.join(topdir,'index.md'),proj_copy_subdir,parent)
except Exception as e:
print("Warning: Error parsing {}.\n\t{}".format(os.path.relpath(os.path.join(topdir,'index.md')),e.args[0]))
return None
filelist.remove('index.md')
else:
print('Warning: No index.md file in directory {}'.format(topdir))
return None
for name in filelist:
if name[0] != '.' and name[-1] != '~':
if os.path.isdir(os.path.join(topdir,name)):
# recurse into subdirectories
traversedir = True
if parent is not None:
traversedir = not name in parent.copy_subdir
if traversedir:
subnode = get_page_tree( os.path.join(topdir,name),
proj_copy_subdir, md, node )
if subnode: node.subpages.append(subnode)
elif name[-3:] == '.md':
# process subpages
try:
node.subpages.append(PageNode(md,os.path.join(topdir,name),proj_copy_subdir,node))
except Exception as e:
print("Warning: Error parsing {}.\n\t{}".format(os.path.relpath(os.path.join(topdir,name)),e.args[0]))
continue
else:
node.files.append(name)
return node
|
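
The only change in the record above is replacing the equality test `not parent==None` with the identity test `parent is not None`. A minimal sketch of why the two are not interchangeable once a class overrides __eq__ (the AlwaysEqual class is purely illustrative):

class AlwaysEqual:
    # Illustrative object whose __eq__ answers True for anything.
    def __eq__(self, other):
        return True

obj = AlwaysEqual()
print(obj == None)      # True  -- equality can be overridden arbitrarily
print(obj is None)      # False -- identity cannot be overridden
print(not obj == None)  # False, so a guard written this way is skipped
print(obj is not None)  # True, the intended meaning
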
42,903 | def rectangular_symmetric(V, tol=1e-11):
r"""Rectangular decomposition of a unitary into symmetric beamsplitters.
This decomposition starts with the output from :func:`clements_phase_end`
and further decomposes each of the T unitaries into two phase-shifters and
two symmetric (50:50) beamsplitters.
The two beamsplitters in this decomposition of T are modeled by :class:`ops.BSgate`
with arguments (pi/4, pi/2), and the two phase-shifters (see :class:`ops.Rgate`)
act on the input mode with the lower index of the two. The phase imposed
by the first phaseshifter (before the first beamsplitter) is named
`external_phase`, while we call the phase shift between the beamsplitters
`internal_phase`.
The algorithm applied in this function makes use of the following identity:
::
Rgate(alpha) | 1
Rgate(beta) | 2
Rgate(phi) | 1
BSgate(theta, 0) | 1, 2
equals
Rgate(phi+alpha-beta) | 1
BSgate(pi/4, pi/2) | 1, 2
Rgate(2*theta+pi) | 1, 2
BSgate(pi/4, pi/2) | 1, 2
Rgate(beta-theta+pi) | 1
Rgate(beta-theta) | 2
The phase-shifts by alpha and beta are thus pushed consecutively through
all the T unitaries of the interferometer and these unitaries are converted
into pairs of symmetric beamsplitters with two phase shifts. The phase
shifts at the end of the interferometer are added to the ones from the
diagonal unitary at the end of the interferometer obtained from :func:`clements_phase_end`.
Args:
V (array): Unitary matrix of size n_size
tol (int): the number of decimal places to use when determining
whether the matrix is unitary
Returns:
tuple[array]: returns a tuple of the form ``(tlist,np.diag(localV))``
where:
* ``tlist``: list containing ``[n,m,internal_phase,external_phase,n_size]`` of the T unitaries needed
* ``localV``: Diagonal unitary matrix to be applied at the end of circuit
"""
tlist, diags = clements_phase_end(V, tol)
new_tlist, new_diags = [], np.ones(len(diags), dtype=diags.dtype)
for i in tlist:
em, en = int(i[0]), int(i[1])
alpha, beta = np.angle(new_diags[em]), np.angle(new_diags[en])
theta, phi = i[2], i[3]
external_phase = np.fmod((phi + alpha - beta), 2 * np.pi)
internal_phase = np.fmod((np.pi + 2.0 * theta), 2 * np.pi)
new_alpha = beta - theta + np.pi
new_beta = 0*np.pi - theta + beta
new_i = [i[0], i[1], internal_phase, external_phase, i[4]]
new_diags[em], new_diags[en] = np.exp(1j*new_alpha), np.exp(1j*new_beta)
new_tlist = new_tlist + [new_i]
new_diags = diags * new_diags
return (new_tlist, new_diags)
| def rectangular_symmetric(V, tol=1e-11):
r"""Rectangular decomposition of a unitary into symmetric beamsplitters.
This decomposition starts with the output from :func:`clements_phase_end`
and further decomposes each of the T unitaries into two phase-shifters and
two symmetric (50:50) beamsplitters.
The two beamsplitters in this decomposition of T are modeled by :class:`ops.BSgate`
with arguments (pi/4, pi/2), and the two phase-shifters (see :class:`ops.Rgate`)
act on the input mode with the lower index of the two. The phase imposed
by the first phaseshifter (before the first beamsplitter) is named
`external_phase`, while we call the phase shift between the beamsplitters
`internal_phase`.
The algorithm applied in this function makes use of the following identity:
::
Rgate(alpha) | 1
Rgate(beta) | 2
Rgate(phi) | 1
BSgate(theta, 0) | 1, 2
equals
Rgate(phi+alpha-beta) | 1
BSgate(pi/4, pi/2) | 1, 2
Rgate(2*theta+pi) | 1, 2
BSgate(pi/4, pi/2) | 1, 2
Rgate(beta-theta+pi) | 1
Rgate(beta-theta) | 2
The phase-shifts by alpha and beta are thus pushed consecutively through
all the T unitaries of the interferometer and these unitaries are converted
into pairs of symmetric beamsplitters with two phase shifts. The phase
shifts at the end of the interferometer are added to the ones from the
diagonal unitary at the end of the interferometer obtained from :func:`clements_phase_end`.
Args:
V (array): unitary matrix of size n_size
tol (int): the number of decimal places to use when determining
whether the matrix is unitary
Returns:
tuple[array]: returns a tuple of the form ``(tlist,np.diag(localV))``
where:
* ``tlist``: list containing ``[n,m,internal_phase,external_phase,n_size]`` of the T unitaries needed
* ``localV``: Diagonal unitary matrix to be applied at the end of circuit
"""
tlist, diags = clements_phase_end(V, tol)
new_tlist, new_diags = [], np.ones(len(diags), dtype=diags.dtype)
for i in tlist:
em, en = int(i[0]), int(i[1])
alpha, beta = np.angle(new_diags[em]), np.angle(new_diags[en])
theta, phi = i[2], i[3]
external_phase = np.fmod((phi + alpha - beta), 2 * np.pi)
internal_phase = np.fmod((np.pi + 2.0 * theta), 2 * np.pi)
new_alpha = beta - theta + np.pi
new_beta = 0*np.pi - theta + beta
new_i = [i[0], i[1], internal_phase, external_phase, i[4]]
new_diags[em], new_diags[en] = np.exp(1j*new_alpha), np.exp(1j*new_beta)
new_tlist = new_tlist + [new_i]
new_diags = diags * new_diags
return (new_tlist, new_diags)
|
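
Both versions above reduce the internal and external phases with np.fmod. A small check of what that actually returns (plain NumPy behaviour, independent of Strawberry Fields): np.fmod keeps the sign of its first argument, so the wrapped phases land in (-2*pi, 2*pi) rather than [0, 2*pi); since the downstream rotation gates are 2*pi-periodic, this is only a representational detail.

import numpy as np

two_pi = 2 * np.pi
print(np.fmod(7.0, two_pi))   # ~0.7168: positive inputs are reduced into [0, 2*pi)
print(np.fmod(-0.5, two_pi))  # -0.5: the sign of the dividend is kept
print(np.mod(-0.5, two_pi))   # ~5.7832: the [0, 2*pi) representative, if one were wanted
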
32,090 | def main():
args = demisto.args()
entry_id = args.get('entryid')
max_depth = arg_to_number(args.get('max_depth', '3'))
if not max_depth or max_depth < 1:
return_error('Minimum max_depth is 1, the script will parse just the top email')
parse_only_headers = argToBoolean(args.get('parse_only_headers', 'false'))
forced_encoding = args.get('forced_encoding')
default_encoding = args.get('default_encoding')
nesting_level_to_return = args.get('nesting_level_to_return', 'All files')
file_type, file_path, file_name = extract_file_info(entry_id)
try:
email_parser = EmailParser(file_path=file_path, max_depth=max_depth, parse_only_headers=parse_only_headers,
file_info=file_type, forced_encoding=forced_encoding,
default_encoding=default_encoding)
output = email_parser.parse()
results = []
if isinstance(output, dict):
output = [output]
elif nesting_level_to_return != 'All files':
output = parse_nesting_level(nesting_level_to_return, output)
for email in output:
if email.get('AttachmentsData'):
for attachment in email.get('AttachmentsData'):
if (name := attachment.get('Name')) and (content := attachment.get('FileData')):
del attachment['FileData']
attachment['FilePath'] = save_file(name, content)
results.append(CommandResults(
outputs_prefix='Email',
outputs=email,
readable_output=data_to_md(email, file_name, email.get('ParentFileName', None),
print_only_headers=parse_only_headers),
raw_response=email))
return_results(results)
except Exception as e:
return_error(str(e) + "\n\nTrace:\n" + traceback.format_exc())
| def main():
args = demisto.args()
entry_id = args.get('entryid')
max_depth = arg_to_number(args.get('max_depth', '3'))
if not max_depth or max_depth < 1:
return_error('Minimum max_depth is 1, the script will parse just the top email')
parse_only_headers = argToBoolean(args.get('parse_only_headers', 'false'))
forced_encoding = args.get('forced_encoding')
default_encoding = args.get('default_encoding')
nesting_level_to_return = args.get('nesting_level_to_return', 'All files')
file_type, file_path, file_name = extract_file_info(entry_id)
try:
email_parser = EmailParser(file_path=file_path, max_depth=max_depth, parse_only_headers=parse_only_headers,
file_info=file_type, forced_encoding=forced_encoding,
default_encoding=default_encoding)
output = email_parser.parse()
results = []
if isinstance(output, dict):
output = [output]
elif output and nesting_level_to_return != 'All files':
output = parse_nesting_level(nesting_level_to_return, output)
for email in output:
if email.get('AttachmentsData'):
for attachment in email.get('AttachmentsData'):
if (name := attachment.get('Name')) and (content := attachment.get('FileData')):
del attachment['FileData']
attachment['FilePath'] = save_file(name, content)
results.append(CommandResults(
outputs_prefix='Email',
outputs=email,
readable_output=data_to_md(email, file_name, email.get('ParentFileName', None),
print_only_headers=parse_only_headers),
raw_response=email))
return_results(results)
except Exception as e:
return_error(str(e) + "\n\nTrace:\n" + traceback.format_exc())
|
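
The single change in this record is the extra `output and` check before filtering by nesting level, which keeps a None or empty parse result from being iterated. A minimal, self-contained illustration of the failure mode it guards against (pick_level below is a hypothetical stand-in for the real parse_nesting_level):

def pick_level(level, emails):
    # Hypothetical stand-in: iterating a None result here would raise TypeError.
    return [e for e in emails if e.get("ParentFileName") is None]

def collect(output, level="All files"):
    if isinstance(output, dict):
        output = [output]
    elif output and level != "All files":  # the added guard: None/[] skip the filter
        output = pick_level(level, output)
    return output or []

print(collect({"Subject": "hi"}))           # [{'Subject': 'hi'}]
print(collect(None, level="Outer file"))    # [] instead of a TypeError
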
5,804 | def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
limlst=50):
"""
Compute a definite integral.
Integrate func from `a` to `b` (possibly infinite interval) using a
technique from the Fortran library QUADPACK.
Parameters
----------
func : {function, scipy.LowLevelCallable}
A Python function or method to integrate. If `func` takes many
arguments, it is integrated along the axis corresponding to the
first argument.
If the user desires improved integration performance, then `f` may
be a `scipy.LowLevelCallable` with one of the signatures::
double func(double x)
double func(double x, void *user_data)
double func(int n, double *xx)
double func(int n, double *xx, void *user_data)
The ``user_data`` is the data contained in the `scipy.LowLevelCallable`.
In the call forms with ``xx``, ``n`` is the length of the ``xx``
array which contains ``xx[0] == x`` and the rest of the items are
numbers contained in the ``args`` argument of quad.
In addition, certain ctypes call signatures are supported for
backward compatibility, but those should not be used in new code.
a : float
Lower limit of integration (use -numpy.inf for -infinity).
b : float
Upper limit of integration (use numpy.inf for +infinity).
args : tuple, optional
Extra arguments to pass to `func`.
full_output : int, optional
Non-zero to return a dictionary of integration information.
If non-zero, warning messages are also suppressed and the
message is appended to the output tuple.
Returns
-------
y : float
The integral of func from `a` to `b`.
abserr : float
An estimate of the absolute error in the result.
infodict : dict
A dictionary containing additional information.
Run scipy.integrate.quad_explain() for more information.
message
A convergence message.
explain
Appended only with 'cos' or 'sin' weighting and infinite
integration limits, it contains an explanation of the codes in
infodict['ierlst']
Other Parameters
----------------
epsabs : float or int, optional
Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain
an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the
numerical approximation. See `epsrel` below.
epsrel : float or int, optional
Relative error tolerance. Default is 1.49e-8.
If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
and ``50 * (machine epsilon)``. See `epsabs` above.
limit : float or int, optional
An upper bound on the number of subintervals used in the adaptive
algorithm.
points : (sequence of floats,ints), optional
A sequence of break points in the bounded integration interval
where local difficulties of the integrand may occur (e.g.,
singularities, discontinuities). The sequence does not have
to be sorted. Note that this option cannot be used in conjunction
with ``weight``.
weight : float or int, optional
String indicating weighting function. Full explanation for this
and the remaining arguments can be found below.
wvar : optional
Variables for use with weighting functions.
wopts : optional
Optional input for reusing Chebyshev moments.
maxp1 : float or int, optional
An upper bound on the number of Chebyshev moments.
limlst : int, optional
Upper bound on the number of cycles (>=3) for use with a sinusoidal
weighting and an infinite end-point.
See Also
--------
dblquad : double integral
tplquad : triple integral
nquad : n-dimensional integrals (uses `quad` recursively)
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simpson : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Notes
-----
**Extra information for quad() inputs and outputs**
If full_output is non-zero, then the third output argument
(infodict) is a dictionary with entries as tabulated below. For
infinite limits, the range is transformed to (0,1) and the
optional outputs are given with respect to this transformed range.
Let M be the input argument limit and let K be infodict['last'].
The entries are:
'neval'
The number of function evaluations.
'last'
The number, K, of subintervals produced in the subdivision process.
'alist'
A rank-1 array of length M, the first K elements of which are the
left end points of the subintervals in the partition of the
integration range.
'blist'
A rank-1 array of length M, the first K elements of which are the
right end points of the subintervals.
'rlist'
A rank-1 array of length M, the first K elements of which are the
integral approximations on the subintervals.
'elist'
A rank-1 array of length M, the first K elements of which are the
moduli of the absolute error estimates on the subintervals.
'iord'
A rank-1 integer array of length M, the first L elements of
which are pointers to the error estimates over the subintervals
with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
sequence ``infodict['iord']`` and let E be the sequence
``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a
decreasing sequence.
If the input argument points is provided (i.e., it is not None),
the following additional outputs are placed in the output
dictionary. Assume the points sequence is of length P.
'pts'
A rank-1 array of length P+2 containing the integration limits
and the break points of the intervals in ascending order.
This is an array giving the subintervals over which integration
will occur.
'level'
A rank-1 integer array of length M (=limit), containing the
subdivision levels of the subintervals, i.e., if (aa,bb) is a
subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]``
are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
'ndin'
A rank-1 integer array of length P+2. After the first integration
over the intervals (pts[1], pts[2]), the error estimates over some
of the intervals may have been increased artificially in order to
put their subdivision forward. This array has ones in slots
corresponding to the subintervals for which this happens.
**Weighting the integrand**
The input variables, *weight* and *wvar*, are used to weight the
integrand by a select list of functions. Different integration
methods are used to compute the integral with these weighting
functions, and these do not support specifying break points. The
possible values of weight and the corresponding weighting functions are.
========== =================================== =====================
``weight`` Weight function used ``wvar``
========== =================================== =====================
'cos' cos(w*x) wvar = w
'sin' sin(w*x) wvar = w
'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta)
'alg-loga' g(x)*log(x-a) wvar = (alpha, beta)
'alg-logb' g(x)*log(b-x) wvar = (alpha, beta)
'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta)
'cauchy' 1/(x-c) wvar = c
========== =================================== =====================
wvar holds the parameter w, (alpha, beta), or c depending on the weight
selected. In these expressions, a and b are the integration limits.
For the 'cos' and 'sin' weighting, additional inputs and outputs are
available.
For finite integration limits, the integration is performed using a
Clenshaw-Curtis method which uses Chebyshev moments. For repeated
calculations, these moments are saved in the output dictionary:
'momcom'
The maximum level of Chebyshev moments that have been computed,
i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
computed for intervals of length ``|b-a| * 2**(-l)``,
``l=0,1,...,M_c``.
'nnlog'
A rank-1 integer array of length M(=limit), containing the
subdivision levels of the subintervals, i.e., an element of this
array is equal to l if the corresponding subinterval is
``|b-a|* 2**(-l)``.
'chebmo'
A rank-2 array of shape (25, maxp1) containing the computed
Chebyshev moments. These can be passed on to an integration
over the same interval by passing this array as the second
element of the sequence wopts and passing infodict['momcom'] as
the first element.
If one of the integration limits is infinite, then a Fourier integral is
computed (assuming w neq 0). If full_output is 1 and a numerical error
is encountered, besides the error message attached to the output tuple,
a dictionary is also appended to the output tuple which translates the
error codes in the array ``info['ierlst']`` to English messages. The
output information dictionary contains the following entries instead of
'last', 'alist', 'blist', 'rlist', and 'elist':
'lst'
The number of subintervals needed for the integration (call it ``K_f``).
'rslst'
A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
contain the integral contribution over the interval
``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
and ``k=1,2,...,K_f``.
'erlst'
A rank-1 array of length ``M_f`` containing the error estimate
corresponding to the interval in the same position in
``infodict['rslist']``.
'ierlst'
A rank-1 integer array of length ``M_f`` containing an error flag
corresponding to the interval in the same position in
``infodict['rslist']``. See the explanation dictionary (last entry
in the output tuple) for the meaning of the codes.
Examples
--------
Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result
>>> from scipy import integrate
>>> x2 = lambda x: x**2
>>> integrate.quad(x2, 0, 4)
(21.333333333333332, 2.3684757858670003e-13)
>>> print(4**3 / 3.) # analytical result
21.3333333333
Calculate :math:`\\int^\\infty_0 e^{-x} dx`
>>> invexp = lambda x: np.exp(-x)
>>> integrate.quad(invexp, 0, np.inf)
(1.0, 5.842605999138044e-11)
Calculate :math:`\\int^1_0 a x dx` for :math:`a = 1, 3`
>>> f = lambda x, a : a*x
>>> y, err = integrate.quad(f, 0, 1, args=(1,))
>>> y
0.5
>>> y, err = integrate.quad(f, 0, 1, args=(3,))
>>> y
1.5
Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
y parameter as 1::
testlib.c =>
double func(int n, double args[n]){
return args[0]*args[0] + args[1]*args[1];}
compile to library testlib.*
::
from scipy import integrate
import ctypes
lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
lib.func.restype = ctypes.c_double
lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
integrate.quad(lib.func,0,1,(1))
#(1.3333333333333333, 1.4802973661668752e-14)
print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
# 1.3333333333333333
Be aware that pulse shapes and other sharp features as compared to the
size of the integration interval may not be integrated correctly using
this method. A simplified example of this limitation is integrating a
y-axis reflected step function with many zero values within the integrals
bounds.
>>> y = lambda x: 1 if x<=0 else 0
>>> integrate.quad(y, -1, 1)
(1.0, 1.1102230246251565e-14)
>>> integrate.quad(y, -1, 100)
(1.0000000002199108, 1.0189464580163188e-08)
>>> integrate.quad(y, -1, 10000)
(0.0, 0.0)
"""
if not isinstance(args, tuple):
args = (args,)
# check the limits of integration: \int_a^b, expect a < b
flip, a, b = b < a, min(a, b), max(a, b)
if weight is None:
retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
points)
else:
if points is not None:
msg = ("Break points cannot be specified when using weighted integrand.\n"
"Continuing, ignoring specified points.")
warnings.warn(msg, IntegrationWarning, stacklevel=2)
retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
limlst, limit, maxp1, weight, wvar, wopts)
if flip:
retval = (-retval[0],) + retval[1:]
ier = retval[-1]
if ier == 0:
return retval[:-1]
msgs = {80: "A Python error occurred possibly while calling the function.",
1: "The maximum number of subdivisions (%d) has been achieved.\n If increasing the limit yields no improvement it is advised to analyze \n the integrand in order to determine the difficulties. If the position of a \n local difficulty can be determined (singularity, discontinuity) one will \n probably gain from splitting up the interval and calling the integrator \n on the subranges. Perhaps a special-purpose integrator should be used." % limit,
2: "The occurrence of roundoff error is detected, which prevents \n the requested tolerance from being achieved. The error may be \n underestimated.",
3: "Extremely bad integrand behavior occurs at some points of the\n integration interval.",
4: "The algorithm does not converge. Roundoff error is detected\n in the extrapolation table. It is assumed that the requested tolerance\n cannot be achieved, and that the returned result (if full_output = 1) is \n the best which can be obtained.",
5: "The integral is probably divergent, or slowly convergent.",
6: "The input is invalid.",
7: "Abnormal termination of the routine. The estimates for result\n and error are less reliable. It is assumed that the requested accuracy\n has not been achieved.",
'unknown': "Unknown error."}
if weight in ['cos','sin'] and (b == Inf or a == -Inf):
msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n *pi/abs(omega), for k = 1, 2, ..., lst. One can allow more cycles by increasing the value of limlst. Look at info['ierlst'] with full_output=1."
msgs[4] = "The extrapolation table constructed for convergence acceleration\n of the series formed by the integral contributions over the cycles, \n does not converge to within the requested accuracy. Look at \n info['ierlst'] with full_output=1."
msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n Location and type of the difficulty involved can be determined from \n the vector info['ierlist'] obtained with full_output=1."
explain = {1: "The maximum number of subdivisions (= limit) has been \n achieved on this cycle.",
2: "The occurrence of roundoff error is detected and prevents\n the tolerance imposed on this cycle from being achieved.",
3: "Extremely bad integrand behavior occurs at some points of\n this cycle.",
4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle. It is assumed that the result on this interval is the best which can be obtained.",
5: "The integral over this cycle is probably divergent or slowly convergent."}
try:
msg = msgs[ier]
except KeyError:
msg = msgs['unknown']
if ier in [1,2,3,4,5,7]:
if full_output:
if weight in ['cos', 'sin'] and (b == Inf or a == -Inf):
return retval[:-1] + (msg, explain)
else:
return retval[:-1] + (msg,)
else:
warnings.warn(msg, IntegrationWarning, stacklevel=2)
return retval[:-1]
elif ier == 6: # Forensic decision tree when QUADPACK throws ier=6
if epsabs <= 0: # Small error tolerance - applies to all methods
if epsrel < max(50 * sys.float_info.epsilon, 5e-29):
msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both"
" 5e-29 and 50*(machine epsilon).")
elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == Inf):
msg = ("Sine or cosine weighted intergals with infinite domain"
" must have 'epsabs'>0.")
elif weight is None:
if points is None: # QAGSE/QAGIE
msg = ("Invalid 'limit' argument. There must be"
" at least one subinterval")
else: # QAGPE
if not (min(a, b) <= min(points) <= max(points) <= max(a, b)):
msg = ("All break points in 'points' must lie within the"
" integration limits.")
elif len(points) >= limit:
msg = ("Number of break points ({:d})"
" must be less than subinterval"
" limit ({:d})").format(len(points), limit)
else:
if maxp1 < 1:
msg = "Chebyshev moment limit maxp1 must be >=1."
elif weight in ('cos', 'sin') and abs(a+b) == Inf: # QAWFE
msg = "Cycle limit limlst must be >=3."
elif weight.startswith('alg'): # QAWSE
if min(wvar) < -1:
msg = "wvar parameters (alpha, beta) must both be >= -1."
if b < a:
msg = "Integration limits a, b must satistfy a<b."
elif weight == 'cauchy' and wvar in (a, b):
msg = ("Parameter 'wvar' must not equal"
" integration limits 'a' or 'b'.")
raise ValueError(msg)
| def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
limlst=50):
"""
Compute a definite integral.
Integrate func from `a` to `b` (possibly infinite interval) using a
technique from the Fortran library QUADPACK.
Parameters
----------
func : {function, scipy.LowLevelCallable}
A Python function or method to integrate. If `func` takes many
arguments, it is integrated along the axis corresponding to the
first argument.
If the user desires improved integration performance, then `f` may
be a `scipy.LowLevelCallable` with one of the signatures::
double func(double x)
double func(double x, void *user_data)
double func(int n, double *xx)
double func(int n, double *xx, void *user_data)
The ``user_data`` is the data contained in the `scipy.LowLevelCallable`.
In the call forms with ``xx``, ``n`` is the length of the ``xx``
array which contains ``xx[0] == x`` and the rest of the items are
numbers contained in the ``args`` argument of quad.
In addition, certain ctypes call signatures are supported for
backward compatibility, but those should not be used in new code.
a : float
Lower limit of integration (use -numpy.inf for -infinity).
b : float
Upper limit of integration (use numpy.inf for +infinity).
args : tuple, optional
Extra arguments to pass to `func`.
full_output : int, optional
Non-zero to return a dictionary of integration information.
If non-zero, warning messages are also suppressed and the
message is appended to the output tuple.
Returns
-------
y : float
The integral of func from `a` to `b`.
abserr : float
An estimate of the absolute error in the result.
infodict : dict
A dictionary containing additional information.
Run scipy.integrate.quad_explain() for more information.
message
A convergence message.
explain
Appended only with 'cos' or 'sin' weighting and infinite
integration limits, it contains an explanation of the codes in
infodict['ierlst']
Other Parameters
----------------
epsabs : float or int, optional
Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain
an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the
numerical approximation. See `epsrel` below.
epsrel : float or int, optional
Relative error tolerance. Default is 1.49e-8.
If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
and ``50 * (machine epsilon)``. See `epsabs` above.
limit : float or int, optional
An upper bound on the number of subintervals used in the adaptive
algorithm.
points : (sequence of floats,ints), optional
A sequence of break points in the bounded integration interval
where local difficulties of the integrand may occur (e.g.,
singularities, discontinuities). The sequence does not have
to be sorted. Note that this option cannot be used in conjunction
with ``weight``.
weight : float or int, optional
String indicating weighting function. Full explanation for this
and the remaining arguments can be found below.
wvar : optional
Variables for use with weighting functions.
wopts : optional
Optional input for reusing Chebyshev moments.
maxp1 : float or int, optional
An upper bound on the number of Chebyshev moments.
limlst : int, optional
Upper bound on the number of cycles (>=3) for use with a sinusoidal
weighting and an infinite end-point.
See Also
--------
dblquad : double integral
tplquad : triple integral
nquad : n-dimensional integrals (uses `quad` recursively)
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simpson : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Notes
-----
**Extra information for quad() inputs and outputs**
If full_output is non-zero, then the third output argument
(infodict) is a dictionary with entries as tabulated below. For
infinite limits, the range is transformed to (0,1) and the
optional outputs are given with respect to this transformed range.
Let M be the input argument limit and let K be infodict['last'].
The entries are:
'neval'
The number of function evaluations.
'last'
The number, K, of subintervals produced in the subdivision process.
'alist'
A rank-1 array of length M, the first K elements of which are the
left end points of the subintervals in the partition of the
integration range.
'blist'
A rank-1 array of length M, the first K elements of which are the
right end points of the subintervals.
'rlist'
A rank-1 array of length M, the first K elements of which are the
integral approximations on the subintervals.
'elist'
A rank-1 array of length M, the first K elements of which are the
moduli of the absolute error estimates on the subintervals.
'iord'
A rank-1 integer array of length M, the first L elements of
which are pointers to the error estimates over the subintervals
with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
sequence ``infodict['iord']`` and let E be the sequence
``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a
decreasing sequence.
If the input argument points is provided (i.e., it is not None),
the following additional outputs are placed in the output
dictionary. Assume the points sequence is of length P.
'pts'
A rank-1 array of length P+2 containing the integration limits
and the break points of the intervals in ascending order.
This is an array giving the subintervals over which integration
will occur.
'level'
A rank-1 integer array of length M (=limit), containing the
subdivision levels of the subintervals, i.e., if (aa,bb) is a
subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]``
are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
'ndin'
A rank-1 integer array of length P+2. After the first integration
over the intervals (pts[1], pts[2]), the error estimates over some
of the intervals may have been increased artificially in order to
put their subdivision forward. This array has ones in slots
corresponding to the subintervals for which this happens.
**Weighting the integrand**
The input variables, *weight* and *wvar*, are used to weight the
integrand by a select list of functions. Different integration
methods are used to compute the integral with these weighting
functions, and these do not support specifying break points. The
possible values of weight and the corresponding weighting functions are.
========== =================================== =====================
``weight`` Weight function used ``wvar``
========== =================================== =====================
'cos' cos(w*x) wvar = w
'sin' sin(w*x) wvar = w
'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta)
'alg-loga' g(x)*log(x-a) wvar = (alpha, beta)
'alg-logb' g(x)*log(b-x) wvar = (alpha, beta)
'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta)
'cauchy' 1/(x-c) wvar = c
========== =================================== =====================
wvar holds the parameter w, (alpha, beta), or c depending on the weight
selected. In these expressions, a and b are the integration limits.
For the 'cos' and 'sin' weighting, additional inputs and outputs are
available.
For finite integration limits, the integration is performed using a
Clenshaw-Curtis method which uses Chebyshev moments. For repeated
calculations, these moments are saved in the output dictionary:
'momcom'
The maximum level of Chebyshev moments that have been computed,
i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
computed for intervals of length ``|b-a| * 2**(-l)``,
``l=0,1,...,M_c``.
'nnlog'
A rank-1 integer array of length M(=limit), containing the
subdivision levels of the subintervals, i.e., an element of this
array is equal to l if the corresponding subinterval is
``|b-a|* 2**(-l)``.
'chebmo'
A rank-2 array of shape (25, maxp1) containing the computed
Chebyshev moments. These can be passed on to an integration
over the same interval by passing this array as the second
element of the sequence wopts and passing infodict['momcom'] as
the first element.
If one of the integration limits is infinite, then a Fourier integral is
computed (assuming w neq 0). If full_output is 1 and a numerical error
is encountered, besides the error message attached to the output tuple,
a dictionary is also appended to the output tuple which translates the
error codes in the array ``info['ierlst']`` to English messages. The
output information dictionary contains the following entries instead of
'last', 'alist', 'blist', 'rlist', and 'elist':
'lst'
The number of subintervals needed for the integration (call it ``K_f``).
'rslst'
A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
contain the integral contribution over the interval
``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
and ``k=1,2,...,K_f``.
'erlst'
A rank-1 array of length ``M_f`` containing the error estimate
corresponding to the interval in the same position in
``infodict['rslist']``.
'ierlst'
A rank-1 integer array of length ``M_f`` containing an error flag
corresponding to the interval in the same position in
``infodict['rslist']``. See the explanation dictionary (last entry
in the output tuple) for the meaning of the codes.
Examples
--------
Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result
>>> from scipy import integrate
>>> x2 = lambda x: x**2
>>> integrate.quad(x2, 0, 4)
(21.333333333333332, 2.3684757858670003e-13)
>>> print(4**3 / 3.) # analytical result
21.3333333333
Calculate :math:`\\int^\\infty_0 e^{-x} dx`
>>> invexp = lambda x: np.exp(-x)
>>> integrate.quad(invexp, 0, np.inf)
(1.0, 5.842605999138044e-11)
Calculate :math:`\\int^1_0 a x dx` for :math:`a = 1, 3`
>>> f = lambda x, a: a*x
>>> y, err = integrate.quad(f, 0, 1, args=(1,))
>>> y
0.5
>>> y, err = integrate.quad(f, 0, 1, args=(3,))
>>> y
1.5
Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
y parameter as 1::
testlib.c =>
double func(int n, double args[n]){
return args[0]*args[0] + args[1]*args[1];}
compile to library testlib.*
::
from scipy import integrate
import ctypes
lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
lib.func.restype = ctypes.c_double
lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
integrate.quad(lib.func,0,1,(1))
#(1.3333333333333333, 1.4802973661668752e-14)
print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
# 1.3333333333333333
Be aware that pulse shapes and other sharp features as compared to the
size of the integration interval may not be integrated correctly using
this method. A simplified example of this limitation is integrating a
y-axis reflected step function with many zero values within the integrals
bounds.
>>> y = lambda x: 1 if x<=0 else 0
>>> integrate.quad(y, -1, 1)
(1.0, 1.1102230246251565e-14)
>>> integrate.quad(y, -1, 100)
(1.0000000002199108, 1.0189464580163188e-08)
>>> integrate.quad(y, -1, 10000)
(0.0, 0.0)
"""
if not isinstance(args, tuple):
args = (args,)
# check the limits of integration: \int_a^b, expect a < b
flip, a, b = b < a, min(a, b), max(a, b)
if weight is None:
retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
points)
else:
if points is not None:
msg = ("Break points cannot be specified when using weighted integrand.\n"
"Continuing, ignoring specified points.")
warnings.warn(msg, IntegrationWarning, stacklevel=2)
retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
limlst, limit, maxp1, weight, wvar, wopts)
if flip:
retval = (-retval[0],) + retval[1:]
ier = retval[-1]
if ier == 0:
return retval[:-1]
msgs = {80: "A Python error occurred possibly while calling the function.",
1: "The maximum number of subdivisions (%d) has been achieved.\n If increasing the limit yields no improvement it is advised to analyze \n the integrand in order to determine the difficulties. If the position of a \n local difficulty can be determined (singularity, discontinuity) one will \n probably gain from splitting up the interval and calling the integrator \n on the subranges. Perhaps a special-purpose integrator should be used." % limit,
2: "The occurrence of roundoff error is detected, which prevents \n the requested tolerance from being achieved. The error may be \n underestimated.",
3: "Extremely bad integrand behavior occurs at some points of the\n integration interval.",
4: "The algorithm does not converge. Roundoff error is detected\n in the extrapolation table. It is assumed that the requested tolerance\n cannot be achieved, and that the returned result (if full_output = 1) is \n the best which can be obtained.",
5: "The integral is probably divergent, or slowly convergent.",
6: "The input is invalid.",
7: "Abnormal termination of the routine. The estimates for result\n and error are less reliable. It is assumed that the requested accuracy\n has not been achieved.",
'unknown': "Unknown error."}
if weight in ['cos','sin'] and (b == Inf or a == -Inf):
msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n *pi/abs(omega), for k = 1, 2, ..., lst. One can allow more cycles by increasing the value of limlst. Look at info['ierlst'] with full_output=1."
msgs[4] = "The extrapolation table constructed for convergence acceleration\n of the series formed by the integral contributions over the cycles, \n does not converge to within the requested accuracy. Look at \n info['ierlst'] with full_output=1."
msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n Location and type of the difficulty involved can be determined from \n the vector info['ierlist'] obtained with full_output=1."
explain = {1: "The maximum number of subdivisions (= limit) has been \n achieved on this cycle.",
2: "The occurrence of roundoff error is detected and prevents\n the tolerance imposed on this cycle from being achieved.",
3: "Extremely bad integrand behavior occurs at some points of\n this cycle.",
4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle. It is assumed that the result on this interval is the best which can be obtained.",
5: "The integral over this cycle is probably divergent or slowly convergent."}
try:
msg = msgs[ier]
except KeyError:
msg = msgs['unknown']
if ier in [1,2,3,4,5,7]:
if full_output:
if weight in ['cos', 'sin'] and (b == Inf or a == -Inf):
return retval[:-1] + (msg, explain)
else:
return retval[:-1] + (msg,)
else:
warnings.warn(msg, IntegrationWarning, stacklevel=2)
return retval[:-1]
elif ier == 6: # Forensic decision tree when QUADPACK throws ier=6
if epsabs <= 0: # Small error tolerance - applies to all methods
if epsrel < max(50 * sys.float_info.epsilon, 5e-29):
msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both"
" 5e-29 and 50*(machine epsilon).")
elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == Inf):
msg = ("Sine or cosine weighted intergals with infinite domain"
" must have 'epsabs'>0.")
elif weight is None:
if points is None: # QAGSE/QAGIE
msg = ("Invalid 'limit' argument. There must be"
" at least one subinterval")
else: # QAGPE
if not (min(a, b) <= min(points) <= max(points) <= max(a, b)):
msg = ("All break points in 'points' must lie within the"
" integration limits.")
elif len(points) >= limit:
msg = ("Number of break points ({:d})"
" must be less than subinterval"
" limit ({:d})").format(len(points), limit)
else:
if maxp1 < 1:
msg = "Chebyshev moment limit maxp1 must be >=1."
elif weight in ('cos', 'sin') and abs(a+b) == Inf: # QAWFE
msg = "Cycle limit limlst must be >=3."
elif weight.startswith('alg'): # QAWSE
if min(wvar) < -1:
msg = "wvar parameters (alpha, beta) must both be >= -1."
if b < a:
msg = "Integration limits a, b must satistfy a<b."
elif weight == 'cauchy' and wvar in (a, b):
msg = ("Parameter 'wvar' must not equal"
" integration limits 'a' or 'b'.")
raise ValueError(msg)
|
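
The docstring above states the target accuracy as ``abs(i-result) <= max(epsabs, epsrel*abs(i))`` and returns an error estimate alongside the value. A quick check of both claims on an integral with a known closed form:

import numpy as np
from scipy import integrate

val, err = integrate.quad(np.exp, 0.0, 1.0)  # integral of exp(x) over [0, 1]
exact = np.e - 1.0
print(val, err)
assert abs(val - exact) <= max(1.49e-8, 1.49e-8 * abs(exact))  # default tolerances are met
assert abs(val - exact) <= err                                 # the reported estimate bounds the true error here
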
8,292 | def load(lib, IM_VERSION, IM_QUANTUM_DEPTH, IM_HDRI):
"""Define Pixel Wand methods. The ImageMagick version is given as
    a second argument for comparison. This makes it quick to determine which
methods are available from the library, and can be implemented as::
if IM_VERSION < 0x700:
# ... do ImageMagick-6 methods ...
else
# ... do ImageMagick-7 methods ...
.. seealso::
#include "wand/pixel-wand.h"
// Or
#include "MagickWand/pixel-wand.h"
Mapping Pixel methods also requires the wand library to evaluate
what "Quantum" is to ImageMagick. We must query the library
to identify if HDRI is enabled, and what the quantum depth is.
.. seealso::
MagickCore/magick-type.h
:param lib: the loaded ``MagickWand`` library.
:type lib: :class:`ctypes.CDLL`
:param IM_VERSION: the ImageMagick version number (i.e. 0x0689).
:type IM_VERSION: :class:`numbers.Integral`
:param IM_QUANTUM_DEPTH: the ImageMagick Quantum Depth
(must be 8, 16, 32, or 64).
:type IM_QUANTUM_DEPTH: :class:`numbers.Integral`
:param IM_HDRI: if ImageMagick was compiled with HDRI support.
:type IM_HDRI: :class:`bool`
.. versionadded:: 0.5.0
"""
if not isinstance(lib, CDLL):
        raise AttributeError(repr(lib) + " is not an instance of ctypes.CDLL")
if not isinstance(IM_VERSION, numbers.Integral):
raise AttributeError("Expecting MagickCore version number")
    if IM_QUANTUM_DEPTH not in [8, 16, 32, 64]:
raise AttributeError("QUANTUM_DEPTH must be one of 8, 16, 32, or 64")
is_im_6 = IM_VERSION < 0x700
is_im_7 = IM_VERSION >= 0x700
# Check for IBM Z Systems.
if 's309x' == platform.machine():
FloatType = c_double
else:
FloatType = c_float
if IM_QUANTUM_DEPTH == 8:
QuantumType = FloatType if IM_HDRI else c_ubyte
elif IM_QUANTUM_DEPTH == 16:
QuantumType = FloatType if IM_HDRI else c_ushort
elif IM_QUANTUM_DEPTH == 32:
QuantumType = c_double if IM_HDRI else c_uint
elif IM_QUANTUM_DEPTH == 64:
QuantumType = c_longdouble
lib.ClearPixelWand.argtypes = [c_void_p]
lib.ClonePixelWand.argtypes = [c_void_p]
lib.ClonePixelWand.restype = c_void_p
lib.DestroyPixelWand.argtypes = [c_void_p]
lib.DestroyPixelWand.restype = c_void_p
lib.DestroyPixelWands.argtypes = [POINTER(c_void_p), c_size_t]
lib.DestroyPixelWands.restype = POINTER(c_void_p)
lib.IsPixelWand.argtypes = [c_void_p]
lib.IsPixelWandSimilar.argtypes = [c_void_p, c_void_p, c_double]
lib.NewPixelWand.argtypes = []
lib.NewPixelWand.restype = c_void_p
lib.PixelClearException.argtypes = [c_void_p]
lib.PixelClearException.restype = c_int
lib.PixelGetAlpha.argtypes = [c_void_p]
lib.PixelGetAlpha.restype = c_double
lib.PixelGetAlphaQuantum.argtypes = [c_void_p]
lib.PixelGetAlphaQuantum.restype = QuantumType
lib.PixelGetBlack.argtypes = [c_void_p]
lib.PixelGetBlack.restype = c_double
lib.PixelGetBlackQuantum.argtypes = [c_void_p]
lib.PixelGetBlackQuantum.restype = QuantumType
lib.PixelGetBlue.argtypes = [c_void_p]
lib.PixelGetBlue.restype = c_double
lib.PixelGetBlueQuantum.argtypes = [c_void_p]
lib.PixelGetBlueQuantum.restype = QuantumType
lib.PixelGetColorAsNormalizedString.argtypes = [c_void_p]
lib.PixelGetColorAsNormalizedString.restype = c_magick_char_p
lib.PixelGetColorAsString.argtypes = [c_void_p]
lib.PixelGetColorAsString.restype = c_magick_char_p
lib.PixelGetColorCount.argtypes = [c_void_p]
lib.PixelGetColorCount.restype = c_size_t
lib.PixelGetCyan.argtypes = [c_void_p]
lib.PixelGetCyan.restype = c_double
lib.PixelGetCyanQuantum.argtypes = [c_void_p]
lib.PixelGetCyanQuantum.restype = QuantumType
lib.PixelGetException.argtypes = [c_void_p, POINTER(c_int)]
lib.PixelGetException.restype = c_magick_char_p
lib.PixelGetExceptionType.argtypes = [c_void_p]
lib.PixelGetExceptionType.restype = c_int
lib.PixelGetFuzz.argtypes = [c_void_p]
lib.PixelGetFuzz.restype = c_double
lib.PixelGetGreen.argtypes = [c_void_p]
lib.PixelGetGreen.restype = c_double
lib.PixelGetGreenQuantum.argtypes = [c_void_p]
lib.PixelGetGreenQuantum.restype = QuantumType
lib.PixelGetHSL.argtypes = [c_void_p,
POINTER(c_double),
POINTER(c_double),
POINTER(c_double)]
lib.PixelGetIndex.argtypes = [c_void_p]
lib.PixelGetIndex.restype = QuantumType
lib.PixelGetMagenta.argtypes = [c_void_p]
lib.PixelGetMagenta.restype = c_double
lib.PixelGetMagentaQuantum.argtypes = [c_void_p]
lib.PixelGetMagentaQuantum.restype = QuantumType
lib.PixelGetMagickColor.argtypes = [c_void_p, c_void_p]
if is_im_7:
lib.PixelGetPixel.argtypes = [c_void_p]
lib.PixelGetPixel.restype = c_void_p
lib.PixelGetRed.argtypes = [c_void_p]
lib.PixelGetRed.restype = c_double
lib.PixelGetRedQuantum.argtypes = [c_void_p]
lib.PixelGetRedQuantum.restype = QuantumType
lib.PixelGetYellow.argtypes = [c_void_p]
lib.PixelGetYellow.restype = c_double
lib.PixelGetYellowQuantum.argtypes = [c_void_p]
lib.PixelGetYellowQuantum.restype = QuantumType
lib.PixelSetAlpha.argtypes = [c_void_p, c_double]
lib.PixelSetAlphaQuantum.argtypes = [c_void_p, QuantumType]
lib.PixelSetBlack.argtypes = [c_void_p, c_double]
lib.PixelSetBlackQuantum.argtypes = [c_void_p, QuantumType]
lib.PixelSetBlue.argtypes = [c_void_p, c_double]
lib.PixelSetBlueQuantum.argtypes = [c_void_p, QuantumType]
lib.PixelSetColor.argtypes = [c_void_p, c_char_p]
lib.PixelSetColor.restype = c_int
lib.PixelSetColorCount.argtypes = [c_void_p, c_size_t]
lib.PixelSetCyan.argtypes = [c_void_p, c_double]
lib.PixelSetCyanQuantum.argtypes = [c_void_p, QuantumType]
lib.PixelSetFuzz.argtypes = [c_void_p, c_double]
lib.PixelSetGreen.argtypes = [c_void_p, c_double]
lib.PixelSetGreenQuantum.argtypes = [c_void_p, QuantumType]
lib.PixelSetHSL.argtypes = [c_void_p, c_double, c_double, c_double]
lib.PixelSetIndex.argtypes = [c_void_p, QuantumType]
lib.PixelSetMagenta.argtypes = [c_void_p, c_double]
lib.PixelSetMagentaQuantum.argtypes = [c_void_p, QuantumType]
if is_im_6:
lib.PixelSetMagickColor.argtypes = [c_void_p, c_void_p]
else:
lib.PixelSetMagickColor = None
if is_im_7:
lib.PixelSetPixelColor.argtypes = [c_void_p, c_void_p]
else:
lib.PixelSetPixelColor = None
lib.PixelSetRed.argtypes = [c_void_p, c_double]
lib.PixelSetRedQuantum.argtypes = [c_void_p, QuantumType]
lib.PixelSetYellow.argtypes = [c_void_p, c_double]
lib.PixelSetYellowQuantum.argtypes = [c_void_p, QuantumType]
if is_im_6:
lib.PixelSetMagickColor.argtypes = [c_void_p, c_void_p]
lib.PixelSetPixelColor = None
if is_im_7:
lib.PixelSetMagickColor = None
lib.PixelSetPixelColor.argtypes = [c_void_p, c_void_p]
| def load(lib, IM_VERSION, IM_QUANTUM_DEPTH, IM_HDRI):
"""Define Pixel Wand methods. The ImageMagick version is given as
    a second argument for comparison. This makes it quick to determine which
methods are available from the library, and can be implemented as::
if IM_VERSION < 0x700:
# ... do ImageMagick-6 methods ...
else
# ... do ImageMagick-7 methods ...
.. seealso::
#include "wand/pixel-wand.h"
// Or
#include "MagickWand/pixel-wand.h"
Mapping Pixel methods also requires the wand library to evaluate
what "Quantum" is to ImageMagick. We must query the library
to identify if HDRI is enabled, and what the quantum depth is.
.. seealso::
MagickCore/magick-type.h
:param lib: the loaded ``MagickWand`` library.
:type lib: :class:`ctypes.CDLL`
:param IM_VERSION: the ImageMagick version number (i.e. 0x0689).
:type IM_VERSION: :class:`numbers.Integral`
:param IM_QUANTUM_DEPTH: the ImageMagick Quantum Depth
(must be 8, 16, 32, or 64).
:type IM_QUANTUM_DEPTH: :class:`numbers.Integral`
:param IM_HDRI: if ImageMagick was compiled with HDRI support.
:type IM_HDRI: :class:`bool`
.. versionadded:: 0.5.0
"""
if not isinstance(lib, CDLL):
        raise AttributeError(repr(lib) + " is not an instance of ctypes.CDLL")
if not isinstance(IM_VERSION, numbers.Integral):
raise AttributeError("Expecting MagickCore version number")
    if IM_QUANTUM_DEPTH not in [8, 16, 32, 64]:
raise AttributeError("QUANTUM_DEPTH must be one of 8, 16, 32, or 64")
is_im_6 = IM_VERSION < 0x700
is_im_7 = IM_VERSION >= 0x700
# Check for IBM Z Systems.
if 's390x' == platform.machine():
FloatType = c_double
else:
FloatType = c_float
if IM_QUANTUM_DEPTH == 8:
QuantumType = FloatType if IM_HDRI else c_ubyte
elif IM_QUANTUM_DEPTH == 16:
QuantumType = FloatType if IM_HDRI else c_ushort
elif IM_QUANTUM_DEPTH == 32:
QuantumType = c_double if IM_HDRI else c_uint
elif IM_QUANTUM_DEPTH == 64:
QuantumType = c_longdouble
lib.ClearPixelWand.argtypes = [c_void_p]
lib.ClonePixelWand.argtypes = [c_void_p]
lib.ClonePixelWand.restype = c_void_p
lib.DestroyPixelWand.argtypes = [c_void_p]
lib.DestroyPixelWand.restype = c_void_p
lib.DestroyPixelWands.argtypes = [POINTER(c_void_p), c_size_t]
lib.DestroyPixelWands.restype = POINTER(c_void_p)
lib.IsPixelWand.argtypes = [c_void_p]
lib.IsPixelWandSimilar.argtypes = [c_void_p, c_void_p, c_double]
lib.NewPixelWand.argtypes = []
lib.NewPixelWand.restype = c_void_p
lib.PixelClearException.argtypes = [c_void_p]
lib.PixelClearException.restype = c_int
lib.PixelGetAlpha.argtypes = [c_void_p]
lib.PixelGetAlpha.restype = c_double
lib.PixelGetAlphaQuantum.argtypes = [c_void_p]
lib.PixelGetAlphaQuantum.restype = QuantumType
lib.PixelGetBlack.argtypes = [c_void_p]
lib.PixelGetBlack.restype = c_double
lib.PixelGetBlackQuantum.argtypes = [c_void_p]
lib.PixelGetBlackQuantum.restype = QuantumType
lib.PixelGetBlue.argtypes = [c_void_p]
lib.PixelGetBlue.restype = c_double
lib.PixelGetBlueQuantum.argtypes = [c_void_p]
lib.PixelGetBlueQuantum.restype = QuantumType
lib.PixelGetColorAsNormalizedString.argtypes = [c_void_p]
lib.PixelGetColorAsNormalizedString.restype = c_magick_char_p
lib.PixelGetColorAsString.argtypes = [c_void_p]
lib.PixelGetColorAsString.restype = c_magick_char_p
lib.PixelGetColorCount.argtypes = [c_void_p]
lib.PixelGetColorCount.restype = c_size_t
lib.PixelGetCyan.argtypes = [c_void_p]
lib.PixelGetCyan.restype = c_double
lib.PixelGetCyanQuantum.argtypes = [c_void_p]
lib.PixelGetCyanQuantum.restype = QuantumType
lib.PixelGetException.argtypes = [c_void_p, POINTER(c_int)]
lib.PixelGetException.restype = c_magick_char_p
lib.PixelGetExceptionType.argtypes = [c_void_p]
lib.PixelGetExceptionType.restype = c_int
lib.PixelGetFuzz.argtypes = [c_void_p]
lib.PixelGetFuzz.restype = c_double
lib.PixelGetGreen.argtypes = [c_void_p]
lib.PixelGetGreen.restype = c_double
lib.PixelGetGreenQuantum.argtypes = [c_void_p]
lib.PixelGetGreenQuantum.restype = QuantumType
lib.PixelGetHSL.argtypes = [c_void_p,
POINTER(c_double),
POINTER(c_double),
POINTER(c_double)]
lib.PixelGetIndex.argtypes = [c_void_p]
lib.PixelGetIndex.restype = QuantumType
lib.PixelGetMagenta.argtypes = [c_void_p]
lib.PixelGetMagenta.restype = c_double
lib.PixelGetMagentaQuantum.argtypes = [c_void_p]
lib.PixelGetMagentaQuantum.restype = QuantumType
lib.PixelGetMagickColor.argtypes = [c_void_p, c_void_p]
if is_im_7:
lib.PixelGetPixel.argtypes = [c_void_p]
lib.PixelGetPixel.restype = c_void_p
lib.PixelGetRed.argtypes = [c_void_p]
lib.PixelGetRed.restype = c_double
lib.PixelGetRedQuantum.argtypes = [c_void_p]
lib.PixelGetRedQuantum.restype = QuantumType
lib.PixelGetYellow.argtypes = [c_void_p]
lib.PixelGetYellow.restype = c_double
lib.PixelGetYellowQuantum.argtypes = [c_void_p]
lib.PixelGetYellowQuantum.restype = QuantumType
lib.PixelSetAlpha.argtypes = [c_void_p, c_double]
lib.PixelSetAlphaQuantum.argtypes = [c_void_p, QuantumType]
lib.PixelSetBlack.argtypes = [c_void_p, c_double]
lib.PixelSetBlackQuantum.argtypes = [c_void_p, QuantumType]
lib.PixelSetBlue.argtypes = [c_void_p, c_double]
lib.PixelSetBlueQuantum.argtypes = [c_void_p, QuantumType]
lib.PixelSetColor.argtypes = [c_void_p, c_char_p]
lib.PixelSetColor.restype = c_int
lib.PixelSetColorCount.argtypes = [c_void_p, c_size_t]
lib.PixelSetCyan.argtypes = [c_void_p, c_double]
lib.PixelSetCyanQuantum.argtypes = [c_void_p, QuantumType]
lib.PixelSetFuzz.argtypes = [c_void_p, c_double]
lib.PixelSetGreen.argtypes = [c_void_p, c_double]
lib.PixelSetGreenQuantum.argtypes = [c_void_p, QuantumType]
lib.PixelSetHSL.argtypes = [c_void_p, c_double, c_double, c_double]
lib.PixelSetIndex.argtypes = [c_void_p, QuantumType]
lib.PixelSetMagenta.argtypes = [c_void_p, c_double]
lib.PixelSetMagentaQuantum.argtypes = [c_void_p, QuantumType]
if is_im_6:
lib.PixelSetMagickColor.argtypes = [c_void_p, c_void_p]
else:
lib.PixelSetMagickColor = None
if is_im_7:
lib.PixelSetPixelColor.argtypes = [c_void_p, c_void_p]
else:
lib.PixelSetPixelColor = None
lib.PixelSetRed.argtypes = [c_void_p, c_double]
lib.PixelSetRedQuantum.argtypes = [c_void_p, QuantumType]
lib.PixelSetYellow.argtypes = [c_void_p, c_double]
lib.PixelSetYellowQuantum.argtypes = [c_void_p, QuantumType]
if is_im_6:
lib.PixelSetMagickColor.argtypes = [c_void_p, c_void_p]
lib.PixelSetPixelColor = None
if is_im_7:
lib.PixelSetMagickColor = None
lib.PixelSetPixelColor.argtypes = [c_void_p, c_void_p]
|
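
Both columns of the record above are essentially a long list of argtypes/restype prototype declarations. For readers new to that ctypes pattern, here is a minimal, self-contained example against libc's strlen; the library lookup is platform dependent (it assumes a POSIX-style libc) and has nothing to do with Wand itself.

import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library("c"))  # e.g. 'libc.so.6' on Linux

# Declaring the prototype lets ctypes convert arguments and the return value
# instead of defaulting everything to c_int.
libc.strlen.argtypes = [ctypes.c_char_p]
libc.strlen.restype = ctypes.c_size_t

print(libc.strlen(b"MagickWand"))  # 10
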
43,767 | def derivative(H, x, i, delta=0.005291772):
r"""Compute the derivative :math:`\partial \hat{H}(x)/\partial x_i` of the electronic Hamiltonian
with respect to the :math:`i`-th nuclear coordinate using a central difference approximation.
.. math::
\frac{\partial \hat{H}(x)}{\partial x_i} \approx \frac{\hat{H}(x_i+\delta/2)
- \hat{H}(x_i-\delta/2)}{\delta}
Args:
H (callable): function with signature ``H(x)`` that builds the electronic
Hamiltonian of the molecule for a given set of nuclear coordinates ``x``
x (array[float]): 1D array with the nuclear coordinates given in Angstroms.
The size of the array should be ``3*N`` where ``N`` is the number of atoms
in the molecule.
i (int): index of the nuclear coordinate involved in the derivative
:math:`\partial \hat{H}(x)/\partial x_i`
delta (float): Step size in Angstroms used to displace the nuclear coordinate.
Its default value corresponds to 0.01 Bohr radius.
Returns:
pennylane.Hamiltonian: the derivative of the Hamiltonian
:math:`\partial \hat{H}(x)/\partial x_i`
**Example**
>>> def H(x):
... return qml.qchem.molecular_hamiltonian(['H', 'H'], x)[0]
>>> x = np.array([0., 0., 0.35, 0., 0., -0.35])
>>> print(derivative(H, x, 2))
(-0.7763135743293005) [I0]
+ (-0.08534360840293387) [Z0]
+ (-0.08534360840293387) [Z1]
+ (0.2669341092545041) [Z2]
+ (0.26693410925450134) [Z3]
+ (-0.025233628744274508) [Z0 Z1]
+ (0.0072162443961340415) [Y0 X1 X2 Y3]
+ (-0.0072162443961340415) [Y0 Y1 X2 X3]
+ (-0.0072162443961340415) [X0 X1 Y2 Y3]
+ (0.0072162443961340415) [X0 Y1 Y2 X3]
+ (-0.030654287745411964) [Z0 Z2]
+ (-0.023438043349280003) [Z0 Z3]
+ (-0.023438043349280003) [Z1 Z2]
+ (-0.030654287745411964) [Z1 Z3]
+ (-0.02494407786332001) [Z2 Z3]
"""
to_bohr = 1.8897261254535
# plus
x_plus = x.copy()
x_plus[i] += delta * 0.5
# minus
x_minus = x.copy()
x_minus[i] -= delta * 0.5
return (H(x_plus) - H(x_minus)) * (delta * to_bohr) ** -1
| def derivative(H, x, i, delta=0.005291772):
r"""Compute the derivative :math:`\partial \hat{H}(x)/\partial x_i` of the electronic Hamiltonian
with respect to the :math:`i`-th nuclear coordinate using a central difference approximation.
.. math::
\frac{\partial \hat{H}(x)}{\partial x_i} \approx \frac{\hat{H}(x_i+\delta/2)
- \hat{H}(x_i-\delta/2)}{\delta}
Args:
H (callable): function with signature ``H(x)`` that builds the electronic
Hamiltonian of the molecule for a given set of nuclear coordinates ``x``
x (array[float]): 1D array with the nuclear coordinates given in Angstroms.
The size of the array should be ``3*N`` where ``N`` is the number of atoms
in the molecule.
i (int): index of the nuclear coordinate involved in the derivative
:math:`\partial \hat{H}(x)/\partial x_i`
delta (float): Step size in Angstroms used to displace the nuclear coordinate in the finite difference approximation.
Its default value corresponds to 0.01 Bohr radius.
Returns:
pennylane.Hamiltonian: the derivative of the Hamiltonian
:math:`\partial \hat{H}(x)/\partial x_i`
**Example**
>>> def H(x):
... return qml.qchem.molecular_hamiltonian(['H', 'H'], x)[0]
>>> x = np.array([0., 0., 0.35, 0., 0., -0.35])
>>> print(derivative(H, x, 2))
(-0.7763135743293005) [I0]
+ (-0.08534360840293387) [Z0]
+ (-0.08534360840293387) [Z1]
+ (0.2669341092545041) [Z2]
+ (0.26693410925450134) [Z3]
+ (-0.025233628744274508) [Z0 Z1]
+ (0.0072162443961340415) [Y0 X1 X2 Y3]
+ (-0.0072162443961340415) [Y0 Y1 X2 X3]
+ (-0.0072162443961340415) [X0 X1 Y2 Y3]
+ (0.0072162443961340415) [X0 Y1 Y2 X3]
+ (-0.030654287745411964) [Z0 Z2]
+ (-0.023438043349280003) [Z0 Z3]
+ (-0.023438043349280003) [Z1 Z2]
+ (-0.030654287745411964) [Z1 Z3]
+ (-0.02494407786332001) [Z2 Z3]
"""
to_bohr = 1.8897261254535
# plus
x_plus = x.copy()
x_plus[i] += delta * 0.5
# minus
x_minus = x.copy()
x_minus[i] -= delta * 0.5
return (H(x_plus) - H(x_minus)) * (delta * to_bohr) ** -1
|
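# Editor's illustrative sketch (not part of the record above): the same
# central-difference formula applied to an ordinary scalar function, so the
# finite-difference idea can be checked without PennyLane; every name below is
# an illustrative assumption.
import numpy as np

def central_difference(f, x, i, delta=1e-3):
    # df/dx_i ~ (f(x + delta/2 * e_i) - f(x - delta/2 * e_i)) / delta
    x_plus, x_minus = x.copy(), x.copy()
    x_plus[i] += delta / 2
    x_minus[i] -= delta / 2
    return (f(x_plus) - f(x_minus)) / delta

f = lambda v: np.sum(v ** 2)  # exact gradient is 2*v
print(central_difference(f, np.array([1.0, 2.0]), i=1))  # ~4.0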
44,201 | def factorize(two, tol):
r"""Return double-factorized form of a two-electron tensor.
The second quantized electronic Hamiltonian is constructed in terms of fermionic creation,
:math:`a^{\dagger}` , and annihilation, :math:`a`, operators as
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_]
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha},
where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as
.. math::
h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right)
\phi_q(r) dr,
and
.. math::
h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|}
dr_1 dr_2.
Rearranging the integrals in the chemist notation, [11|22], gives
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}.
with
.. math::
T_{pq} = h_{pq} - \frac{1}{2} \sum_s h_{pssq}.
and :math:`V` is the two-electron tensor in chemist notation.
The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that
.. math::
V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}.
with the rank :math:`r \in \mathcal{O}(n)`. The matrices :math:`L` are further diagonalized
and truncated in a second level of factorization.
The algorithm has the following steps
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_].
1. Matricize the :math:`n \times n \times n \times n` two-electron tensor to a \
:math:`n^2 \times n^2` matrix where n is the number of orbitals.
2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \
corresponding eigenvalues larger than a threshold.
3. Reshape the selected eigenvectors to :math:`n \times n` matrices.
4. Diagonalize the :math:`n \times n` matrices and keep those whose eigenvalue \
norms are larger than a threshold.
Args:
two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis
arranged in chemist notation [11|22]
tol (float): cutoff value for discarding the negligible factors
Returns:
tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron
tensor, eigenvalues of the generated factors, eigenvectors of the generated factors
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772
>>> mol = qml.qchem.Molecule(symbols, geometry)
>>> core, one, two = qml.qchem.electron_integrals(mol)()
>>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation
>>> l, w, v = factorize(two, 1e-5)
>>> print(l)
[[[ 1.06723440e-01 9.73575768e-15]
[ 8.36288956e-15 -1.04898533e-01]]
[[-2.20945401e-13 -4.25688222e-01]
[-4.25688222e-01 -2.98228790e-13]]
[[-8.14472856e-01 5.01669019e-13]
[ 5.01689072e-13 -8.28642140e-01]]]
"""
n = two.shape[0]
two = two.reshape(n * n, n * n)
eigvals, eigvecs = np.linalg.eigh(two)
eigvals = np.array([val for val in eigvals if abs(val) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals)))
factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))])
eigvals, eigvecs = np.linalg.eigh(factors)
eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
return factors, eigvals, eigvecs
| def factorize(two, tol):
r"""Return double-factorized form of a two-electron tensor.
The second quantized electronic Hamiltonian is constructed in terms of fermionic creation,
:math:`a^{\dagger}` , and annihilation, :math:`a`, operators as
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_]
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha},
where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as
.. math::
h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right)
\phi_q(r) dr,
and
.. math::
h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|}
dr_1 dr_2.
Rearranging the integrals in the chemist notation, [11|22], gives
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}.
with
.. math::
T_{pq} = h_{pq} - \frac{1}{2} \sum_s h_{pssq}.
and :math:`V` is the two-electron tensor in chemist notation.
The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that
.. math::
V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}.
with the rank :math:`r \in \mathcal{O}(n)`. The matrices :math:`L` are further diagonalized
and truncated in a second level of factorization.
The algorithm has the following steps
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_].
1. Matricize the :math:`n \times n \times n \times n` two-electron tensor to a \
:math:`n^2 \times n^2` matrix where :math:`n` is the number of orbitals.
2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \
corresponding eigenvalues larger than a threshold.
3. Reshape the selected eigenvectors to :math:`n \times n` matrices.
4. Diagonalize the :math:`n \times n` matrices and keep those whose eigenvalue \
norms are larger than a threshold.
Args:
two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis
arranged in chemist notation [11|22]
tol (float): cutoff value for discarding the negligible factors
Returns:
tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron
tensor, eigenvalues of the generated factors, eigenvectors of the generated factors
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772
>>> mol = qml.qchem.Molecule(symbols, geometry)
>>> core, one, two = qml.qchem.electron_integrals(mol)()
>>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation
>>> l, w, v = factorize(two, 1e-5)
>>> print(l)
[[[ 1.06723440e-01 9.73575768e-15]
[ 8.36288956e-15 -1.04898533e-01]]
[[-2.20945401e-13 -4.25688222e-01]
[-4.25688222e-01 -2.98228790e-13]]
[[-8.14472856e-01 5.01669019e-13]
[ 5.01689072e-13 -8.28642140e-01]]]
"""
n = two.shape[0]
two = two.reshape(n * n, n * n)
eigvals, eigvecs = np.linalg.eigh(two)
eigvals = np.array([val for val in eigvals if abs(val) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals)))
factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))])
eigvals, eigvecs = np.linalg.eigh(factors)
eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
return factors, eigvals, eigvecs
|
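# Editor's illustrative sketch (not part of the record above): a numerical
# check of the first factorization level, V_ijkl = sum_r L^(r)_ij L^(r)_kl,
# assuming a random positive semidefinite surrogate stands in for the real
# two-electron tensor in chemist notation.
import numpy as np

n = 2
rng = np.random.default_rng(0)
m = rng.standard_normal((n * n, n * n))
two = (m @ m.T).reshape(n, n, n, n)            # symmetric PSD surrogate tensor

eigvals, eigvecs = np.linalg.eigh(two.reshape(n * n, n * n))
keep = np.abs(eigvals) > 1e-10
vectors = eigvecs[:, keep] * np.sqrt(np.abs(eigvals[keep]))
factors = vectors.T.reshape(-1, n, n)          # one n x n factor L^(r) per kept eigenvalue

reconstruction = np.einsum("rij,rkl->ijkl", factors, factors)
print(np.allclose(reconstruction, two))        # True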
57,768 | def apply_security_profile_command(profile_name: str, profile_type: str, rule_name: str, pre_post: str = None):
if DEVICE_GROUP: # Panorama instance
if not pre_post:
raise Exception('Please provide the pre_post argument when applying profiles to rules in '
'Panorama instance.')
panorama_xpath = f"{XPATH_RULEBASE}{pre_post}/security/rules/entry[@name='{rule_name}']/profile-setting/"\
f"profiles/{profile_type}",
apply_security_profile(panorama_xpath, profile_name)
return_results(f'The profile {profile_name} has been applied to the rule {rule_name}')
else: # firewall instance
firewall_xpath = f"{XPATH_RULEBASE}rulebase/security/rules/entry[@name='{rule_name}']/profile-setting/"\
f"profiles/{profile_type}"
apply_security_profile(firewall_xpath, profile_name)
return_results(f'The profile {profile_name} has been applied to the rule {rule_name}')
| def apply_security_profile_command(profile_name: str, profile_type: str, rule_name: str, pre_post: str = None):
if DEVICE_GROUP: # Panorama instance
if not pre_post:
raise Exception('Please provide the pre_post argument when applying profiles to rules in '
'Panorama instance.')
xpath = f"{XPATH_RULEBASE}{pre_post}/security/rules/entry[@name='{rule_name}']/profile-setting/"\
f"profiles/{profile_type}"
else: # firewall instance
xpath = f"{XPATH_RULEBASE}rulebase/security/rules/entry[@name='{rule_name}']/profile-setting/"\
f"profiles/{profile_type}"
apply_security_profile(xpath, profile_name)
return_results(f'The profile {profile_name} has been applied to the rule {rule_name}')
|
53,776 | def get_active_assets(n, c, investment_period, snapshots):
"""
Getter function. Get the index of elements which are active of a given
component c, depending on lifetime and build year for a investment_period
investment_period (assuming that component is already used in build year)
"""
# component only active during lifetime
df = n.df(c).copy()
index_active = ((df["build_year"]<= investment_period) &
(investment_period<df[["build_year", "lifetime"]].sum(axis=1)))
return index_active
| def get_active_assets(n, c, investment_period, snapshots):
"""
Getter function. Get the index of elements of a given component c which are
active, depending on lifetime and build year, for an investment_period
(assuming that the component is already in use in its build year).
"""
# component only active during lifetime
df = n.df(c).copy()
index_active = ((df["build_year"]<= investment_period) &
(investment_period<df[["build_year", "lifetime"]].sum(axis=1)))
return index_active
|
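# Editor's illustrative sketch (not part of the record above): the activity
# test used by get_active_assets, build_year <= period < build_year + lifetime,
# applied to a toy DataFrame that stands in for n.df(c).
import pandas as pd

df = pd.DataFrame({"build_year": [2020, 2025], "lifetime": [10, 5]},
                  index=["gen-a", "gen-b"])
period = 2024
active = (df["build_year"] <= period) & (period < df[["build_year", "lifetime"]].sum(axis=1))
print(active)  # gen-a: True (2020 <= 2024 < 2030), gen-b: False (not yet built)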
32,315 | def create_nic_command(client: MsGraphClient, args: dict):
response = client.create_nic(args)
# Retrieve relevant properties to return to context
nic_name = response.get('name').lower()
nic_id = response.get('id')
location = response.get('location')
properties = response.get('properties')
network_security_group = properties.get('networkSecurityGroup', {}).get('id', 'NA')
provisioning_state = properties.get('provisioningState', "NA")
ip_configurations = properties.get('ipConfigurations', [])
dns_suffix = properties.get('dnsSettings', {}).get('internalDomainNameSuffix')
ip_configs = []
for ip_configuration in ip_configurations:
ip_configs.append({
"ConfigName": ip_configuration.get('name', "NA"),
"ConfigID": ip_configuration.get('id', "NA"),
"PrivateIPAddress": ip_configuration.get('properties', {}).get('privateIPAddress', "NA"),
"PublicIPAddressID": ip_configuration.get('properties', {}).get('publicIPAddress', {}).get('id', "NA"),
"SubNet": ip_configuration.get('properties', {}).get('subnet', {}).get('id', "NA"),
})
nic = {
'Name': nic_name,
'ID': nic_id,
'IPConfigurations': ip_configs,
'ProvisioningState': provisioning_state,
'Location': location,
'ResourceGroup': args.get('resource_group'),
'NetworkSecurityGroup': network_security_group,
'DNSSuffix': dns_suffix
}
title = f'Created Network Interface "{nic_name}"'
human_readable = tableToMarkdown(title, nic, removeNull=True)
entry_context = {'Azure.NetworkInterfaces(val.ID && val.Name === obj.ID)': nic}
return human_readable, entry_context, response
| def create_nic_command(client: MsGraphClient, args: dict):
response = client.create_nic(args)
# Retrieve relevant properties to return to context
nic_name = response.get('name').lower()
nic_id = response.get('id')
location = response.get('location')
properties = response.get('properties')
network_security_group = properties.get('networkSecurityGroup', {}).get('id', 'NA')
provisioning_state = properties.get('provisioningState', "NA")
ip_configurations = properties.get('ipConfigurations', [])
dns_suffix = properties.get('dnsSettings', {}).get('internalDomainNameSuffix')
ip_configs = []
for ip_configuration in ip_configurations:
ip_configs.append({
"ConfigName": ip_configuration.get('name', "NA"),
"ConfigID": ip_configuration.get('id', "NA"),
"PrivateIPAddress": ip_configuration.get('properties', {}).get('privateIPAddress', "NA"),
"PublicIPAddressID": ip_configuration.get('properties', {}).get('publicIPAddress', {}).get('id', "NA"),
"SubNet": ip_configuration.get('properties', {}).get('subnet', {}).get('id', "NA"),
})
nic = {
'Name': nic_name,
'ID': nic_id,
'IPConfigurations': ip_configs,
'ProvisioningState': provisioning_state,
'Location': location,
'ResourceGroup': args.get('resource_group'),
'NetworkSecurityGroup': network_security_group,
'DNSSuffix': dns_suffix
}
title = f'Created Network Interface "{nic_name}"'
human_readable = tableToMarkdown(title, nic, removeNull=True)
entry_context = {'Azure.NetworkInterfaces(val.ID && val.Name === obj.ID)': nic}
return human_readable, entry_context, response
|
8,019 | def test_add_components():
"""Test adding multipe elements or nuclides at once"""
m = openmc.Material()
components = {'H1': 2.0,
'O16': 1.0,
'Zr': 1.0,
'O': 1.0,
'Ag110m': 1.0,
'U': {'percent': 1.0,
'enrichment': 4.5},
'Li': {'percent': 1.0,
'enrichment': 60.0,
'enrichment_target': 'Li7'},
'H': {'percent': 1.0,
'enrichment': 50.0,
'enrichment_target': 'H2',
'enrichment_type': 'wo'}}
m.add_components(components)
with pytest.raises(ValueError):
m.add_components({'U': {'percent': 1.0,
'enrichment': 100.0}})
with pytest.raises(ValueError):
m.add_components({'Pu': {'percent': 1.0,
'enrichment': 3.0}})
with pytest.raises(ValueError):
m.add_components({'U': {'percent': 1.0,
'enrichment': 70.0,
'enrichment_target':'U235'}})
with pytest.raises(ValueError):
m.add_components({'He': {'percent': 1.0,
'enrichment': 17.0,
'enrichment_target': 'He6'}})
with pytest.raises(ValueError):
m.add_components({'li': 1.0}) # should fail as 1st char is lowercase
with pytest.raises(ValueError):
m.add_components({'LI': 1.0}) # should fail as 2nd char is uppercase
with pytest.raises(ValueError):
m.add_components({'Xx': 1.0}) # should fail as Xx is not an element
with pytest.raises(ValueError):
m.add_components({'n': 1.0}) # check to avoid n for neutron being accepted
with pytest.raises(TypeError):
m.add_components({'H1': '1.0'})
with pytest.raises(TypeError):
m.add_components({1.0: 'H1'}, percent_type = 'wo')
with pytest.raises(ValueError):
m.add_components({'H1': 1.0}, percent_type = 'oa')
| def test_add_components():
"""Test adding multipe elements or nuclides at once"""
m = openmc.Material()
components = {'H1': 2.0,
'O16': 1.0,
'Zr': 1.0,
'O': 1.0,
'Ag110_m1': 1.0,
'U': {'percent': 1.0,
'enrichment': 4.5},
'Li': {'percent': 1.0,
'enrichment': 60.0,
'enrichment_target': 'Li7'},
'H': {'percent': 1.0,
'enrichment': 50.0,
'enrichment_target': 'H2',
'enrichment_type': 'wo'}}
m.add_components(components)
with pytest.raises(ValueError):
m.add_components({'U': {'percent': 1.0,
'enrichment': 100.0}})
with pytest.raises(ValueError):
m.add_components({'Pu': {'percent': 1.0,
'enrichment': 3.0}})
with pytest.raises(ValueError):
m.add_components({'U': {'percent': 1.0,
'enrichment': 70.0,
'enrichment_target':'U235'}})
with pytest.raises(ValueError):
m.add_components({'He': {'percent': 1.0,
'enrichment': 17.0,
'enrichment_target': 'He6'}})
with pytest.raises(ValueError):
m.add_components({'li': 1.0}) # should fail as 1st char is lowercase
with pytest.raises(ValueError):
m.add_components({'LI': 1.0}) # should fail as 2nd char is uppercase
with pytest.raises(ValueError):
m.add_components({'Xx': 1.0}) # should fail as Xx is not an element
with pytest.raises(ValueError):
m.add_components({'n': 1.0}) # check to avoid n for neutron being accepted
with pytest.raises(TypeError):
m.add_components({'H1': '1.0'})
with pytest.raises(TypeError):
m.add_components({1.0: 'H1'}, percent_type = 'wo')
with pytest.raises(ValueError):
m.add_components({'H1': 1.0}, percent_type = 'oa')
|
28,342 | def get_experiments(conn: ConnectionPlus) -> list[tuple[int]]:
"""Get a list of experiments
Args:
conn: database connection
Returns:
list of rows
"""
sql = "SELECT exp_id FROM experiments"
c = atomic_transaction(conn, sql)
return c.fetchall()
| def get_experiments(conn: ConnectionPlus) -> list[tuple[int, ...]]:
"""Get a list of experiments
Args:
conn: database connection
Returns:
list of rows
"""
sql = "SELECT exp_id FROM experiments"
c = atomic_transaction(conn, sql)
return c.fetchall()
|
30,917 | def docker_container_details() -> dict:
""" Gather docker container SSL/TLS Certificate information (Which set by demisto engine), The following details:
1. Global veriables which used by requests module:
a. SSL_CERT_FILE
b. REQUESTS_CA_BUNDLE
2. Custom python ssl file located in docker container - /etc/custom-python-ssl/certs.pem
Returns:
dict: Corresponding entry context.
"""
container_ca_file = Path('/etc/custom-python-ssl/certs.pem')
certificates = "" if not container_ca_file.is_file() else container_ca_file.read_text()
return {
"ShellVariables": {
"SSL_CERT_FILE": os.environ.get('SSL_CERT_FILE'),
"CERT_FILE": os.environ.get('REQUESTS_CA_BUNDLE')
},
"CustomCertificateAuthorities": parse_all_certificates(certificates)
}
| def docker_container_details() -> dict:
""" Gather docker container SSL/TLS Certificate information (Which set by demisto engine), The following details:
1. Global veriables which used by requests module:
a. SSL_CERT_FILE
b. REQUESTS_CA_BUNDLE
2. Custom python ssl file located in docker container - /etc/custom-python-ssl/certs.pem
Returns:
dict: Corresponding entry context.
"""
container_ca_file = Path('/etc/custom-python-ssl/certs.pem')
certificates = "" if not container_ca_file.is_file() else container_ca_file.read_text()
return {
"ShellVariables": {
"SSL_CERT_FILE": os.environ.get('SSL_CERT_FILE'),
"CERT_FILE": os.environ.get('REQUESTS_CA_BUNDLE'),
},
"CustomCertificateAuthorities": parse_all_certificates(certificates)
}
|
1,105 | def _rebase_path(value, cwd):
if isinstance(value, list):
return [_rebase_path(v, cwd) for v in value]
try:
value = Path(value)
except TypeError:
pass
else:
try:
value = Path(value).relative_to(cwd)
except ValueError:
pass
return value
| def _rebase_path(value, cwd):
if isinstance(value, list):
return [_rebase_path(v, cwd) for v in value]
try:
value = Path(value)
except TypeError:
pass
else:
try:
value = value.relative_to(cwd)
except ValueError:
pass
return value
|
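# Editor's illustrative usage sketch (not part of the record above), assuming
# the _rebase_path helper above is in scope; the paths are illustrative only.
from pathlib import Path

cwd = Path("/project")
print(_rebase_path("/project/src/main.py", cwd))  # src/main.py
print(_rebase_path(["/project/a.txt", 42], cwd))  # [PosixPath('a.txt'), 42] (non-path values pass through)
print(_rebase_path("/elsewhere/b.txt", cwd))      # /elsewhere/b.txt (left absolute when not under cwd)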
6,258 | def test_SandboxMetadataDB():
smDB = SandboxMetadataDB()
seNameToUse = "ProductionSandboxSE"
sbPath = "/sb/pfn/1.tar.bz2"
assignTo = {"adminusername": "dirac_admin"}
res = smDB.registerAndGetSandbox(
"adminusername", "/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser", "dirac_admin", seNameToUse, sbPath, 123
)
assert res["OK"], res["Message"]
sbURL = "SB:%s|%s" % (seNameToUse, sbPath)
assignTo = dict([(key, [(sbURL, assignTo[key])]) for key in assignTo])
res = smDB.assignSandboxesToEntities(assignTo, "adminusername", "dirac_admin", "enSetup")
assert res["OK"], res["Message"]
| def test_SandboxMetadataDB():
smDB = SandboxMetadataDB()
seNameToUse = "ProductionSandboxSE"
sbPath = "/sb/pfn/1.tar.bz2"
assignTo = {"adminusername": "dirac_admin"}
res = smDB.registerAndGetSandbox(
"adminusername", "/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser", "dirac_admin", seNameToUse, sbPath, 123
)
assert res["OK"], res["Message"]
sbURL = f"SB:{seNameToUse}|{sbPath}"
assignTo = dict([(key, [(sbURL, assignTo[key])]) for key in assignTo])
res = smDB.assignSandboxesToEntities(assignTo, "adminusername", "dirac_admin", "enSetup")
assert res["OK"], res["Message"]
|
23,607 | def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
r"""
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]_
.. math::
I = I_L -
I_0 \left[
\exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1
\right] -
\frac{V + I R_s}{R_{sh}}
for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` are scalar, a single curve will be returned, if any
are Series (of the same length), multiple IV curves will be calculated.
The input parameters can be calculated using
:py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data.
Parameters
----------
photocurrent : numeric
Light-generated current :math:`I_L` (photocurrent) under desired
IV curve conditions. ``0 <= photocurrent``. [A]
saturation_current : numeric
Diode saturation :math:`I_0` current under desired IV curve
conditions. ``0 < saturation_current``. [A]
resistance_series : numeric
Series resistance :math:`R_s` under desired IV curve conditions.
``0 <= resistance_series < numpy.inf``. [ohms]
resistance_shunt : numeric
Shunt resistance :math:`R_{sh}` under desired IV curve conditions.
``0 < resistance_shunt <= numpy.inf``. [ohms]
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
:math:`n`, 2) the number of cells in series :math:`N_s`, and 3)
the cell thermal voltage under the desired IV curve conditions
:math:`V_{th}`. The thermal voltage of the cell (in volts) may be
calculated as :math:`k_B T_c / q`, where :math:`k_B` is
Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n
junction in Kelvin, and :math:`q` is the charge of an electron
(coulombs). ``0 < nNsVth``. [V]
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
If the method is ``'newton'`` then the root-finding Newton-Raphson method
is used. It should be safe for well behaved IV-curves, but the ``'brentq'``
method is recommended for reliability.
If the method is ``'brentq'`` then Brent's bisection search method is used
that guarantees convergence by bounding the voltage between zero and
open-circuit.
If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
calculate points on the IV curve at diode voltages from zero to
open-circuit voltage with a log spacing that gets closer as voltage
increases. If the method is ``'lambertw'`` then the calculated points on
the IV curve are linearly spaced.
References
----------
.. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
out = pd.DataFrame(out, index=photocurrent.index)
return out
| def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
r"""
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]_
.. math::
I = I_L -
I_0 \left[
\exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1
\right] -
\frac{V + I R_s}{R_{sh}}
for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` are scalar, a single curve will be returned, if any
are Series (of the same length), multiple IV curves are calculated.
The input parameters can be calculated using
:py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data.
Parameters
----------
photocurrent : numeric
Light-generated current :math:`I_L` (photocurrent) under desired
IV curve conditions. ``0 <= photocurrent``. [A]
saturation_current : numeric
Diode saturation :math:`I_0` current under desired IV curve
conditions. ``0 < saturation_current``. [A]
resistance_series : numeric
Series resistance :math:`R_s` under desired IV curve conditions.
``0 <= resistance_series < numpy.inf``. [ohms]
resistance_shunt : numeric
Shunt resistance :math:`R_{sh}` under desired IV curve conditions.
``0 < resistance_shunt <= numpy.inf``. [ohms]
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
:math:`n`, 2) the number of cells in series :math:`N_s`, and 3)
the cell thermal voltage under the desired IV curve conditions
:math:`V_{th}`. The thermal voltage of the cell (in volts) may be
calculated as :math:`k_B T_c / q`, where :math:`k_B` is
Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n
junction in Kelvin, and :math:`q` is the charge of an electron
(coulombs). ``0 < nNsVth``. [V]
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
If the method is ``'newton'`` then the root-finding Newton-Raphson method
is used. It should be safe for well behaved IV-curves, but the ``'brentq'``
method is recommended for reliability.
If the method is ``'brentq'`` then Brent's bisection search method is used
that guarantees convergence by bounding the voltage between zero and
open-circuit.
If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
calculate points on the IV curve at diode voltages from zero to
open-circuit voltage with a log spacing that gets closer as voltage
increases. If the method is ``'lambertw'`` then the calculated points on
the IV curve are linearly spaced.
References
----------
.. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
out = pd.DataFrame(out, index=photocurrent.index)
return out
|
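# Editor's illustrative usage sketch (not part of the record above), assuming
# pvlib is installed and exposes this function as pvlib.pvsystem.singlediode;
# the parameter values are made up for illustration only.
import pvlib

curve = pvlib.pvsystem.singlediode(
    photocurrent=5.0,          # I_L [A]
    saturation_current=1e-9,   # I_0 [A]
    resistance_series=0.5,     # R_s [ohm]
    resistance_shunt=500.0,    # R_sh [ohm]
    nNsVth=1.5,                # n * N_s * V_th [V]
    method="lambertw",
)
print(curve["i_sc"], curve["v_oc"], curve["p_mp"])  # short-circuit current, open-circuit voltage, max power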
35,294 | def get_color_dtype(data, column_names):
has_color = True
for column_name in column_names:
if column_name not in data["points"]:
has_color = False
break
if has_color:
color_data_types = [data["points"][column_name].dtype for column_name in column_names]
assert len(set(color_data_types)) == 1
color_data_type = color_data_types[0]
else:
color_data_type = None
return color_data_type
| def get_color_dtype(data, column_names):
has_color = all(column in data["points"] for column in column_names)
if has_color:
color_data_types = [data["points"][column_name].dtype for column_name in column_names]
assert len(set(color_data_types)) == 1
color_data_type = color_data_types[0]
else:
color_data_type = None
return color_data_type
|
13,064 | def install_app(
app_installation: AppInstallation,
activate: bool = False,
):
response = requests.get(app_installation.manifest_url, timeout=REQUEST_TIMEOUT)
response.raise_for_status()
assigned_permissions = app_installation.permissions.all()
manifest_data = response.json()
manifest_data["permissions"] = get_permission_names(assigned_permissions)
clean_manifest_data(manifest_data)
app = App.objects.create(
name=app_installation.app_name,
is_active=activate,
identifier=manifest_data.get("id"),
about_app=manifest_data.get("about"),
data_privacy=manifest_data.get("dataPrivacy"),
data_privacy_url=manifest_data.get("dataPrivacyUrl"),
homepage_url=manifest_data.get("homepageUrl"),
support_url=manifest_data.get("supportUrl"),
configuration_url=manifest_data.get("configurationUrl"),
app_url=manifest_data.get("appUrl"),
version=manifest_data.get("version"),
type=AppType.THIRDPARTY,
)
app.permissions.set(app_installation.permissions.all())
for extension_data in manifest_data.get("extensions", []):
extension = AppExtension.objects.create(
app=app,
label=extension_data.get("label"),
url=extension_data.get("url"),
view=extension_data.get("view"),
type=extension_data.get("type"),
target=extension_data.get("target"),
open_as=extension_data.get("open_as") or AppExtensionOpenAs.POPUP,
)
extension.permissions.set(extension_data.get("permissions", []))
token = app.tokens.create(name="Default token")
try:
send_app_token(
target_url=manifest_data.get("tokenTargetUrl"), token=token.auth_token
)
except requests.RequestException as e:
app.delete()
raise e
return app
| def install_app(
app_installation: AppInstallation,
activate: bool = False,
):
response = requests.get(app_installation.manifest_url, timeout=REQUEST_TIMEOUT)
response.raise_for_status()
assigned_permissions = app_installation.permissions.all()
manifest_data = response.json()
manifest_data["permissions"] = get_permission_names(assigned_permissions)
clean_manifest_data(manifest_data)
app = App.objects.create(
name=app_installation.app_name,
is_active=activate,
identifier=manifest_data.get("id"),
about_app=manifest_data.get("about"),
data_privacy=manifest_data.get("dataPrivacy"),
data_privacy_url=manifest_data.get("dataPrivacyUrl"),
homepage_url=manifest_data.get("homepageUrl"),
support_url=manifest_data.get("supportUrl"),
configuration_url=manifest_data.get("configurationUrl"),
app_url=manifest_data.get("appUrl"),
version=manifest_data.get("version"),
type=AppType.THIRDPARTY,
)
app.permissions.set(app_installation.permissions.all())
for extension_data in manifest_data.get("extensions", []):
extension = AppExtension.objects.create(
app=app,
label=extension_data.get("label"),
url=extension_data.get("url"),
view=extension_data.get("view"),
type=extension_data.get("type"),
target=extension_data.get("target"),
open_as=extension_data.get("open_as", AppExtensionOpenAs.POPUP),
)
extension.permissions.set(extension_data.get("permissions", []))
token = app.tokens.create(name="Default token")
try:
send_app_token(
target_url=manifest_data.get("tokenTargetUrl"), token=token.auth_token
)
except requests.RequestException as e:
app.delete()
raise e
return app
|
32,348 | def fetch_indicators():
"""Retrieve vulnerability data from Exodus Intelligence.
Returns:
Bool: True/False based on success or failure
"""
score = 0.0
indicators = []
formatted_list = []
min_xi = 0 if MIN_XI == "" else MIN_XI
max_xi = 10 if MAX_XI == "" else MAX_XI
try:
exodus = connect()
demisto.debug("Connected to server")
recent_vulns = exodus.get_recent_vulns()
try:
data = recent_vulns["data"]["items"]
except KeyError as e:
demisto.debug(f"There was an error getting the data {e}")
demisto.debug(f"Fetched {len(data)} total vulnerabilities")
for item in data:
try:
cve = item["cves"][0]
report_data = {"cve": cve, "identifier": item["identifier"]}
if score >= min_xi and score <= max_xi:
report = exodus.get_report(cve)
vulnerability = exodus.get_vuln(cve)
if report["ok"] is True:
report_data = extract_data(report, report_data)
vuln_data = extract_data(vulnerability, report_data)
formatted_list.append(vuln_data)
except KeyError as e:
demisto.debug(f"There was a problem: {e}")
except Exception as e:
demisto.debug(f"Something went wrong: {e}")
return False
if len(formatted_list):
for items in formatted_list:
try:
indicator = {
"value": items["identifier"],
"type": "Exodus Intelligence",
"fields": items,
}
indicators.append(indicator)
except KeyError as e:
demisto.debug(f"There was a problem creating indicators: {e}")
demisto.createIndicators(indicators)
return True
| def fetch_indicators():
"""Retrieve vulnerability data from Exodus Intelligence.
Returns:
Bool: True/False based on success or failure
"""
score = 0.0
indicators = []
formatted_list = []
min_xi = 0 if MIN_XI == "" else MIN_XI
max_xi = 10 if MAX_XI == "" else MAX_XI
try:
exodus = connect()
demisto.debug("Connected to server")
recent_vulns = exodus.get_recent_vulns()
try:
data = recent_vulns["data"]["items"]
except KeyError as e:
demisto.debug(f"There was an error getting the data {e}")
demisto.debug(f"Fetched {len(data)} total vulnerabilities")
for item in data:
try:
cve = item["cves"][0]
report_data = {"cve": cve, "identifier": item["identifier"]}
if score >= min_xi and score <= max_xi:
report = exodus.get_report(cve)
vulnerability = exodus.get_vuln(cve)
if report["ok"] is True:
report_data = extract_data(report, report_data)
vuln_data = extract_data(vulnerability, report_data)
formatted_list.append(vuln_data)
except KeyError as e:
demisto.debug(f"There was a problem: {e}")
except Exception as e:
demisto.debug(f"Something went wrong: {e}")
return False
if formatted_list:
for items in formatted_list:
try:
indicator = {
"value": items["identifier"],
"type": "Exodus Intelligence",
"fields": items,
}
indicators.append(indicator)
except KeyError as e:
demisto.debug(f"There was a problem creating indicators: {e}")
demisto.createIndicators(indicators)
return True
|
2,049 | def mean_variance_axis(X, axis):
"""Compute mean and variance along an axis on a CSR or CSC matrix
Parameters
----------
X : CSR or CSC sparse matrix of shape (n_samples, n_features)
Input data.
axis : {0, 1}
Axis along which the means and variances should be computed.
Returns
-------
means : ndarray of float of shape (n_features,)
Feature-wise means.
variances : ndarray of float of shape (n_features,)
Feature-wise variances.
"""
_raise_error_wrong_axis(axis)
if isinstance(X, sp.csr_matrix):
if axis == 0:
return _csr_mean_var_axis0(X)
else:
return _csc_mean_var_axis0(X.T)
elif isinstance(X, sp.csc_matrix):
if axis == 0:
return _csc_mean_var_axis0(X)
else:
return _csr_mean_var_axis0(X.T)
else:
_raise_typeerror(X)
| def mean_variance_axis(X, axis):
"""Compute mean and variance along an axis on a CSR or CSC matrix
Parameters
----------
X : CSR or CSC sparse matrix of shape (n_samples, n_features)
Input data.
axis : {0, 1}
Axis along which the means and variances should be computed.
Returns
-------
means : ndarray of float of shape (n_features,)
Feature-wise means.
variances : ndarray of shape (n_features,), dtype=float
Feature-wise variances.
"""
_raise_error_wrong_axis(axis)
if isinstance(X, sp.csr_matrix):
if axis == 0:
return _csr_mean_var_axis0(X)
else:
return _csc_mean_var_axis0(X.T)
elif isinstance(X, sp.csc_matrix):
if axis == 0:
return _csc_mean_var_axis0(X)
else:
return _csr_mean_var_axis0(X.T)
else:
_raise_typeerror(X)
|
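# Editor's illustrative sketch (not part of the record above), assuming
# scikit-learn is installed and that this function is the one exposed as
# sklearn.utils.sparsefuncs.mean_variance_axis; the matrix is a toy example.
import numpy as np
from scipy import sparse
from sklearn.utils.sparsefuncs import mean_variance_axis

X = sparse.csr_matrix(np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 0.0]]))
means, variances = mean_variance_axis(X, axis=0)
print(means)                                             # [2.0, 1.333...]
print(np.allclose(variances, X.toarray().var(axis=0)))   # True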
21,483 | def measure_func(name: str = None) -> Callable[[T], T]:
"""
Used to decorate an async function with a `Measure` context manager.
Usage:
@measure_func()
async def foo(...):
...
Which is analogous to:
async def foo(...):
with Measure(...):
...
"""
def wrapper(func: T) -> T:
block_name = func.__name__ if name is None else name
@wraps(func)
async def measured_func(self, *args, **kwargs):
with Measure(self.clock, block_name):
r = await func(self, *args, **kwargs)
return r
return cast(T, measured_func)
return wrapper
| def measure_func(name: Optional[str] = None) -> Callable[[T], T]:
"""
Used to decorate an async function with a `Measure` context manager.
Usage:
@measure_func()
async def foo(...):
...
Which is analogous to:
async def foo(...):
with Measure(...):
...
"""
def wrapper(func: T) -> T:
block_name = func.__name__ if name is None else name
@wraps(func)
async def measured_func(self, *args, **kwargs):
with Measure(self.clock, block_name):
r = await func(self, *args, **kwargs)
return r
return cast(T, measured_func)
return wrapper
|
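# Editor's illustrative sketch (not part of the record above): the same
# decorator-wrapping-a-context-manager pattern, with a plain timing context
# manager standing in for synapse's Measure so no homeserver clock is needed.
import asyncio
import time
from functools import wraps

class Timed:
    def __init__(self, name):
        self.name = name
    def __enter__(self):
        self.start = time.perf_counter()
    def __exit__(self, *exc):
        print(f"{self.name} took {time.perf_counter() - self.start:.4f}s")

def timed_func(name=None):
    def wrapper(func):
        block_name = func.__name__ if name is None else name
        @wraps(func)
        async def measured(*args, **kwargs):
            with Timed(block_name):
                return await func(*args, **kwargs)
        return measured
    return wrapper

@timed_func()
async def work():
    await asyncio.sleep(0.01)

asyncio.run(work())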
45,694 | def det_cont_fcst_compute(err, scores):
"""Compute simple and skill scores for deterministic continuous forecasts
from a verification error object.
Parameters
----------
err : dict
A verification error object initialized with
:py:func:`pysteps.verification.detcontscores.det_cont_fcst_init` and
populated with
:py:func:`pysteps.verification.detcontscores.det_cont_fcst_accum`.
scores : string or list of strings
The name(s) of the scores. The list of possible score names is:
.. tabularcolumns:: |p{2cm}|L|
+------------+--------------------------------------------------------+
| Name | Description |
+============+========================================================+
| beta | linear regression slope (conditional bias) |
+------------+--------------------------------------------------------+
| corr_p | Pearson's correlation coefficient (linear correlation) |
+------------+--------------------------------------------------------+
| DRMSE | debiased root mean squared error |
+------------+--------------------------------------------------------+
| MAE | mean absolute error |
+------------+--------------------------------------------------------+
| ME | mean error or bias |
+------------+--------------------------------------------------------+
| MSE | mean squared error |
+------------+--------------------------------------------------------+
| RMSE | root mean squared error |
+------------+--------------------------------------------------------+
| RV | reduction of variance |
| | (Brier Score, Nash-Sutcliffe Efficiency) |
+------------+--------------------------------------------------------+
Returns
-------
result : list
list containing the verification results
"""
# catch case of single score passed as string
def get_iterable(x):
if isinstance(x, collections.Iterable) and not isinstance(x, str):
return x
else:
return (x,)
scores = get_iterable(scores)
result = []
for score in scores:
# catch None passed as score
if score is None:
continue
score = score.lower()
# bias (mean error, systematic error)
if score in ["bias", "me"]:
bias = err["me"]
result.append(bias)
# mean absolute error
if score in ["mae"]:
MAE = err["mae"]
result.append(MAE)
# mean squared error
if score in ["mse"]:
MAE = err["mse"]
result.append(MAE)
# root mean squared error
if score == 'rmse':
RMSE = np.sqrt(err["mse"])
result.append(RMSE)
# linear correlation coeff (pearson corr)
if score in ["corr_p", "pearsonr"]:
corr_p = err["cov"]/np.sqrt(err["vobs"])/np.sqrt(err["vpred"])
result.append(corr_p)
# beta (linear regression slope)
if score in ["beta"]:
beta = err["cov"]/err["vpred"]
result.append(beta)
# debiased RMSE
if score in ["drmse"]:
RMSE_d = (err["mse"] - err["me"]**2)/err["vobs"]
result.append(RMSE_d)
# reduction of variance (Brier Score, Nash-Sutcliffe efficiency coefficient,
# MSE skill score)
if score in ["rv", "brier_score", "nse"]:
RV = 1.0 - err["mse"]/err["vobs"]
result.append(RV)
return result
| def det_cont_fcst_compute(err, scores):
"""Compute simple and skill scores for deterministic continuous forecasts
from a verification error object.
Parameters
----------
err : dict
A verification error object initialized with
:py:func:`pysteps.verification.detcontscores.det_cont_fcst_init` and
populated with
:py:func:`pysteps.verification.detcontscores.det_cont_fcst_accum`.
scores : string or list of strings
The name(s) of the scores. The list of possible score names is:
.. tabularcolumns:: |p{2cm}|L|
+------------+--------------------------------------------------------+
| Name | Description |
+============+========================================================+
| beta | linear regression slope (conditional bias) |
+------------+--------------------------------------------------------+
| corr_p | Pearson's correlation coefficient (linear correlation) |
+------------+--------------------------------------------------------+
| DRMSE | debiased root mean squared error |
+------------+--------------------------------------------------------+
| MAE | mean absolute error |
+------------+--------------------------------------------------------+
| ME | mean error or bias |
+------------+--------------------------------------------------------+
| MSE | mean squared error |
+------------+--------------------------------------------------------+
| RMSE | root mean squared error |
+------------+--------------------------------------------------------+
| RV | reduction of variance |
| | (Brier Score, Nash-Sutcliffe Efficiency) |
+------------+--------------------------------------------------------+
Returns
-------
result : list
list containing the verification results
"""
# catch case of single score passed as string
def get_iterable(x):
if isinstance(x, collections.Iterable) and not isinstance(x, str):
return x
else:
return (x,)
scores = get_iterable(scores)
result = []
for score in scores:
# catch None passed as score
if score is None:
continue
score = score.lower()
# bias (mean error, systematic error)
if score in ["bias", "me"]:
bias = err["me"]
result.append(bias)
# mean absolute error
if score in ["mae"]:
MAE = err["mae"]
result.append(MAE)
# mean squared error
if score in ["mse"]:
MSE = err["mse"]
result.append(MSE)
# root mean squared error
if score == 'rmse':
RMSE = np.sqrt(err["mse"])
result.append(RMSE)
# linear correlation coeff (pearson corr)
if score in ["corr_p", "pearsonr"]:
corr_p = err["cov"]/np.sqrt(err["vobs"])/np.sqrt(err["vpred"])
result.append(corr_p)
# beta (linear regression slope)
if score in ["beta"]:
beta = err["cov"]/err["vpred"]
result.append(beta)
# debiased RMSE
if score in ["drmse"]:
RMSE_d = (err["mse"] - err["me"]**2)/err["vobs"]
result.append(RMSE_d)
# reduction of variance (Brier Score, Nash-Sutcliffe efficiency coefficient,
# MSE skill score)
if score in ["rv", "brier_score", "nse"]:
RV = 1.0 - err["mse"]/err["vobs"]
result.append(RV)
return result
|
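# Editor's illustrative sketch (not part of the record above): a few of the
# scores listed in the table, computed directly from a pair of arrays with
# plain numpy standing in for the pysteps error object.
import numpy as np

pred = np.array([1.0, 2.0, 3.0, 4.0])
obs = np.array([1.1, 1.9, 3.4, 3.6])

me = np.mean(pred - obs)                              # bias / ME
mae = np.mean(np.abs(pred - obs))                     # MAE
rmse = np.sqrt(np.mean((pred - obs) ** 2))            # RMSE
rv = 1.0 - np.mean((pred - obs) ** 2) / np.var(obs)   # reduction of variance
corr_p = np.corrcoef(pred, obs)[0, 1]                 # Pearson correlation
print(me, mae, rmse, rv, corr_p)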
55,403 | def get_artifacts_from_rest_api(url, run_id, path=None):
if path:
resp = requests.get(url, params={"run_id": run_id, "path": path})
else:
resp = requests.get(url, params={"run_id": run_id})
assert resp.status_code == 200
return json.loads(resp.content.decode("utf-8"))
| def get_artifacts_from_rest_api(url, run_id, path=None):
if path:
resp = requests.get(url, params={"run_id": run_id, "path": path})
else:
resp = requests.get(url, params={"run_id": run_id})
resp.raise_for_status()
return resp.json()
|
1,238 | def bench_arrayproxy_slicing():
print_git_title('\nArrayProxy gzip slicing')
# each test is a tuple containing
# (HAVE_INDEXED_GZIP, keep_file_open, sliceobj)
tests = list(it.product(HAVE_IGZIP, KEEP_OPENS, SLICEOBJS))
# remove tests where HAVE_INDEXED_GZIP is True and keep_file_open is False,
# because if keep_file_open is False, HAVE_INDEXED_GZIP has no effect
tests = [t for t in tests if not (t[0] and not t[1])]
testfile = 'testfile.nii'
testfilegz = 'test.nii.gz'
def get_test_label(test):
have_igzip = test[0]
keep_open = test[1]
if not (have_igzip and keep_open):
return 'gzip'
else:
return 'indexed_gzip'
def fix_sliceobj(sliceobj):
new_sliceobj = []
for i, s in enumerate(sliceobj):
if s == ':':
new_sliceobj.append(slice(None))
elif s == '?':
new_sliceobj.append(np.random.randint(0, SHAPE[i]))
else:
new_sliceobj.append(int(s * SHAPE[i]))
return tuple(new_sliceobj)
def fmt_sliceobj(sliceobj):
slcstr = []
for i, s in enumerate(sliceobj):
if s in ':?':
slcstr.append(s)
else:
slcstr.append(str(int(s * SHAPE[i])))
return f"[{', '.join(slcstr)}]"
with InTemporaryDirectory():
print(f'Generating test data... '
f'({int(round(np.prod(SHAPE) * 4 / 1048576.0))} MB)')
data = np.array(np.random.random(SHAPE), dtype=np.float32)
# zero out 10% of voxels so gzip has something to compress
mask = np.random.random(SHAPE[:3]) > 0.1
if len(SHAPE) > 3:
data[mask, :] = 0
else:
data[mask] = 0
# save uncompressed and compressed versions of the image
img = nib.nifti1.Nifti1Image(data, np.eye(4))
nib.save(img, testfilegz)
nib.save(img, testfile)
# each result is a tuple containing
# (label, keep_open, sliceobj, testtime, basetime, testmem, basemem)
#
# where "basetime" is the time taken to load and slice a memmapped
# (uncompressed) image, and "basemem" is memory usage for the same
results = []
# We use the same random seed for each slice object,
seeds = [np.random.randint(0, 2 ** 32) for s in SLICEOBJS]
for ti, test in enumerate(tests):
label = get_test_label(test)
have_igzip, keep_open, sliceobj = test
seed = seeds[SLICEOBJS.index(sliceobj)]
print(f'Running test {ti + 1} of {len(tests)} ({label})...')
# load uncompressed and compressed versions of the image
img = nib.load(testfile, keep_file_open=keep_open)
with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', have_igzip):
imggz = nib.load(testfilegz, keep_file_open=keep_open)
def basefunc():
img.dataobj[fix_sliceobj(sliceobj)]
def testfunc():
with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP',
have_igzip):
imggz.dataobj[fix_sliceobj(sliceobj)]
# make sure nothing is floating around from the previous test
# iteration, so memory profiling is (hopefully) more accurate
gc.collect()
if memory_usage is not None:
membaseline = max(memory_usage(lambda: None))
testmem = max(memory_usage(testfunc)) - membaseline
basemem = max(memory_usage(basefunc)) - membaseline
else:
testmem = np.nan
basemem = np.nan
# reset the random number generator, so test and baseline use the
# same slices
np.random.seed(seed)
testtime = float(timeit(testfunc, number=NITERS)) / float(NITERS)
np.random.seed(seed)
basetime = float(timeit(basefunc, number=NITERS)) / float(NITERS)
results.append((label, keep_open, sliceobj, testtime, basetime,
testmem, basemem))
data = np.zeros((len(results), 4))
data[:, 0] = [r[3] for r in results]
data[:, 1] = [r[4] for r in results]
try:
data[:, 2] = [r[3] / r[4] for r in results]
except ZeroDivisionError:
data[:, 2] = np.nan
data[:, 3] = [r[5] - r[6] for r in results]
rowlbls = [(f'Type {r[0]}, keep_open {r[1]}, '
f'slice {fmt_sliceobj(r[2])}') for r in results]
collbls = ['Time', 'Baseline time', 'Time ratio', 'Memory deviation']
print(rst_table(data, rowlbls, collbls))
| def bench_arrayproxy_slicing():
print_git_title('\nArrayProxy gzip slicing')
# each test is a tuple containing
# (HAVE_INDEXED_GZIP, keep_file_open, sliceobj)
tests = list(it.product(HAVE_IGZIP, KEEP_OPENS, SLICEOBJS))
# remove tests where HAVE_INDEXED_GZIP is True and keep_file_open is False,
# because if keep_file_open is False, HAVE_INDEXED_GZIP has no effect
tests = [t for t in tests if not (t[0] and not t[1])]
testfile = 'testfile.nii'
testfilegz = 'test.nii.gz'
def get_test_label(test):
have_igzip = test[0]
keep_open = test[1]
if not (have_igzip and keep_open):
return 'gzip'
else:
return 'indexed_gzip'
def fix_sliceobj(sliceobj):
new_sliceobj = []
for i, s in enumerate(sliceobj):
if s == ':':
new_sliceobj.append(slice(None))
elif s == '?':
new_sliceobj.append(np.random.randint(0, SHAPE[i]))
else:
new_sliceobj.append(int(s * SHAPE[i]))
return tuple(new_sliceobj)
def fmt_sliceobj(sliceobj):
slcstr = []
for i, s in enumerate(sliceobj):
if s in ':?':
slcstr.append(s)
else:
slcstr.append(str(int(s * SHAPE[i])))
return f"[{', '.join(slcstr)}]"
with InTemporaryDirectory():
print(f'Generating test data... ({int(round(np.prod(SHAPE) * 4 / 1048576.0))} MB)')
data = np.array(np.random.random(SHAPE), dtype=np.float32)
# zero out 10% of voxels so gzip has something to compress
mask = np.random.random(SHAPE[:3]) > 0.1
if len(SHAPE) > 3:
data[mask, :] = 0
else:
data[mask] = 0
# save uncompressed and compressed versions of the image
img = nib.nifti1.Nifti1Image(data, np.eye(4))
nib.save(img, testfilegz)
nib.save(img, testfile)
# each result is a tuple containing
# (label, keep_open, sliceobj, testtime, basetime, testmem, basemem)
#
# where "basetime" is the time taken to load and slice a memmapped
# (uncompressed) image, and "basemem" is memory usage for the same
results = []
# We use the same random seed for each slice object,
seeds = [np.random.randint(0, 2 ** 32) for s in SLICEOBJS]
for ti, test in enumerate(tests):
label = get_test_label(test)
have_igzip, keep_open, sliceobj = test
seed = seeds[SLICEOBJS.index(sliceobj)]
print(f'Running test {ti + 1} of {len(tests)} ({label})...')
# load uncompressed and compressed versions of the image
img = nib.load(testfile, keep_file_open=keep_open)
with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', have_igzip):
imggz = nib.load(testfilegz, keep_file_open=keep_open)
def basefunc():
img.dataobj[fix_sliceobj(sliceobj)]
def testfunc():
with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP',
have_igzip):
imggz.dataobj[fix_sliceobj(sliceobj)]
# make sure nothing is floating around from the previous test
# iteration, so memory profiling is (hopefully) more accurate
gc.collect()
if memory_usage is not None:
membaseline = max(memory_usage(lambda: None))
testmem = max(memory_usage(testfunc)) - membaseline
basemem = max(memory_usage(basefunc)) - membaseline
else:
testmem = np.nan
basemem = np.nan
# reset the random number generator, so test and baseline use the
# same slices
np.random.seed(seed)
testtime = float(timeit(testfunc, number=NITERS)) / float(NITERS)
np.random.seed(seed)
basetime = float(timeit(basefunc, number=NITERS)) / float(NITERS)
results.append((label, keep_open, sliceobj, testtime, basetime,
testmem, basemem))
data = np.zeros((len(results), 4))
data[:, 0] = [r[3] for r in results]
data[:, 1] = [r[4] for r in results]
try:
data[:, 2] = [r[3] / r[4] for r in results]
except ZeroDivisionError:
data[:, 2] = np.nan
data[:, 3] = [r[5] - r[6] for r in results]
rowlbls = [(f'Type {r[0]}, keep_open {r[1]}, '
f'slice {fmt_sliceobj(r[2])}') for r in results]
collbls = ['Time', 'Baseline time', 'Time ratio', 'Memory deviation']
print(rst_table(data, rowlbls, collbls))
|
32,616 | def team_cymru_bulk_whois(client: Client, bulk: List[str]) -> Optional[Dict[str, Any]]:
"""Perform lookups by bulk of ip addresses, returning a dictionary of ip -> record (ASN, Country Code, and Netblock Owner.)
:type client: ``Client``
:param client: cymruwhois client to use
:type bulk: ``list``
:param bulk: list of ip addresses
:return: dict containing the result of the lookupmany action as returned from the API (asn, cc, owner, etc.)
:rtype: Dict[str, Dict[str, str]]
"""
raw_result = client.lookupmany_dict(bulk)
return {k: vars(raw_result[k]) for k in raw_result} if raw_result else None
| def team_cymru_bulk_whois(client: Client, bulk: List[str]) -> Optional[Dict[str, Any]]:
"""Perform lookups by bulk of ip addresses, returning a dictionary of ip -> record (ASN, Country Code, and Netblock Owner.)
:type client: ``Client``
:param client: cymruwhois client to use
:type bulk: ``list``
:param bulk: list of ip addresses
:return: Dictionary contains the results of the lookupmany API call.
:rtype: Dict[str, Dict[str, str]]
"""
raw_result = client.lookupmany_dict(bulk)
return {k: vars(raw_result[k]) for k in raw_result} if raw_result else None
|
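To make the vars()-based conversion in this record easier to follow, here is a minimal, self-contained sketch. It does not depend on cymruwhois; FakeRecord is a hypothetical stand-in for the attribute-style records that a bulk lookup might return.

from typing import Any, Dict, Optional

class FakeRecord:
    # hypothetical stand-in for an attribute-style lookup record
    def __init__(self, asn: str, cc: str, owner: str):
        self.asn = asn
        self.cc = cc
        self.owner = owner

def to_dict_of_dicts(raw_result: Dict[str, Any]) -> Optional[Dict[str, Dict[str, str]]]:
    # same pattern as the command above: vars() exposes each record's attributes as a dict
    return {k: vars(v) for k, v in raw_result.items()} if raw_result else None

print(to_dict_of_dicts({"8.8.8.8": FakeRecord("15169", "US", "GOOGLE")}))
# {'8.8.8.8': {'asn': '15169', 'cc': 'US', 'owner': 'GOOGLE'}}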
35,643 | def pil_to_tensor(pic):
"""Convert a ``PIL Image`` to a tensor of the same type.
This function does not support torchscript.
See :class:`~torchvision.transforms.PILToTensor` for more details.
Args:
pic (PIL Image): Image to be converted to tensor.
Returns:
Tensor: Converted image.
.. note::
        A deep copy of the underlying array is performed.
"""
if not F_pil._is_pil_image(pic):
raise TypeError("pic should be PIL Image. Got {}".format(type(pic)))
if accimage is not None and isinstance(pic, accimage.Image):
# accimage format is always uint8 internally, so always return uint8 here
nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.uint8)
pic.copyto(nppic)
return torch.as_tensor(nppic)
# handle PIL Image
img = torch.as_tensor(np.array(pic, copy=True))
img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
# put it from HWC to CHW format
img = img.permute((2, 0, 1))
return img
| def pil_to_tensor(pic):
"""Convert a ``PIL Image`` to a tensor of the same type.
This function does not support torchscript.
See :class:`~torchvision.transforms.PILToTensor` for more details.
Args:
pic (PIL Image): Image to be converted to tensor.
Returns:
Tensor: Converted image.
.. note::
A deep copy of the underdying array is performed.
"""
if not F_pil._is_pil_image(pic):
raise TypeError("pic should be PIL Image. Got {}".format(type(pic)))
if accimage is not None and isinstance(pic, accimage.Image):
# accimage format is always uint8 internally, so always return uint8 here
nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.uint8)
pic.copyto(nppic)
return torch.as_tensor(nppic)
# handle PIL Image
img = torch.as_tensor(np.array(pic))
img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
# put it from HWC to CHW format
img = img.permute((2, 0, 1))
return img
|
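As a quick usage sketch of the HWC-to-CHW conversion shown above (assuming Pillow, NumPy and PyTorch are installed; the 4x3 test image is arbitrary):

import numpy as np
import torch
from PIL import Image

pic = Image.new("RGB", (4, 3))                    # width=4, height=3
img = torch.as_tensor(np.array(pic))              # HWC tensor, shape (3, 4, 3), dtype uint8
img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
img = img.permute((2, 0, 1))                      # CHW tensor, shape (3, 3, 4)
print(img.shape, img.dtype)                       # torch.Size([3, 3, 4]) torch.uint8

The modified version drops copy=True, presumably because np.array already materialises a fresh array from the PIL buffer, making the extra copy redundant.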
57,692 | def search_alarms_command():
args = demisto.args()
time_frame = args.get('time_frame')
start_time = args.get('start_time', 'now-7d')
end_time = args.get('end_time', 'now')
show_suppressed = args.get('show_suppressed', 'false')
limit = int(args.get('limit', 100))
status = args.get('status', None)
priority = args.get('priority', None)
rule_intent = args.get('rule_intent', None)
rule_method = args.get('rule_method', None)
rule_strategy = args.get('rule_strategy', None)
start_time, end_time = get_time_range(time_frame, start_time, end_time)
result = search_alarms(start_time=start_time, end_time=end_time, show_suppressed=show_suppressed, limit=limit,
status=status, priority=priority, rule_intent=rule_intent, rule_method=rule_method,
rule_strategy=rule_strategy)
alarms = parse_alarms(result)
return_outputs(tableToMarkdown('Alarms:', alarms),
{'AlienVault.Alarm(val.ID && val.ID == obj.ID)': alarms}, result)
| def search_alarms_command():
args = demisto.args()
time_frame = args.get('time_frame')
start_time = args.get('start_time', 'now-7d')
end_time = args.get('end_time', 'now')
show_suppressed = args.get('show_suppressed', 'false')
limit = int(args.get('limit', 100))
status = args.get('status')
priority = args.get('priority')
rule_intent = args.get('rule_intent')
rule_method = args.get('rule_method')
rule_strategy = args.get('rule_strategy')
start_time, end_time = get_time_range(time_frame, start_time, end_time)
result = search_alarms(start_time=start_time, end_time=end_time, show_suppressed=show_suppressed, limit=limit,
status=status, priority=priority, rule_intent=rule_intent, rule_method=rule_method,
rule_strategy=rule_strategy)
alarms = parse_alarms(result)
return_outputs(tableToMarkdown('Alarms:', alarms),
{'AlienVault.Alarm(val.ID && val.ID == obj.ID)': alarms}, result)
|
33,233 | def _make_passthrough_contrast(level, contrast_names, model_type='meta', test="t"):
gb_dict = dict(Subject=['subject', 'contrast'],
Session=['session', 'contrast'],
Dataset=['contrast'])
block = OrderedDict(Level=level,
Name=level,
GroupBy=gb_dict[level],
Model={'Type': model_type,
'X': contrast_names})
contrasts = []
for cn in contrast_names:
cdict = OrderedDict(Name=level.lower() + "_" + cn, ConditionList=[cn],
Weights=[1], Test=test)
contrasts.append(cdict)
block["Contrasts"] = contrasts
return block
| def _make_passthrough_contrast(level, contrast_names, model_type='glm', test="t"):
gb_dict = dict(Subject=['subject', 'contrast'],
Session=['session', 'contrast'],
Dataset=['contrast'])
block = OrderedDict(Level=level,
Name=level,
GroupBy=gb_dict[level],
Model={'Type': model_type,
'X': contrast_names})
contrasts = []
for cn in contrast_names:
cdict = OrderedDict(Name=level.lower() + "_" + cn, ConditionList=[cn],
Weights=[1], Test=test)
contrasts.append(cdict)
block["Contrasts"] = contrasts
return block
|
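For orientation, a small example of the block this helper emits (assuming _make_passthrough_contrast and its OrderedDict import are in scope; the contrast name is made up):

import json

block = _make_passthrough_contrast('Dataset', ['task_vs_baseline'])
print(json.dumps(block, indent=2))
# Produces a Dataset-level block with GroupBy ['contrast'],
# Model {'Type': ..., 'X': ['task_vs_baseline']} (Type defaults to 'glm' in the
# modified version, 'meta' in the original), and one contrast named
# 'dataset_task_vs_baseline' with Weights [1] and Test 't'.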
14,450 | def freeze(session):
"""
    Freeze live tmux session and return session config :py:obj:`dict`.
Parameters
----------
session : :class:`libtmux.Session`
session object
Returns
-------
dict
tmuxp compatible workspace config
"""
sconf = {'session_name': session['session_name'], 'windows': []}
for w in session.windows:
wconf = {
'options': w.show_window_options(),
'window_name': w.name,
'layout': w.layout,
'panes': [],
}
if w.get('window_active', '0') == '1':
wconf['focus'] = 'true'
# If all panes have same path, set 'start_directory' instead
# of using 'cd' shell commands.
def pane_has_same_path(p):
return w.panes[0].current_path == p.current_path
if all(pane_has_same_path(p) for p in w.panes):
wconf['start_directory'] = w.panes[0].current_path
if w.name == '':
empty_window_title = True
for p in w.panes:
pconf = {'shell_command': []}
if 'start_directory' not in wconf:
pconf['shell_command'].append('cd ' + p.current_path)
if p.get('pane_active', '0') == '1':
pconf['focus'] = 'true'
current_cmd = p.current_command
def filter_interpretters_and_shells():
return current_cmd.startswith('-') or any(
current_cmd.endswith(cmd) for cmd in ['python', 'ruby', 'node']
)
if filter_interpretters_and_shells():
current_cmd = None
if current_cmd:
pconf['shell_command'].append(current_cmd)
else:
if not len(pconf['shell_command']):
pconf = 'pane'
wconf['panes'].append(pconf)
if empty_window_title:
pconf['shell_command'].append('tmux rename-session \'\'')
sconf['windows'].append(wconf)
return sconf
| def freeze(session):
"""
    Freeze live tmux session and return session config :py:obj:`dict`.
Parameters
----------
session : :class:`libtmux.Session`
session object
Returns
-------
dict
tmuxp compatible workspace config
"""
sconf = {'session_name': session['session_name'], 'windows': []}
for w in session.windows:
wconf = {
'options': w.show_window_options(),
'window_name': w.name,
'layout': w.layout,
'panes': [],
}
if w.get('window_active', '0') == '1':
wconf['focus'] = 'true'
# If all panes have same path, set 'start_directory' instead
# of using 'cd' shell commands.
def pane_has_same_path(p):
return w.panes[0].current_path == p.current_path
if all(pane_has_same_path(p) for p in w.panes):
wconf['start_directory'] = w.panes[0].current_path
empty_window_title = w.name == ''
for p in w.panes:
pconf = {'shell_command': []}
if 'start_directory' not in wconf:
pconf['shell_command'].append('cd ' + p.current_path)
if p.get('pane_active', '0') == '1':
pconf['focus'] = 'true'
current_cmd = p.current_command
def filter_interpretters_and_shells():
return current_cmd.startswith('-') or any(
current_cmd.endswith(cmd) for cmd in ['python', 'ruby', 'node']
)
if filter_interpretters_and_shells():
current_cmd = None
if current_cmd:
pconf['shell_command'].append(current_cmd)
else:
if not len(pconf['shell_command']):
pconf = 'pane'
wconf['panes'].append(pconf)
if empty_window_title:
pconf['shell_command'].append('tmux rename-session \'\'')
sconf['windows'].append(wconf)
return sconf
|
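A hedged usage sketch for freeze(): it needs a live tmux server with at least one session plus the libtmux package, and the exact session accessor varies slightly between libtmux versions.

import libtmux

server = libtmux.Server()
session = server.sessions[0]      # on older libtmux, server.list_sessions()[0]
workspace = freeze(session)       # freeze() as defined in the record above
print(workspace['session_name'], len(workspace['windows']))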
5,367 | def test_uptodate_with_failed_changes():
"""
Test pkg.uptodate with simulated failed changes
"""
pkgs = {
"pkga": {"old": "1.0.1", "new": "2.0.1"},
"pkgb": {"old": "1.0.2", "new": "2.0.2"},
"pkgc": {"old": "1.0.3", "new": "2.0.3"},
}
list_upgrades = MagicMock(
return_value={pkgname: pkgver["new"] for pkgname, pkgver in pkgs.items()}
)
upgrade = MagicMock(return_value={})
version = MagicMock(side_effect=lambda pkgname, **_: pkgs[pkgname]["old"])
with patch.dict(
pkg.__salt__,
{
"pkg.list_upgrades": list_upgrades,
"pkg.upgrade": upgrade,
"pkg.version": version,
},
):
# Run state with test=false
with patch.dict(pkg.__opts__, {"test": False}):
ret = pkg.uptodate("dummy", test=True, pkgs=[pkgname for pkgname in pkgs],)
assert not ret["result"]
assert ret["changes"] == {}
# Run state with test=true
with patch.dict(pkg.__opts__, {"test": True}):
ret = pkg.uptodate("dummy", test=True, pkgs=[pkgname for pkgname in pkgs],)
assert ret["result"] is None
assert ret["changes"] == pkgs
| def test_uptodate_with_failed_changes(pkgs):
"""
Test pkg.uptodate with simulated failed changes
"""
pkgs = {
"pkga": {"old": "1.0.1", "new": "2.0.1"},
"pkgb": {"old": "1.0.2", "new": "2.0.2"},
"pkgc": {"old": "1.0.3", "new": "2.0.3"},
}
list_upgrades = MagicMock(
return_value={pkgname: pkgver["new"] for pkgname, pkgver in pkgs.items()}
)
upgrade = MagicMock(return_value={})
version = MagicMock(side_effect=lambda pkgname, **_: pkgs[pkgname]["old"])
with patch.dict(
pkg.__salt__,
{
"pkg.list_upgrades": list_upgrades,
"pkg.upgrade": upgrade,
"pkg.version": version,
},
):
# Run state with test=false
with patch.dict(pkg.__opts__, {"test": False}):
ret = pkg.uptodate("dummy", test=True, pkgs=[pkgname for pkgname in pkgs],)
assert not ret["result"]
assert ret["changes"] == {}
# Run state with test=true
with patch.dict(pkg.__opts__, {"test": True}):
ret = pkg.uptodate("dummy", test=True, pkgs=[pkgname for pkgname in pkgs],)
assert ret["result"] is None
assert ret["changes"] == pkgs
|
2,555 | def precision_recall_fscore_support(
y_true,
y_pred,
*,
beta=1.0,
labels=None,
pos_label=1,
average=None,
warn_for=("precision", "recall", "f-score"),
sample_weight=None,
zero_division="warn",
):
"""Compute precision, recall, F-measure and support for each class.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, default=1.0
The strength of recall versus precision in the F-score.
labels : array-like, default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'binary', 'micro', 'macro', 'samples','weighted'}, \
default=None
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division:
- recall: when there are no positive labels
- precision: when there are no positive predictions
- f-score: both
If set to "warn", this acts as 0, but warnings are also raised.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Estimated precision of data.
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Estimated recall of data.
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Estimated fbeta_score of data.
support : None (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
Notes
-----
When ``true positive + false positive == 0``, precision is undefined.
When ``true positive + false negative == 0``, recall is undefined.
In such cases, by default the metric will be set to 0, as will f-score,
and ``UndefinedMetricWarning`` will be raised. This behavior can be
modified with ``zero_division``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_.
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
(array([0. , 0. , 0.66...]),
array([0., 0., 1.]), array([0. , 0. , 0.8]),
array([2, 2, 2]))
"""
_check_zero_division(zero_division)
if beta < 0:
raise ValueError("beta should be >=0 in the F-beta score")
labels = _check_set_wise_labels(y_true, y_pred, average, labels, pos_label)
# Calculate tp_sum, pred_sum, true_sum ###
samplewise = average == "samples"
MCM = multilabel_confusion_matrix(
y_true,
y_pred,
sample_weight=sample_weight,
labels=labels,
samplewise=samplewise,
)
tp_sum = MCM[:, 1, 1]
pred_sum = tp_sum + MCM[:, 0, 1]
true_sum = tp_sum + MCM[:, 1, 0]
if average == "micro":
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
# Divide, and on zero-division, set scores and/or warn according to
# zero_division:
precision = _prf_divide(
tp_sum, pred_sum, "precision", "predicted", average, warn_for, zero_division
)
recall = _prf_divide(
tp_sum, true_sum, "recall", "true", average, warn_for, zero_division
)
# warn for f-score only if zero_division is warn, it is in warn_for
# and BOTH prec and rec are ill-defined
if zero_division == "warn" and ("f-score",) == warn_for:
if (pred_sum[true_sum == 0] == 0).any():
_warn_prf(average, "true nor predicted", "F-score is", len(true_sum))
# if tp == 0 F will be 1 only if all predictions are zero, all labels are
    # zero, and zero_division=1. In all other cases, 0
if np.isposinf(beta):
f_score = recall
else:
denom = beta2 * precision + recall
denom[denom == 0.0] = 1 # avoid division by 0
f_score = (1 + beta2) * precision * recall / denom
# Average the results
if average == "weighted":
weights = true_sum
if weights.sum() == 0:
zero_division_value = np.float64(1.0)
if zero_division in ["warn", 0]:
zero_division_value = np.float64(0.0)
# precision is zero_division if there are no positive predictions
# recall is zero_division if there are no positive labels
# fscore is zero_division if all labels AND predictions are
# negative
if pred_sum.sum() == 0:
return (
zero_division_value,
zero_division_value,
zero_division_value,
None,
)
else:
return (np.float64(0.0), zero_division_value, np.float64(0.0), None)
elif average == "samples":
weights = sample_weight
else:
weights = None
if average is not None:
assert average != "binary" or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
| def precision_recall_fscore_support(
y_true,
y_pred,
*,
beta=1.0,
labels=None,
pos_label=1,
average=None,
warn_for=("precision", "recall", "f-score"),
sample_weight=None,
zero_division="warn",
):
"""Compute precision, recall, F-measure and support for each class.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, default=1.0
The strength of recall versus precision in the F-score.
labels : array-like, default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'binary', 'micro', 'macro', 'samples','weighted'}, \
default=None
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division:
- recall: when there are no positive labels
- precision: when there are no positive predictions
- f-score: both
If set to "warn", this acts as 0, but warnings are also raised.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision score.
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Estimated recall of data.
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Estimated fbeta_score of data.
support : None (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
Notes
-----
When ``true positive + false positive == 0``, precision is undefined.
When ``true positive + false negative == 0``, recall is undefined.
In such cases, by default the metric will be set to 0, as will f-score,
and ``UndefinedMetricWarning`` will be raised. This behavior can be
modified with ``zero_division``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_.
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
(array([0. , 0. , 0.66...]),
array([0., 0., 1.]), array([0. , 0. , 0.8]),
array([2, 2, 2]))
"""
_check_zero_division(zero_division)
if beta < 0:
raise ValueError("beta should be >=0 in the F-beta score")
labels = _check_set_wise_labels(y_true, y_pred, average, labels, pos_label)
# Calculate tp_sum, pred_sum, true_sum ###
samplewise = average == "samples"
MCM = multilabel_confusion_matrix(
y_true,
y_pred,
sample_weight=sample_weight,
labels=labels,
samplewise=samplewise,
)
tp_sum = MCM[:, 1, 1]
pred_sum = tp_sum + MCM[:, 0, 1]
true_sum = tp_sum + MCM[:, 1, 0]
if average == "micro":
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
# Divide, and on zero-division, set scores and/or warn according to
# zero_division:
precision = _prf_divide(
tp_sum, pred_sum, "precision", "predicted", average, warn_for, zero_division
)
recall = _prf_divide(
tp_sum, true_sum, "recall", "true", average, warn_for, zero_division
)
# warn for f-score only if zero_division is warn, it is in warn_for
# and BOTH prec and rec are ill-defined
if zero_division == "warn" and ("f-score",) == warn_for:
if (pred_sum[true_sum == 0] == 0).any():
_warn_prf(average, "true nor predicted", "F-score is", len(true_sum))
# if tp == 0 F will be 1 only if all predictions are zero, all labels are
    # zero, and zero_division=1. In all other cases, 0
if np.isposinf(beta):
f_score = recall
else:
denom = beta2 * precision + recall
denom[denom == 0.0] = 1 # avoid division by 0
f_score = (1 + beta2) * precision * recall / denom
# Average the results
if average == "weighted":
weights = true_sum
if weights.sum() == 0:
zero_division_value = np.float64(1.0)
if zero_division in ["warn", 0]:
zero_division_value = np.float64(0.0)
# precision is zero_division if there are no positive predictions
# recall is zero_division if there are no positive labels
# fscore is zero_division if all labels AND predictions are
# negative
if pred_sum.sum() == 0:
return (
zero_division_value,
zero_division_value,
zero_division_value,
None,
)
else:
return (np.float64(0.0), zero_division_value, np.float64(0.0), None)
elif average == "samples":
weights = sample_weight
else:
weights = None
if average is not None:
assert average != "binary" or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
|
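The zero_division parameter described above is easiest to see on degenerate input (this uses the real scikit-learn function; zero_division is available in recent releases):

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

y_true = np.array([0, 0, 0])
y_pred = np.array([0, 0, 0])      # no positive labels, no positive predictions

# zero_division=0 returns zeros silently; the default "warn" also returns zeros
# but additionally emits UndefinedMetricWarning
print(precision_recall_fscore_support(y_true, y_pred, average="binary", zero_division=0))
# (0.0, 0.0, 0.0, None)
print(precision_recall_fscore_support(y_true, y_pred, average="binary", zero_division=1))
# (1.0, 1.0, 1.0, None)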
50,530 | def test_no_additional_imports():
# test that 'import geopandas' does not import any of the optional or
# development dependencies
blacklist = {
"pytest",
"py",
"ipython",
# 'matplotlib', # matplotlib gets imported by pandas, see below
"descartes",
"mapclassify",
# 'rtree', # rtree actually gets imported if installed
"sqlalchemy",
"psycopg2",
"geopy",
"geoalchemy2",
"pygeos",
}
if PANDAS_GE_10:
# pandas > 0.25 stopped importing matplotlib by default
blacklist.add("matplotlib")
code = """
import sys
import geopandas
blacklist = {0!r}
mods = blacklist & set(m.split('.')[0] for m in sys.modules)
if mods:
sys.stderr.write('err: geopandas should not import: {{}}'.format(', '.join(mods)))
sys.exit(len(mods))
""".format(
blacklist
)
call = [sys.executable, "-c", code]
returncode = subprocess.run(call).returncode
assert returncode == 0
| def test_no_additional_imports():
# test that 'import geopandas' does not import any of the optional or
# development dependencies
blacklist = {
"pytest",
"py",
"ipython",
# 'matplotlib', # matplotlib gets imported by pandas, see below
"descartes",
"mapclassify",
# 'rtree', # rtree actually gets imported if installed
"sqlalchemy",
"psycopg2",
"geopy",
"geoalchemy2",
}
if PANDAS_GE_10:
# pandas > 0.25 stopped importing matplotlib by default
blacklist.add("matplotlib")
code = """
import sys
import geopandas
blacklist = {0!r}
mods = blacklist & set(m.split('.')[0] for m in sys.modules)
if mods:
sys.stderr.write('err: geopandas should not import: {{}}'.format(', '.join(mods)))
sys.exit(len(mods))
""".format(
blacklist
)
call = [sys.executable, "-c", code]
returncode = subprocess.run(call).returncode
assert returncode == 0
|
26,310 | def test_ccompiler_namespace():
ccompiler = new_compiler()
customize_compiler(ccompiler)
assert hasattr(ccompiler, "compile")
| def test_ccompiler_namespace(_avoid_permanent_changes_in_sysconfig):
ccompiler = new_compiler()
customize_compiler(ccompiler)
assert hasattr(ccompiler, "compile")
|
21,949 | def _default_frequency_range(syslist, Hz=None, number_of_samples=None,
feature_periphery_decades=None):
"""Compute a default frequency range for frequency domain plots.
This code looks at the poles and zeros of all of the systems that
we are plotting and sets the frequency range to be one decade above
and below the min and max feature frequencies, rounded to the nearest
integer. If no features are found, it returns logspace(-1, 1)
Parameters
----------
syslist : list of LTI
List of linear input/output systems (single system is OK)
Hz : bool. optional
If True, the limits (first and last value) of the frequencies
are set to full decades in Hz so it fits plotting with logarithmic
scale in Hz otherwise in rad/s. Omega is always returned in rad/sec.
number_of_samples : int, optional
Number of samples to generate. The default value is read from
        ``config.defaults['freqplot.number_of_samples']``. If None, then the
default from `numpy.logspace` is used.
feature_periphery_decades : float, optional
Defines how many decades shall be included in the frequency range on
both sides of features (poles, zeros). The default value is read from
``config.defaults['freqplot.feature_periphery_decades']``.
Returns
-------
omega : array
Range of frequencies in rad/sec
Examples
--------
>>> from matlab import ss
>>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
>>> omega = _default_frequency_range(sys)
"""
# Set default values for options
number_of_samples = config._get_param(
'freqplot', 'number_of_samples', number_of_samples)
feature_periphery_decades = config._get_param(
'freqplot', 'feature_periphery_decades', feature_periphery_decades, 1)
# Find the list of all poles and zeros in the systems
features = np.array(())
freq_interesting = []
# detect if single sys passed by checking if it is sequence-like
if not hasattr(syslist, '__iter__'):
syslist = (syslist,)
for sys in syslist:
try:
# Add new features to the list
if sys.isctime():
features_ = np.concatenate((np.abs(sys.pole()),
np.abs(sys.zero())))
# Get rid of poles and zeros at the origin
toreplace = np.isclose(features_, 0.0)
if np.any(toreplace):
features_ = features_[~toreplace]
elif sys.isdtime(strict=True):
fn = math.pi * 1. / sys.dt
# TODO: What distance to the Nyquist frequency is appropriate?
freq_interesting.append(fn * 0.9)
features_ = np.concatenate((sys.pole(),
sys.zero()))
# Get rid of poles and zeros on the real axis (imag==0)
# * origin and real < 0
            # * at 1.: would result in omega=0. (logarithmic plot!)
toreplace = np.isclose(features_.imag, 0.0) & (
(features_.real <= 0.) |
(np.abs(features_.real - 1.0) < 1.e-10))
if np.any(toreplace):
features_ = features_[~toreplace]
# TODO: improve
features_ = np.abs(np.log(features_) / (1.j * sys.dt))
else:
# TODO
raise NotImplementedError(
"type of system in not implemented now")
features = np.concatenate((features, features_))
except NotImplementedError:
pass
# Make sure there is at least one point in the range
if features.shape[0] == 0:
features = np.array([1.])
if Hz:
features /= 2. * math.pi
features = np.log10(features)
lsp_min = np.rint(np.min(features) - feature_periphery_decades)
lsp_max = np.rint(np.max(features) + feature_periphery_decades)
if Hz:
lsp_min += np.log10(2. * math.pi)
lsp_max += np.log10(2. * math.pi)
if freq_interesting:
lsp_min = min(lsp_min, np.log10(min(freq_interesting)))
lsp_max = max(lsp_max, np.log10(max(freq_interesting)))
# TODO: Add a check in discrete case to make sure we don't get aliasing
# (Attention: there is a list of system but only one omega vector)
# Set the range to be an order of magnitude beyond any features
if number_of_samples:
omega = np.logspace(
lsp_min, lsp_max, num=number_of_samples, endpoint=True)
else:
omega = np.logspace(lsp_min, lsp_max, endpoint=True)
return omega
| def _default_frequency_range(syslist, Hz=None, number_of_samples=None,
feature_periphery_decades=None):
"""Compute a default frequency range for frequency domain plots.
This code looks at the poles and zeros of all of the systems that
we are plotting and sets the frequency range to be one decade above
and below the min and max feature frequencies, rounded to the nearest
integer. If no features are found, it returns logspace(-1, 1)
Parameters
----------
syslist : list of LTI
List of linear input/output systems (single system is OK)
Hz : bool, optional
If True, the limits (first and last value) of the frequencies
are set to full decades in Hz so it fits plotting with logarithmic
scale in Hz otherwise in rad/s. Omega is always returned in rad/sec.
number_of_samples : int, optional
Number of samples to generate. The default value is read from
        ``config.defaults['freqplot.number_of_samples']``. If None, then the
default from `numpy.logspace` is used.
feature_periphery_decades : float, optional
Defines how many decades shall be included in the frequency range on
both sides of features (poles, zeros). The default value is read from
``config.defaults['freqplot.feature_periphery_decades']``.
Returns
-------
omega : array
Range of frequencies in rad/sec
Examples
--------
>>> from matlab import ss
>>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
>>> omega = _default_frequency_range(sys)
"""
# Set default values for options
number_of_samples = config._get_param(
'freqplot', 'number_of_samples', number_of_samples)
feature_periphery_decades = config._get_param(
'freqplot', 'feature_periphery_decades', feature_periphery_decades, 1)
# Find the list of all poles and zeros in the systems
features = np.array(())
freq_interesting = []
# detect if single sys passed by checking if it is sequence-like
if not hasattr(syslist, '__iter__'):
syslist = (syslist,)
for sys in syslist:
try:
# Add new features to the list
if sys.isctime():
features_ = np.concatenate((np.abs(sys.pole()),
np.abs(sys.zero())))
# Get rid of poles and zeros at the origin
toreplace = np.isclose(features_, 0.0)
if np.any(toreplace):
features_ = features_[~toreplace]
elif sys.isdtime(strict=True):
fn = math.pi * 1. / sys.dt
# TODO: What distance to the Nyquist frequency is appropriate?
freq_interesting.append(fn * 0.9)
features_ = np.concatenate((sys.pole(),
sys.zero()))
# Get rid of poles and zeros on the real axis (imag==0)
# * origin and real < 0
            # * at 1.: would result in omega=0. (logarithmic plot!)
toreplace = np.isclose(features_.imag, 0.0) & (
(features_.real <= 0.) |
(np.abs(features_.real - 1.0) < 1.e-10))
if np.any(toreplace):
features_ = features_[~toreplace]
# TODO: improve
features_ = np.abs(np.log(features_) / (1.j * sys.dt))
else:
# TODO
raise NotImplementedError(
"type of system in not implemented now")
features = np.concatenate((features, features_))
except NotImplementedError:
pass
# Make sure there is at least one point in the range
if features.shape[0] == 0:
features = np.array([1.])
if Hz:
features /= 2. * math.pi
features = np.log10(features)
lsp_min = np.rint(np.min(features) - feature_periphery_decades)
lsp_max = np.rint(np.max(features) + feature_periphery_decades)
if Hz:
lsp_min += np.log10(2. * math.pi)
lsp_max += np.log10(2. * math.pi)
if freq_interesting:
lsp_min = min(lsp_min, np.log10(min(freq_interesting)))
lsp_max = max(lsp_max, np.log10(max(freq_interesting)))
# TODO: Add a check in discrete case to make sure we don't get aliasing
# (Attention: there is a list of system but only one omega vector)
# Set the range to be an order of magnitude beyond any features
if number_of_samples:
omega = np.logspace(
lsp_min, lsp_max, num=number_of_samples, endpoint=True)
else:
omega = np.logspace(lsp_min, lsp_max, endpoint=True)
return omega
|
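The decade logic at the heart of _default_frequency_range can be sketched standalone with NumPy (the feature frequencies 0.5 and 20 rad/s are illustrative):

import numpy as np

features = np.log10(np.array([0.5, 20.0]))        # log10 of pole/zero magnitudes
feature_periphery_decades = 1.0

lsp_min = np.rint(features.min() - feature_periphery_decades)   # -> -1.0
lsp_max = np.rint(features.max() + feature_periphery_decades)   # -> 2.0
omega = np.logspace(lsp_min, lsp_max, num=1000, endpoint=True)
print(omega[0], omega[-1])                        # approximately 0.1 and 100.0 rad/s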
14,538 | def color_from_segment(segment: model.Classified_Segment) -> model.Color:
"""
Segment color legend:
- Yellow - Fixation
- Green - Saccade
- Blue - PSO
- Purple - Smooth pursuit
"""
return color_from_segment_class(segment.segment_class)
| def color_from_segment(segment: model.Classified_Segment) -> model.Color:
"""
Segment color legend:
- Yellow - Fixation
- Green - Saccade
- Blue - Post-saccadic oscillation
- Purple - Smooth pursuit
"""
return color_from_segment_class(segment.segment_class)
|
50,627 | def test_normalize_muted():
make_frame = lambda t: np.array([0.0])
clip = AudioClip(make_frame, duration=1, fps=44100)
clip = audio_normalize(clip)
close_all_clips(locals())
| def test_normalize_muted():
z_array = np.array([0.0])
make_frame = lambda t: z_array
clip = AudioClip(make_frame, duration=1, fps=44100)
clip = audio_normalize(clip)
assert np.array_equal(clip.to_soundarray(), z_array)
close_all_clips(locals())
|
1,021 | def test_pprint_heap_allocated_type():
"""
Test that pprint works for heap allocated types.
"""
module_name = "xxlimited" if sys.version_info < (3, 10) else "xxlimited_35"
expected_output = (
"xxlimited.Null" if sys.version_info < (3, 11) else "xxlimited_35.Null"
)
xxlimited = pytest.importorskip(module_name)
output = pretty.pretty(xxlimited.Null)
assert output == expected_output
| def test_pprint_heap_allocated_type():
"""
Test that pprint works for heap allocated types.
"""
module_name = "xxlimited" if sys.version_info < (3, 10) else "xxlimited_35"
expected_output = (
"xxlimited.Null" if sys.version_info < (3, 10, 6) else "xxlimited_35.Null"
)
xxlimited = pytest.importorskip(module_name)
output = pretty.pretty(xxlimited.Null)
assert output == expected_output
|
16,085 | def _filter_bad_internal_external_urls(conf: dict) -> dict:
"""Filter internal/external URL with a path."""
for key in CONF_INTERNAL_URL, CONF_EXTERNAL_URL:
if key in conf and urlparse(conf[key]).path not in ("", "/"):
# We warn but do not fix, because if this was incorrectly configured,
# adjusting this valie might impact security.
_LOGGER.warning(
"Invalid %s set. It's not allowed to have a path (/bla)", key
)
return conf
| def _filter_bad_internal_external_urls(conf: dict) -> dict:
"""Filter internal/external URL with a path."""
for key in CONF_INTERNAL_URL, CONF_EXTERNAL_URL:
if key in conf and urlparse(conf[key]).path not in ("", "/"):
# We warn but do not fix, because if this was incorrectly configured,
# adjusting this value might impact security.
_LOGGER.warning(
"Invalid %s set. It's not allowed to have a path (/bla)", key
)
return conf
|
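The path check above boils down to urlparse from the standard library, e.g.:

from urllib.parse import urlparse

print(urlparse("https://example.com").path)       # ''     -> accepted
print(urlparse("https://example.com/").path)      # '/'    -> accepted
print(urlparse("https://example.com/bla").path)   # '/bla' -> triggers the warning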
54,720 | def _flag_missing_timestamps(
df: pd.DataFrame,
frequency: str,
column_name: str,
first_time_stamp: pd.Timestamp,
last_time_stamp: pd.Timestamp,
) -> namedtuple:
"""
Utility function to test if input data frame is missing any timestamps relative to expected timestamps
generated based on the first_time_stamp, last_time_stamp and frequency.
:param pd.DataFrame df: data frame which needs to be tested for missing timestamps
:param str frequency: frequency i.e. sampling frequency of the data, expressed in seconds. A list of acceptable
frequency strings are available here
(https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases)
:param str column_name: name of the column which has time series if not the index.
:param pd.Timestamp first_time_stamp: timestamp at which the time_series is expected to start from.
:param pd.Timestamp last_time_stamp: timestamp at which the time_series is expected to end with.
:return: namedtuple with 3 attributes namely flag, raw_data and new_index
1. flag - boolean set to True if there are missing timestamps, else set to False
2. raw_data - input data frame as is without any modifications
3. new_index - pd.DateTimeIndex that can be used to set the new index, defaults to None, assigned a value only
when flag is set to True
:rtype: namedtuple
"""
# Declare a named tuple to hold results
MissingTimeStampFlag = namedtuple('MissingTimeStampFlag', ['flag', 'raw_data', 'new_index'])
result = {
'flag': None,
'raw_data': df.copy(deep=True),
'new_index': None
}
# Generate expected timestamps
expected_timestamps = pd.date_range(start=first_time_stamp, end=last_time_stamp, frequency=frequency)
# Get actual timestamps
if column_name:
df.set_index(column_name, inplace=True)
df.sort_index(inplace=True)
actual_timestamps = df.index.values
# Check if they are the same
comparison_index = expected_timestamps.difference(actual_timestamps)
if comparison_index.empty:
result['flag'] = True
result['new_index'] = expected_timestamps
else:
result['flag'] = False
# Return the result as a Named Tuple
return MissingTimeStampFlag._make(result)
| def _flag_missing_timestamps(
df: pd.DataFrame,
frequency: str,
column_name: str,
first_time_stamp: pd.Timestamp,
last_time_stamp: pd.Timestamp,
) -> namedtuple:
"""
Flag timestamps that are missing.
Utility function to test if input data frame is missing any timestamps
relative to expected timestamps
generated based on the first_time_stamp, last_time_stamp and frequency.
:param pd.DataFrame df: data frame which needs to be tested for missing timestamps
:param str frequency: frequency i.e. sampling frequency of the data, expressed in seconds. A list of acceptable
frequency strings are available here
(https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases)
:param str column_name: name of the column which has time series if not the index.
:param pd.Timestamp first_time_stamp: timestamp at which the time_series is expected to start from.
:param pd.Timestamp last_time_stamp: timestamp at which the time_series is expected to end with.
:return: namedtuple with 3 attributes namely flag, raw_data and new_index
1. flag - boolean set to True if there are missing timestamps, else set to False
2. raw_data - input data frame as is without any modifications
3. new_index - pd.DateTimeIndex that can be used to set the new index, defaults to None, assigned a value only
when flag is set to True
:rtype: namedtuple
"""
# Declare a named tuple to hold results
MissingTimeStampFlag = namedtuple('MissingTimeStampFlag', ['flag', 'raw_data', 'new_index'])
result = {
'flag': None,
'raw_data': df.copy(deep=True),
'new_index': None
}
# Generate expected timestamps
expected_timestamps = pd.date_range(start=first_time_stamp, end=last_time_stamp, frequency=frequency)
# Get actual timestamps
if column_name:
df.set_index(column_name, inplace=True)
df.sort_index(inplace=True)
actual_timestamps = df.index.values
# Check if they are the same
comparison_index = expected_timestamps.difference(actual_timestamps)
if comparison_index.empty:
result['flag'] = True
result['new_index'] = expected_timestamps
else:
result['flag'] = False
# Return the result as a Named Tuple
return MissingTimeStampFlag._make(result)
|
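The core of the missing-timestamp check is a DatetimeIndex difference; a standalone sketch (note that stock pandas.date_range takes freq=, whereas the record passes frequency=):

import pandas as pd

expected = pd.date_range(start="2021-01-01", end="2021-01-05", freq="D")
actual = expected.delete(2)               # simulate one missing day
missing = expected.difference(actual)

print(missing.empty)                      # False -> at least one timestamp is missing
print(list(missing))                      # [Timestamp('2021-01-03 00:00:00')]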
31,986 | def gcp_iam_group_create_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Create a new group.
Args:
client (Client): GCP API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
parent = args.get('parent')
description = args.get('description')
display_name = args.get('display_name')
group_email_address = args.get('group_email_address')
response = client.gcp_iam_group_create_request(parent, display_name, group_email_address, description)
outputs = copy.deepcopy(response.get('response'))
outputs = update_time_format(outputs, ['createTime', 'updateTime'])
readable_output = tableToMarkdown(
'Group information:',
outputs,
headers=['name', 'groupKey', 'parent', 'displayName', 'createTime', 'updateTime'],
headerTransform=pascalToSpace
)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='GCP.IAM.Group',
outputs_key_field='name',
outputs=outputs,
raw_response=response
)
return command_results
| def gcp_iam_group_create_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Create a new group.
Args:
client (Client): GCP API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
parent = args.get('parent')
description = args.get('description')
display_name = args.get('display_name')
group_email_address = args.get('group_email_address')
response = client.gcp_iam_group_create_request(parent, display_name, group_email_address, description)
outputs = copy.deepcopy(response.get('response'))
outputs = update_time_format(outputs, ['createTime', 'updateTime'])
readable_output = tableToMarkdown(
f'Successfully Created Group "{display_name}"',
outputs,
headers=['name', 'groupKey', 'parent', 'displayName', 'createTime', 'updateTime'],
headerTransform=pascalToSpace
)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='GCP.IAM.Group',
outputs_key_field='name',
outputs=outputs,
raw_response=response
)
return command_results
|
27,802 | def prepare_release_pr(
base_branch: str, is_major: bool, token: str, prerelease: str
) -> None:
print()
print(f"Processing release for branch {Fore.CYAN}{base_branch}")
check_call(["git", "checkout", f"origin/{base_branch}"])
changelog = Path("changelog")
features = list(changelog.glob("*.feature.rst"))
breaking = list(changelog.glob("*.breaking.rst"))
is_feature_release = features or breaking
try:
version = find_next_version(
base_branch, is_major, is_feature_release, prerelease
)
except InvalidFeatureRelease as e:
print(f"{Fore.RED}{e}")
raise SystemExit(1)
template_name = ""
if is_major:
template_name = "release.pre.rst"
elif is_feature_release:
template_name = "release.minor.rst"
else:
template_name = "release.patch.rst"
print(f"Version: {Fore.CYAN}{version}")
release_branch = f"release-{version}"
run(
["git", "config", "user.name", "pytest bot"],
check=True,
)
run(
["git", "config", "user.email", "[email protected]"],
check=True,
)
run(
["git", "checkout", "-b", release_branch, f"origin/{base_branch}"],
check=True,
)
print(f"Branch {Fore.CYAN}{release_branch}{Fore.RESET} created.")
# important to use tox here because we have changed branches, so dependencies
# might have changed as well
cmdline = [
"tox",
"-e",
"release",
"--",
version,
template_name,
"--skip-check-links",
]
print("Running", " ".join(cmdline))
run(
cmdline,
check=True,
)
oauth_url = f"https://{token}:[email protected]/{SLUG}.git"
run(
["git", "push", oauth_url, f"HEAD:{release_branch}", "--force"],
check=True,
)
print(f"Branch {Fore.CYAN}{release_branch}{Fore.RESET} pushed.")
body = PR_BODY.format(version=version)
repo = login(token)
pr = repo.create_pull(
f"Prepare release {version}",
base=base_branch,
head=release_branch,
body=body,
)
print(f"Pull request {Fore.CYAN}{pr.url}{Fore.RESET} created.")
| def prepare_release_pr(
base_branch: str, is_major: bool, token: str, prerelease: str
) -> None:
print()
print(f"Processing release for branch {Fore.CYAN}{base_branch}")
check_call(["git", "checkout", f"origin/{base_branch}"])
changelog = Path("changelog")
features = list(changelog.glob("*.feature.rst"))
breaking = list(changelog.glob("*.breaking.rst"))
is_feature_release = features or breaking
try:
version = find_next_version(
base_branch, is_major, is_feature_release, prerelease
)
except InvalidFeatureRelease as e:
print(f"{Fore.RED}{e}")
raise SystemExit(1)
template_name = ""
if prerelease:
template_name = "release.pre.rst"
elif is_feature_release:
template_name = "release.minor.rst"
else:
template_name = "release.patch.rst"
print(f"Version: {Fore.CYAN}{version}")
release_branch = f"release-{version}"
run(
["git", "config", "user.name", "pytest bot"],
check=True,
)
run(
["git", "config", "user.email", "[email protected]"],
check=True,
)
run(
["git", "checkout", "-b", release_branch, f"origin/{base_branch}"],
check=True,
)
print(f"Branch {Fore.CYAN}{release_branch}{Fore.RESET} created.")
# important to use tox here because we have changed branches, so dependencies
# might have changed as well
cmdline = [
"tox",
"-e",
"release",
"--",
version,
template_name,
"--skip-check-links",
]
print("Running", " ".join(cmdline))
run(
cmdline,
check=True,
)
oauth_url = f"https://{token}:[email protected]/{SLUG}.git"
run(
["git", "push", oauth_url, f"HEAD:{release_branch}", "--force"],
check=True,
)
print(f"Branch {Fore.CYAN}{release_branch}{Fore.RESET} pushed.")
body = PR_BODY.format(version=version)
repo = login(token)
pr = repo.create_pull(
f"Prepare release {version}",
base=base_branch,
head=release_branch,
body=body,
)
print(f"Pull request {Fore.CYAN}{pr.url}{Fore.RESET} created.")
|
51,440 | def guess_engine(store_spec):
engines = list_engines()
# use the pre-defined selection order for netCDF files
for engine in ["netcdf4", "h5netcdf", "scipy"]:
if engine in engines and engines[engine].guess_can_open(store_spec):
return engine
for engine, beckend in engines.items():
try:
if beckend.guess_can_open and beckend.guess_can_open(store_spec):
return engine
except Exception:
logging.exception(f"{engine!r} fails while guessing")
raise ValueError("cannot guess the engine, try passing one explicitly")
| def guess_engine(store_spec):
engines = list_engines()
# use the pre-defined selection order for netCDF files
for engine in ["netcdf4", "h5netcdf", "scipy"]:
if engine in engines and engines[engine].guess_can_open(store_spec):
return engine
for engine, backend in engines.items():
try:
if backend.guess_can_open and backend.guess_can_open(store_spec):
return engine
except Exception:
logging.exception(f"{engine!r} fails while guessing")
raise ValueError("cannot guess the engine, try passing one explicitly")
|
31,866 | def main() -> None:
api_key = demisto.params().get('apikey')
base_url = urljoin(demisto.params()['url'], '')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
headers = {
'Authorization': f'Bearer {api_key}'
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy
)
if demisto.command() == 'test-module':
result = test_api(client)
if result == 'healthy':
return_results('ok')
else:
raise RuntimeError('Penfield API cannot be reached')
elif demisto.command() == 'PenfieldGetAssignee':
result = get_assignee(client, demisto.args())
return_results(result)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(
f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}'
)
| def main() -> None:
api_key = demisto.params().get('apikey')
base_url = urljoin(demisto.params()['url'], '')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
headers = {
'Authorization': f'Bearer {api_key}'
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy
)
if demisto.command() == 'test-module':
result = test_api(client)
if result == 'healthy':
return_results('ok')
else:
raise RuntimeError('Penfield API cannot be reached')
elif demisto.command() == 'penfield-get-assignee':
result = get_assignee(client, demisto.args())
return_results(result)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(
f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}'
)
|
52,192 | def handle_404_error(code, request, exception=None):
extraneous_char_list = [
'!', '#', '$', '%', '&', '(', ')', '*', '+', ',', '-', '.', ':', ';',
'<', '=', '>', '?', '@', '[', ']', '^', '_', '`', '{', '|', '}', '~'
]
if request.path != request.path.lower():
return redirect(request.path.lower(), permanent=True)
if request.path[-2:] == " )":
return redirect(request.path[:-2:], permanent=True)
if request.path[-1] in extraneous_char_list:
return redirect(request.path[:-1:], permanent=True)
return handle_error(code, request, exception)
| def handle_404_error(code, request, exception=None):
extraneous_char_list = [
'!', '#', '$', '%', '&', '(', ')', '*', '+', ',', '-', '.', ':', ';',
'<', '=', '>', '?', '@', '[', ']', '^', '_', '`', '{', '|', '}', '~'
]
if request.path != request.path.lower():
return redirect(request.path.lower(), permanent=True)
if request.path[-2:] == " )":
return redirect(request.path[:-2], permanent=True)
if request.path[-1] in extraneous_char_list:
return redirect(request.path[:-1], permanent=True)
return handle_error(code, request, exception)
|
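The only code change in this record is dropping a redundant slice step; for reference:

path = "/docs/page1;"
print(path[:-1])     # '/docs/page1'
print(path[:-1:])    # '/docs/page1' -- identical result; the trailing ':' (default step of 1) adds nothing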
26,348 | def main():
parser = argparse.ArgumentParser()
parser.add_argument("--num-shards", type=int, default=1)
parser.add_argument("--shard-index", type=int, default=0)
parser.add_argument("dists", metavar="DISTRIBUTION", type=str, nargs="*")
args = parser.parse_args()
typeshed_dir = Path(".").resolve()
if len(args.dists) == 0:
dists = sorted((typeshed_dir / "stubs").iterdir())
else:
dists = [typeshed_dir / "stubs" / d for d in args.dists]
for i, dist in enumerate(dists):
if i % args.num_shards != args.shard_index:
continue
if dist.name in EXCLUDE_LIST:
continue
run_stubtest(dist)
| def main():
parser = argparse.ArgumentParser()
parser.add_argument("--num-shards", type=int, default=1)
parser.add_argument("--shard-index", type=int, default=0)
parser.add_argument("dists", metavar="DISTRIBUTION", type=str, nargs=argparse.ZERO_OR_MORE)
args = parser.parse_args()
typeshed_dir = Path(".").resolve()
if len(args.dists) == 0:
dists = sorted((typeshed_dir / "stubs").iterdir())
else:
dists = [typeshed_dir / "stubs" / d for d in args.dists]
for i, dist in enumerate(dists):
if i % args.num_shards != args.shard_index:
continue
if dist.name in EXCLUDE_LIST:
continue
run_stubtest(dist)
|
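argparse.ZERO_OR_MORE is simply the named constant behind nargs="*"; a quick check:

import argparse

print(argparse.ZERO_OR_MORE)                               # '*'
parser = argparse.ArgumentParser()
parser.add_argument("dists", nargs=argparse.ZERO_OR_MORE)  # same behaviour as nargs="*"
print(parser.parse_args([]).dists)                         # []
print(parser.parse_args(["requests", "attrs"]).dists)      # ['requests', 'attrs']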
28,974 | def dynamic_cooldown(
cooldown: Callable[[Context[Any]], Optional[Cooldown]],
type: Union[BucketType, Callable[[Context[Any]], Any]],
) -> Callable[[T], T]:
"""A decorator that adds a dynamic cooldown to a :class:`.Command`
This differs from :func:`.cooldown` in that it takes a function that
accepts a single parameter of type :class:`.Context` and must
return a :class:`.cooldowns.Cooldown` or ``None``.
If ``None`` is returned then that cooldown is effectively bypassed.
A cooldown allows a command to only be used a specific amount
of times in a specific time frame. These cooldowns can be based
either on a per-guild, per-channel, per-user, per-role or global basis.
Denoted by the third argument of ``type`` which must be of enum
type :class:`.BucketType`.
If a cooldown is triggered, then :exc:`.CommandOnCooldown` is triggered in
:func:`.on_command_error` and the local error handler.
A command can only have a single cooldown.
.. versionadded:: 2.0
Parameters
------------
cooldown: Callable[[:class:`.Context`], Optional[:class:`.cooldowns.Cooldown`]]
A function that takes a message and returns a cooldown that will
apply to this invocation or ``None`` if the cooldown should be bypassed.
type: :class:`.BucketType`
The type of cooldown to have.
"""
if not callable(cooldown):
raise TypeError("A callable must be provided")
if type is BucketType.default:
raise ValueError('BucketType.default cannot be used in dynamic cooldowns')
def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
if isinstance(func, Command):
func._buckets = DynamicCooldownMapping(cooldown, type)
else:
func.__commands_cooldown__ = DynamicCooldownMapping(cooldown, type)
return func
return decorator # type: ignore
| def dynamic_cooldown(
cooldown: Callable[[Context[Any]], Optional[Cooldown]],
type: Union[BucketType, Callable[[Context[Any]], Any]],
) -> Callable[[T], T]:
"""A decorator that adds a dynamic cooldown to a :class:`.Command`
This differs from :func:`.cooldown` in that it takes a function that
accepts a single parameter of type :class:`.Context` and must
return a :class:`.cooldowns.Cooldown` or ``None``.
If ``None`` is returned then that cooldown is effectively bypassed.
A cooldown allows a command to only be used a specific amount
of times in a specific time frame. These cooldowns can be based
either on a per-guild, per-channel, per-user, per-role or global basis.
Denoted by the third argument of ``type`` which must be of enum
type :class:`.BucketType`.
If a cooldown is triggered, then :exc:`.CommandOnCooldown` is triggered in
:func:`.on_command_error` and the local error handler.
A command can only have a single cooldown.
.. versionadded:: 2.0
Parameters
------------
cooldown: Callable[[:class:`.Context`], Optional[:class:`~discord.app_commands.Cooldown`]]
A function that takes a message and returns a cooldown that will
apply to this invocation or ``None`` if the cooldown should be bypassed.
type: :class:`.BucketType`
The type of cooldown to have.
"""
if not callable(cooldown):
raise TypeError("A callable must be provided")
if type is BucketType.default:
raise ValueError('BucketType.default cannot be used in dynamic cooldowns')
def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
if isinstance(func, Command):
func._buckets = DynamicCooldownMapping(cooldown, type)
else:
func.__commands_cooldown__ = DynamicCooldownMapping(cooldown, type)
return func
return decorator # type: ignore
|
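As a usage sketch for the `dynamic_cooldown` pair above (assuming discord.py 2.0, where `commands.Cooldown(rate, per)` takes just a rate and a period), the supplied callable can exempt selected invocations by returning ``None``:

```python
from discord.ext import commands

def owner_free_cooldown(ctx):
    # Guild owners bypass the cooldown; everyone else gets 1 use per 30 seconds.
    if ctx.guild is not None and ctx.author == ctx.guild.owner:
        return None
    return commands.Cooldown(1, 30.0)

@commands.command()
@commands.dynamic_cooldown(owner_free_cooldown, commands.BucketType.user)
async def ping(ctx):
    await ctx.send("pong")
```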
56,954 | def get_server_config(island_args: IslandCmdArgs) -> IslandConfigOptions:
config = IslandConfigOptions({})
update_config_from_file(config, PACKAGE_CONFIG_PATH)
update_config_from_file(config, USER_CONFIG_PATH)
if island_args.server_config_path:
path_to_config = expand_path(island_args.server_config_path)
update_config_from_file(config, path_to_config)
return config
| def get_server_config(island_args: IslandCmdArgs) -> IslandConfigOptions:
config = _load_default_island_config_options()
...
|
32,108 | def ip_details_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
"""
ip command: Returns IP details for a list of IPs
"""
ip_addresses_string = args.get('ip')
ip_addresses_array = argToList(ip_addresses_string)
invalid_ips = []
for ip_address in ip_addresses_array: # Check for Valid IP Inputs
if not is_ip_valid(ip_address, accept_v6_ips=True):
invalid_ips.append(ip_address)
if invalid_ips:
return_warning('The following IP Addresses were found invalid: {}'.format(', '.join(invalid_ips)),
exit=len(invalid_ips) == len(ip_addresses_array))
enhanced = argToBoolean(args.get('enhanced', False))
response = client.get_ip_details(ip_addresses_array, enhanced)
ip_list = response.get("data", {}).get("results", {})
ip_map = {ip["name2"]: ip for ip in ip_list}
for ip_obj in ip_addresses_array:
if ip_obj not in ip_map:
ip_map.update({ip_obj: []})
ip_data_list = []
for ip_key, ip_data in ip_map.items():
if ip_data:
score = to_dbot_score(ip_data.get("score", 0))
dbot_score = Common.DBotScore(
indicator=ip_data.get("name2"),
indicator_type=DBotScoreType.IP,
integration_name='CTIX',
score=score
)
ip_standard_context = Common.IP(
ip=ip_data.get("name2"),
asn=ip_data.get("asn"),
dbot_score=dbot_score
)
ip_data_list.append(CommandResults(
readable_output=tableToMarkdown('IP Data', ip_data, removeNull=True),
outputs_prefix='CTIX.IP',
outputs_key_field='name2',
outputs=ip_data,
indicator=ip_standard_context
))
else:
dbot_score = Common.DBotScore(
indicator=ip_key,
indicator_type=DBotScoreType.IP,
integration_name="CTIX",
score=0,
)
ip_standard_context = Common.IP(
ip=ip_key,
dbot_score=dbot_score
)
ip_data_list.append(CommandResults(
readable_output=f'No matches found for IP {ip_key}',
outputs_prefix='CTIX.IP',
outputs_key_field='name2',
outputs=ip_data,
indicator=ip_standard_context
))
return ip_data_list
| def ip_details_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
"""
ip command: Returns IP details for a list of IPs
"""
ip_addresses_string = args.get('ip')
ip_addresses_array = argToList(ip_addresses_string)
invalid_ips = []
for ip_address in ip_addresses_array: # Check for Valid IP Inputs
if not is_ip_valid(ip_address, accept_v6_ips=True):
invalid_ips.append(ip_address)
if invalid_ips:
return_warning('The following IP Addresses were found invalid: {}'.format(', '.join(invalid_ips)),
exit=len(invalid_ips) == len(ip_addresses_array))
enhanced = argToBoolean(args.get('enhanced', False))
response = client.get_ip_details(ip_addresses_array, enhanced)
ip_list = response.get("data", {}).get("results", {})
ip_map = {ip.get("name2"): ip for ip in ip_list}
for ip_obj in ip_addresses_array:
if ip_obj not in ip_map:
ip_map.update({ip_obj: []})
ip_data_list = []
for ip_key, ip_data in ip_map.items():
if ip_data:
score = to_dbot_score(ip_data.get("score", 0))
dbot_score = Common.DBotScore(
indicator=ip_data.get("name2"),
indicator_type=DBotScoreType.IP,
integration_name='CTIX',
score=score
)
ip_standard_context = Common.IP(
ip=ip_data.get("name2"),
asn=ip_data.get("asn"),
dbot_score=dbot_score
)
ip_data_list.append(CommandResults(
readable_output=tableToMarkdown('IP Data', ip_data, removeNull=True),
outputs_prefix='CTIX.IP',
outputs_key_field='name2',
outputs=ip_data,
indicator=ip_standard_context
))
else:
dbot_score = Common.DBotScore(
indicator=ip_key,
indicator_type=DBotScoreType.IP,
integration_name="CTIX",
score=0,
)
ip_standard_context = Common.IP(
ip=ip_key,
dbot_score=dbot_score
)
ip_data_list.append(CommandResults(
readable_output=f'No matches found for IP {ip_key}',
outputs_prefix='CTIX.IP',
outputs_key_field='name2',
outputs=ip_data,
indicator=ip_standard_context
))
return ip_data_list
|
5,466 | def global_contribution_form(request):
"""Adds contribution form to the context."""
if enabled(request):
return {
'contribution_enabled': True,
'contribution_recurring_payment_enabled': recurring_payment_enabled(request),
'contribution_popup': popup_enabled(request),
'contribution_form': ContributionForm(),
'hide_cta': True,
}
return {'contribution_enabled': False}
| def global_contribution_form(request):
"""Adds contribution form to the context."""
if enabled(request):
return {
'contribution_enabled': True,
'recurring_payment_enabled': recurring_payment_enabled(request),
'contribution_popup': popup_enabled(request),
'contribution_form': ContributionForm(),
'hide_cta': True,
}
return {'contribution_enabled': False}
|
31,744 | def rasterize_command():
url = demisto.getArg('url')
w = demisto.args().get('width', DEFAULT_W_WIDE).rstrip('px')
h = demisto.args().get('height', DEFAULT_H).rstrip('px')
r_type = demisto.args().get('type', 'png')
wait_time = int(demisto.args().get('wait_time', 0))
page_load = int(demisto.args().get('max_page_load_time', DEFAULT_PAGE_LOAD_TIME))
filename = demisto.args().get('filename', 'url')
if not (url.startswith('http')):
url = f'http://{url}'
filename = f'{filename}.{"pdf" if r_type == "pdf" else "png"}' # type: ignore
output = rasterize(path=url, r_type=r_type, width=w, height=h, wait_time=wait_time, max_page_load_time=page_load)
if r_type == 'json':
return_results(CommandResults(raw_response=output, readable_output="Successfully load image for url: " + url))
return
res = fileResult(filename=filename, data=output)
if r_type == 'png':
res['Type'] = entryTypes['image']
demisto.results(res)
| def rasterize_command():
url = demisto.getArg('url')
w = demisto.args().get('width', DEFAULT_W_WIDE).rstrip('px')
h = demisto.args().get('height', DEFAULT_H).rstrip('px')
r_type = demisto.args().get('type', 'png')
wait_time = int(demisto.args().get('wait_time', 0))
page_load = int(demisto.args().get('max_page_load_time', DEFAULT_PAGE_LOAD_TIME))
file_name = demisto.args().get('file_name', 'url')
if not (url.startswith('http')):
url = f'http://{url}'
    file_name = f'{file_name}.{"pdf" if r_type == "pdf" else "png"}'  # type: ignore
output = rasterize(path=url, r_type=r_type, width=w, height=h, wait_time=wait_time, max_page_load_time=page_load)
if r_type == 'json':
return_results(CommandResults(raw_response=output, readable_output="Successfully load image for url: " + url))
return
    res = fileResult(filename=file_name, data=output)
if r_type == 'png':
res['Type'] = entryTypes['image']
demisto.results(res)
|
43,565 | def BasisEmbedding(features, wires):
r"""Encodes :math:`n` binary features into a basis state of :math:`n` qubits.
For example, for ``features=[0, 1, 0]``, the quantum system will be prepared in state :math:`|010 \rangle`.
.. note::
BasisEmbedding uses PennyLane's :class:`~pennylane.ops.BasisState` and only works in conjunction with
devices that implement this function.
Args:
features (array): Binary input array of shape ``(n, )``
wires (Sequence[int]): sequence of qubit indices that the template acts on
Raises:
ValueError: if `features` or `wires` is invalid
"""
if not isinstance(wires, Iterable):
raise ValueError("Wires needs to be a list of wires that the embedding uses; got {}.".format(wires))
if len(features) > len(wires):
raise ValueError("Number of bits to embed cannot be larger than number of wires, which is {}; "
"got {}.".format(len(wires), len(features)))
BasisState(features, wires=wires)
| def BasisEmbedding(features, wires):
r"""Encodes :math:`n` binary features into a basis state of :math:`n` qubits.
For example, for ``features=[0, 1, 0]``, the quantum system will be prepared in state :math:`|010 \rangle`.
.. note::
BasisEmbedding uses PennyLane's :class:`~pennylane.ops.BasisState` and only works in conjunction with
devices that implement this function.
Args:
features (array): Binary input array of shape ``(n, )``
wires (Sequence[int]): sequence of qubit indices that the template acts on
Raises:
ValueError: if ``features`` or ``wires`` is invalid
"""
if not isinstance(wires, Iterable):
raise ValueError("Wires needs to be a list of wires that the embedding uses; got {}.".format(wires))
if len(features) > len(wires):
raise ValueError("Number of bits to embed cannot be larger than number of wires, which is {}; "
"got {}.".format(len(wires), len(features)))
BasisState(features, wires=wires)
|
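A short usage sketch for the `BasisEmbedding` pair above (assuming a standard PennyLane install with the ``default.qubit`` device):

```python
import numpy as np
import pennylane as qml

dev = qml.device("default.qubit", wires=3)

@qml.qnode(dev)
def circuit(features):
    # Prepares |010> for features = [0, 1, 0], then measures each wire in Z.
    qml.BasisEmbedding(features, wires=[0, 1, 2])
    return [qml.expval(qml.PauliZ(w)) for w in range(3)]

print(circuit(np.array([0, 1, 0])))  # expected [1., -1., 1.]
```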
43,086 | def load(f, ir="blackbird"):
"""Load a quantum program from a Blackbird .xbb file.
**Example:**
The following Blackbird file, ``program1.xbb``,
.. code-block:: python3
name test_program
version 1.0
Sgate(0.543, 0.0) | 1
BSgate(0.6, 0.1) | [2, 0]
MeasureFock() | [0, 1, 2]
can be imported into Strawberry Fields using the ``loads``
function:
>>> sf.loads("program1.xbb")
>>> prog.name
'test_program'
>>> prog.num_subsystems
3
>>> prog.print()
Sgate(0.543, 0) | (q[1])
BSgate(0.6, 0.1) | (q[2], q[0])
MeasureFock | (q[0], q[1], q[2])
Args:
f (Union[file, str, pathlib.Path]): File or filename from which
the data is loaded. If file is a string or Path, a value with the
.xbb extension is expected.
ir (str): Intermediate representation language to use. Can be either "blackbird" or "xir".
Returns:
prog (Program): Strawberry Fields program
"""
own_file = False
try:
if hasattr(f, "read"):
# argument file is a file-object
fid = f
else:
# argument file is a Path or string
filename = os.fspath(f)
fid = open(filename, "r")
own_file = True
except TypeError as e:
raise ValueError("file must be a string, pathlib.Path, or file-like object") from e
try:
prog_str = fid.read()
finally:
if own_file:
# safely close the file
fid.close()
# load blackbird program
return loads(prog_str, ir=ir)
| def load(f, ir="blackbird"):
"""Load a quantum program from a Blackbird .xbb file.
**Example:**
The following Blackbird file, ``program1.xbb``,
.. code-block:: python3
name test_program
version 1.0
Sgate(0.543, 0.0) | 1
BSgate(0.6, 0.1) | [2, 0]
MeasureFock() | [0, 1, 2]
can be imported into Strawberry Fields using the ``loads``
function:
>>> sf.loads("program1.xbb", ir="blackbird")
>>> prog.name
'test_program'
>>> prog.num_subsystems
3
>>> prog.print()
Sgate(0.543, 0) | (q[1])
BSgate(0.6, 0.1) | (q[2], q[0])
MeasureFock | (q[0], q[1], q[2])
Args:
f (Union[file, str, pathlib.Path]): File or filename from which
the data is loaded. If file is a string or Path, a value with the
.xbb extension is expected.
ir (str): Intermediate representation language to use. Can be either "blackbird" or "xir".
Returns:
prog (Program): Strawberry Fields program
"""
own_file = False
try:
if hasattr(f, "read"):
# argument file is a file-object
fid = f
else:
# argument file is a Path or string
filename = os.fspath(f)
fid = open(filename, "r")
own_file = True
except TypeError as e:
raise ValueError("file must be a string, pathlib.Path, or file-like object") from e
try:
prog_str = fid.read()
finally:
if own_file:
# safely close the file
fid.close()
# load blackbird program
return loads(prog_str, ir=ir)
|
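The `own_file` bookkeeping in the `load` pair above is a general pattern for accepting either an open file object or a path. A standalone sketch of the same idea, without the Blackbird-specific parsing:

```python
import os

def read_text(f):
    """Read text from a file object, a path string, or a pathlib.Path."""
    own_file = False
    try:
        if hasattr(f, "read"):
            fid = f                        # already an open file-like object
        else:
            fid = open(os.fspath(f), "r")  # a str or Path; we own this handle
            own_file = True
    except TypeError as e:
        raise ValueError("f must be a string, pathlib.Path, or file-like object") from e
    try:
        return fid.read()
    finally:
        if own_file:
            fid.close()  # only close handles we opened ourselves
```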
30,177 | def execute_link(link_cmd_args, record_streams, quiet):
"""
<Purpose>
Executes the passed command plus arguments in a subprocess and returns
the return value of the executed command. If the specified standard output
and standard error of the command are recorded and also returned to the
caller.
<Arguments>
link_cmd_args:
A list where the first element is a command and the remaining
elements are arguments passed to that command.
record_streams:
A bool that specifies whether to redirect standard output and
and standard error to a temporary file which is returned to the
caller (True) or not (False).
<Exceptions>
TBA (see https://github.com/in-toto/in-toto/issues/6)
<Side Effects>
Executes passed command in a subprocess and redirects stdout and stderr
if specified.
<Returns>
- A dictionary containing standard output and standard error of the
executed command, called by-products.
Note: If record_streams is False, the dict values are empty strings.
- The return value of the executed command.
"""
if record_streams:
if (quiet == False): #record_streams true, quiet false
return_code, stdout_str, stderr_str = \
securesystemslib.process.run_duplicate_streams(link_cmd_args)
else: #record_streams true, quiet true
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=securesystemslib.process.PIPE,
stderr=securesystemslib.process.PIPE)
stdout_str = process.stdout
stderr_str = process.stderr
return_code = process.returncode
else:
if (quiet == False): #record_streams false, quiet false
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=None, stderr=None)
stdout_str = stderr_str = ""
return_code = process.returncode
else: #record_streams false, quiet true
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=securesystemslib.process.DEVNULL,
stderr=securesystemslib.process.DEVNULL)
stdout_str = stderr_str = ""
return_code = process.returncode
return {
"stdout": stdout_str,
"stderr": stderr_str,
"return-value": return_code
}
| def execute_link(link_cmd_args, record_streams, quiet):
"""
<Purpose>
Executes the passed command plus arguments in a subprocess and returns
    the return value of the executed command. If specified, the standard output
    and standard error of the command are recorded and returned to the
    caller.
<Arguments>
link_cmd_args:
A list where the first element is a command and the remaining
elements are arguments passed to that command.
record_streams:
      A bool that specifies whether to redirect standard output and
      standard error to a temporary file which is returned to the
caller (True) or not (False).
<Exceptions>
TBA (see https://github.com/in-toto/in-toto/issues/6)
<Side Effects>
Executes passed command in a subprocess and redirects stdout and stderr
if specified.
<Returns>
- A dictionary containing standard output and standard error of the
executed command, called by-products.
Note: If record_streams is False, the dict values are empty strings.
- The return value of the executed command.
"""
if record_streams:
if (quiet == False): #record_streams true, quiet false
return_code, stdout_str, stderr_str = \
securesystemslib.process.run_duplicate_streams(link_cmd_args)
else: #record_streams true, quiet true
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=securesystemslib.process.PIPE,
stderr=securesystemslib.process.PIPE)
stdout_str = process.stdout
stderr_str = process.stderr
return_code = process.returncode
else:
if (quiet == False): #record_streams false, quiet false
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=None, stderr=None)
stdout_str = stderr_str = ""
return_code = process.returncode
else: #record_streams false, quiet true
process = securesystemslib.process.run(link_cmd_args, check=False,
stdout=securesystemslib.process.DEVNULL,
stderr=securesystemslib.process.DEVNULL)
stdout_str = stderr_str = ""
return_code = process.returncode
return {
"stdout": stdout_str,
"stderr": stderr_str,
"return-value": return_code
}
|
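A minimal sketch of the `record_streams`/`quiet` combinations in the `execute_link` pair above, written against the standard library's `subprocess` instead of `securesystemslib.process`; the "duplicate streams" case (echo to the terminal and capture at the same time) is collapsed into plain capture here:

```python
import subprocess
import sys

def run_link(cmd, record_streams, quiet):
    if record_streams:
        # Capture output so it can be returned to the caller as by-products.
        proc = subprocess.run(cmd, check=False, capture_output=True, text=True)
        stdout, stderr = proc.stdout, proc.stderr
    elif quiet:
        # Discard output entirely.
        proc = subprocess.run(cmd, check=False,
                              stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        stdout = stderr = ""
    else:
        # Inherit the parent's streams; output goes straight to the terminal.
        proc = subprocess.run(cmd, check=False, stdout=None, stderr=None)
        stdout = stderr = ""
    return {"stdout": stdout, "stderr": stderr, "return-value": proc.returncode}

print(run_link([sys.executable, "-c", "print('hello')"], record_streams=True, quiet=True))
```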
42,998 | def nullTi(m, n, U):
r"""Nullifies element m,n of U using Ti"""
(nmax, mmax) = U.shape
if nmax != mmax:
raise ValueError("U must be a square matrix")
if (U[m, n+1] == 0 and U[m, n] == 0):
thetar = 0
phir = 0
elif U[m, n+1] == 0:
thetar = np.pi/2
phir = 0
else:
r = U[m, n] / U[m, n+1]
thetar = np.arctan(np.abs(r))
phir = np.angle(r)
return [n, n+1, thetar, phir, nmax]
| def nullTi(m, n, U):
r"""Nullifies element m,n of U using Ti"""
(nmax, mmax) = U.shape
if nmax != mmax:
raise ValueError("U must be a square matrix")
if U[m, n] == 0:
thetar = 0
phir = 0
elif U[m, n+1] == 0:
thetar = np.pi/2
phir = 0
else:
r = U[m, n] / U[m, n+1]
thetar = np.arctan(np.abs(r))
phir = np.angle(r)
return [n, n+1, thetar, phir, nmax]
|
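A small numeric check of the angle computation in the `nullTi` pair above; the matrix is arbitrary and only exercises the `r = U[m, n] / U[m, n+1]` branch:

```python
import numpy as np

U = np.array([[0.6, 0.8],
              [0.8, -0.6]])
m, n = 0, 0

r = U[m, n] / U[m, n + 1]      # ratio of the element to nullify and its neighbour
thetar = np.arctan(np.abs(r))  # rotation angle, arctan(0.75) ~ 0.6435 rad
phir = np.angle(r)             # phase; 0 here because r is real and positive
print(thetar, phir)
```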
39,844 | def load_model_ensemble_and_task_from_hf(
model_id,
cache_dir: Optional[str] = None,
**kwargs: Any,
):
LIBRARY_NAME = "fairseq"
CACHE_DIRECTORY = os.path.join(Path.home(), ".cache", LIBRARY_NAME)
cache_dir = cache_dir or CACHE_DIRECTORY
try:
from huggingface_hub import snapshot_download # type: ignore
except ImportError:
raise ImportError(
"You need to install huggingface_hub to use `load_from_hf_hub`. "
"See https://pypi.org/project/huggingface-hub/ for installation."
)
cached_directory = snapshot_download(
model_id,
cache_dir=cache_dir,
library_name=LIBRARY_NAME,
**kwargs,
)
# fetch all model filenames
filenames = glob.glob(os.path.join(cached_directory, "*.pt"))
model_ensemble = load_model_ensemble_and_task(filenames, arg_overrides={"data": cached_directory})
return model_ensemble
| def load_model_ensemble_and_task_from_hf(
model_id,
cache_dir: Optional[str] = None,
**kwargs: Any,
):
LIBRARY_NAME = "fairseq"
CACHE_DIRECTORY = os.path.join(Path.home(), ".cache", LIBRARY_NAME)
cache_dir = cache_dir or CACHE_DIRECTORY
try:
from huggingface_hub import snapshot_download
except ImportError:
raise ImportError(
"You need to install huggingface_hub to use `load_from_hf_hub`. "
"See https://pypi.org/project/huggingface-hub/ for installation."
)
cached_directory = snapshot_download(
model_id,
cache_dir=cache_dir,
library_name=LIBRARY_NAME,
**kwargs,
)
# fetch all model filenames
filenames = glob.glob(os.path.join(cached_directory, "*.pt"))
model_ensemble = load_model_ensemble_and_task(filenames, arg_overrides={"data": cached_directory})
return model_ensemble
|
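A hedged sketch of the download-and-glob step in the pair above; the repo id is a placeholder, and `snapshot_download` needs network access plus the `huggingface_hub` package:

```python
import glob
import os
from pathlib import Path

from huggingface_hub import snapshot_download

cache_dir = os.path.join(Path.home(), ".cache", "fairseq")

# Download the repo (or reuse the local cache) and get its on-disk directory.
local_dir = snapshot_download("someuser/some-fairseq-model", cache_dir=cache_dir)

# These are the checkpoint files the ensemble loader would be handed.
checkpoints = glob.glob(os.path.join(local_dir, "*.pt"))
print(checkpoints)
```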
55,591 | def update_auth(c, config):
"""
Set auth related configuration from YAML config file.
As an example, this function should update the following TLJH auth
configuration:
```yaml
auth:
type: oauthenticator.github.GitHubOAuthenticator
GitHubOAuthenticator:
client_id: "..."
client_secret: "..."
oauth_callback_url: "..."
ArbitraryKey:
arbitrary_key: "..."
arbitrary_key_with_none_value:
```
by applying the following configuration:
```python
c.JupyterHub.authenticator_class = "oauthenticator.github.GitHubOAuthenticator"
c.GitHubOAuthenticator.client_id = "..."
c.GitHubOAuthenticator.client_secret = "..."
c.GitHubOAuthenticator.oauth_callback_url = "..."
c.ArbitraryKey.arbitrary_key = "..."
```
Note that "auth.type" and "auth.ArbitraryKey.arbitrary_key_with_none_value"
are treated a bit differently. auth.type will always map to
c.JupyterHub.authenticator_class and any configured value being None won't
be set.
"""
tljh_auth_config = config['auth']
c.JupyterHub.authenticator_class = tljh_auth_config['type']
for auth_key, auth_value in tljh_auth_config.items():
if auth_key == "type":
continue
traitlet_class_name = auth_key
traitlet_class_config = auth_value
traitlet_class_instance = getattr(c, traitlet_class_name)
for config_name, config_value in traitlet_class_config.items():
set_if_not_none(traitlet_class_instance, config_name, config_value)
| def update_auth(c, config):
"""
Set auth related configuration from YAML config file.
As an example, this function should update the following TLJH auth
configuration:
```yaml
auth:
type: oauthenticator.github.GitHubOAuthenticator
GitHubOAuthenticator:
client_id: "..."
client_secret: "..."
oauth_callback_url: "..."
ArbitraryKey:
arbitrary_key: "..."
arbitrary_key_with_none_value:
```
by applying the following configuration:
```python
c.JupyterHub.authenticator_class = "oauthenticator.github.GitHubOAuthenticator"
c.GitHubOAuthenticator.client_id = "..."
c.GitHubOAuthenticator.client_secret = "..."
c.GitHubOAuthenticator.oauth_callback_url = "..."
c.ArbitraryKey.arbitrary_key = "..."
```
Note that "auth.type" and "auth.ArbitraryKey.arbitrary_key_with_none_value"
are treated a bit differently. auth.type will always map to
c.JupyterHub.authenticator_class and any configured value being None won't
be set.
"""
tljh_auth_config = config['auth']
c.JupyterHub.authenticator_class = tljh_auth_config['type']
for auth_key, auth_value in tljh_auth_config.items():
if auth_key == "type":
continue
class_name = auth_key
class_config_to_set = auth_value
class_config = c[class_name]
for config_name, config_value in class_config_to_set.items():
set_if_not_none(class_config, config_name, config_value)
|
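A minimal stand-in for the mapping logic in the `update_auth` pair above, using plain dictionaries in place of the traitlets config object and a hypothetical `set_if_not_none` helper:

```python
def set_if_not_none(section, key, value):
    # Skip keys whose YAML value was left empty (None).
    if value is not None:
        section[key] = value

yaml_auth = {
    "type": "oauthenticator.github.GitHubOAuthenticator",
    "GitHubOAuthenticator": {
        "client_id": "abc",
        "client_secret": "def",
        "oauth_callback_url": None,   # stays unset
    },
}

c = {"JupyterHub": {}, "GitHubOAuthenticator": {}}
c["JupyterHub"]["authenticator_class"] = yaml_auth["type"]
for class_name, class_config in yaml_auth.items():
    if class_name == "type":
        continue
    for key, value in class_config.items():
        set_if_not_none(c[class_name], key, value)

print(c["GitHubOAuthenticator"])  # {'client_id': 'abc', 'client_secret': 'def'}
```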
44,083 | def generate_moment(basis_a, basis_b, e, idx):
r"""Return a function that computes the multipole moment integral for two contracted Gaussians.
The multipole moment integral for two primitive Gaussian functions is computed as
.. math::
S^e = \left \langle G_i | q_C^e | G_j \right \rangle
\left \langle G_k | G_l \right \rangle
\left \langle G_m | G_n \right \rangle,
where :math:`G_{i-n}` is a one-dimensional Gaussian function, :math:`q = x, y, z` is the
dimension at which the integral is evaluated, :math:`C` is the origin of the Cartesian
coordinates and :math:`e` is the multipole moment order. For contracted Gaussians, such
integrals will be computed over primitive Gaussians, multiplied by the normalized contraction
coefficients and finally summed over.
The ``idx`` argument determines the dimension :math:`q` at which the integral is computed. It
can be :math:`0, 1, 2` for :math:`x, y, z` components, respectively.
Args:
basis_a (BasisFunction): first basis function
basis_b (BasisFunction): second basis function
e (integer): order of the multipole moment
idx (integer): index determining the dimension of the multipole moment integral
Returns:
function: function that computes the multipole moment integral
**Example**
>>> symbols = ['H', 'Li']
>>> geometry = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]], requires_grad = False)
>>> mol = qml.hf.Molecule(symbols, geometry)
>>> args = []
>>> e, idx = 1, 0
>>> generate_moment(mol.basis_set[0], mol.basis_set[1], e, idx)(*args)
3.12846324e-01
"""
def moment_integral(*args):
r"""Normalize and compute the multipole moment integral for two contracted Gaussians.
Args:
args (array[float]): initial values of the differentiable parameters
Returns:
array[float]: the multipole moment integral between two contracted Gaussian orbitals
"""
args_a = [i[0] for i in args]
args_b = [i[1] for i in args]
la = basis_a.l
lb = basis_b.l
alpha, ca, ra = _generate_params(basis_a.params, args_a)
beta, cb, rb = _generate_params(basis_b.params, args_b)
ca = ca * primitive_norm(basis_a.l, alpha)
cb = cb * primitive_norm(basis_b.l, beta)
na = contracted_norm(basis_a.l, alpha, ca)
nb = contracted_norm(basis_b.l, beta, cb)
p = alpha[:, anp.newaxis] + beta
q = anp.sqrt(anp.pi / p)
rc = (
alpha[:, anp.newaxis] * ra[:, anp.newaxis, anp.newaxis]
+ beta * rb[:, anp.newaxis, anp.newaxis]
) / p
i, j, k = anp.roll(anp.array([0, 2, 1]), idx)
s = (
gaussian_moment(la[i], lb[i], ra[i], rb[i], alpha[:, anp.newaxis], beta, e, rc[i])
* expansion(la[j], lb[j], ra[j], rb[j], alpha[:, anp.newaxis], beta, 0)
* q
* expansion(la[k], lb[k], ra[k], rb[k], alpha[:, anp.newaxis], beta, 0)
* q
)
return (na * nb * (ca[:, anp.newaxis] * cb) * s).sum()
return moment_integral
| def generate_moment(basis_a, basis_b, e, idx):
r"""Return a function that computes the multipole moment integral for two contracted Gaussians.
The multipole moment integral for two primitive Gaussian functions is computed as
.. math::
S^e = \left \langle G_i | q_C^e | G_j \right \rangle
\left \langle G_k | G_l \right \rangle
\left \langle G_m | G_n \right \rangle,
where :math:`G_{i-n}` is a one-dimensional Gaussian function, :math:`q = x, y, z` is the
coordinate at which the integral is evaluated, :math:`C` is the origin of the Cartesian
coordinates and :math:`e` is the multipole moment order. For contracted Gaussians, such
integrals will be computed over primitive Gaussians, multiplied by the normalized contraction
coefficients and finally summed over.
The ``idx`` argument determines the dimension :math:`q` at which the integral is computed. It
can be :math:`0, 1, 2` for :math:`x, y, z` components, respectively.
Args:
basis_a (BasisFunction): first basis function
basis_b (BasisFunction): second basis function
e (integer): order of the multipole moment
idx (integer): index determining the dimension of the multipole moment integral
Returns:
function: function that computes the multipole moment integral
**Example**
>>> symbols = ['H', 'Li']
>>> geometry = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]], requires_grad = False)
>>> mol = qml.hf.Molecule(symbols, geometry)
>>> args = []
>>> e, idx = 1, 0
>>> generate_moment(mol.basis_set[0], mol.basis_set[1], e, idx)(*args)
3.12846324e-01
"""
def moment_integral(*args):
r"""Normalize and compute the multipole moment integral for two contracted Gaussians.
Args:
args (array[float]): initial values of the differentiable parameters
Returns:
array[float]: the multipole moment integral between two contracted Gaussian orbitals
"""
args_a = [i[0] for i in args]
args_b = [i[1] for i in args]
la = basis_a.l
lb = basis_b.l
alpha, ca, ra = _generate_params(basis_a.params, args_a)
beta, cb, rb = _generate_params(basis_b.params, args_b)
ca = ca * primitive_norm(basis_a.l, alpha)
cb = cb * primitive_norm(basis_b.l, beta)
na = contracted_norm(basis_a.l, alpha, ca)
nb = contracted_norm(basis_b.l, beta, cb)
p = alpha[:, anp.newaxis] + beta
q = anp.sqrt(anp.pi / p)
rc = (
alpha[:, anp.newaxis] * ra[:, anp.newaxis, anp.newaxis]
+ beta * rb[:, anp.newaxis, anp.newaxis]
) / p
i, j, k = anp.roll(anp.array([0, 2, 1]), idx)
s = (
gaussian_moment(la[i], lb[i], ra[i], rb[i], alpha[:, anp.newaxis], beta, e, rc[i])
* expansion(la[j], lb[j], ra[j], rb[j], alpha[:, anp.newaxis], beta, 0)
* q
* expansion(la[k], lb[k], ra[k], rb[k], alpha[:, anp.newaxis], beta, 0)
* q
)
return (na * nb * (ca[:, anp.newaxis] * cb) * s).sum()
return moment_integral
|
3,249 | def time_internal_metrics_event(data, project_id, start_time):
if (
settings.SENTRY_INTERNAL_METRICS_PROJECT_ID is None
or project_id != settings.SENTRY_INTERNAL_METRICS_PROJECT_ID
):
return
internals = data["extra"].get("_sentry_internal_metrics")
if not internals:
return
metrics.timing(
"events.internal-time-to-ingest-total",
time() - data["timestamp"],
instance=internals["source"],
sample_rate=1.0,
)
if start_time:
metrics.timing(
"events.internal-time-to-process",
time() - start_time,
instance=internals["source"],
sample_rate=1.0,
)
| def _time_internal_metrics_event(data, project_id, start_time=None):
if (
settings.SENTRY_INTERNAL_METRICS_PROJECT_ID is None
or project_id != settings.SENTRY_INTERNAL_METRICS_PROJECT_ID
):
return
internals = data["extra"].get("_sentry_internal_metrics")
if not internals:
return
metrics.timing(
"events.internal-time-to-ingest-total",
time() - data["timestamp"],
instance=internals["source"],
sample_rate=1.0,
)
if start_time:
metrics.timing(
"events.internal-time-to-process",
time() - start_time,
instance=internals["source"],
sample_rate=1.0,
)
|
37,197 | def pauli_measure(self, qubit, cbit, basis='z'):
"""Measure in the Pauli-X basis."""
return self.append(PauliMeasure(basis=basis), [qubit], [cbit])
| def pauli_measure(self, basis, qubit, cbit):
"""Measure in the Pauli-X basis."""
return self.append(PauliMeasure(basis=basis), [qubit], [cbit])
|
17,953 | def switch(conditions, value_by_condition):
"""Reproduces a switch statement.
Given an array of conditions, returns an array of the same size,
replacing each condition item by the corresponding given value.
Args:
conditions: An array of conditions.
value_by_condition: Values to replace for each condition.
Returns:
:obj:`numpy.ndarray` of :obj:`float`:
An array with the replaced values.
Raises:
:exc:`AssertionError`: When ``value_by_condition`` is empty.
Examples:
>>> switch(np.array([1, 1, 1, 2]), {1: 80, 2: 90})
array([80, 80, 80, 90])
"""
assert len(value_by_condition) > 0, \
"switch must be called with at least one value"
condlist = [
conditions == condition
for condition in value_by_condition.keys()
]
return numpy.select(condlist, value_by_condition.values())
| def switch(conditions, value_by_condition):
"""Reproduces a switch statement.
Given an array of conditions, returns an array of the same size,
replacing each condition item with the matching given value.
Args:
conditions: An array of conditions.
value_by_condition: Values to replace for each condition.
Returns:
:obj:`numpy.ndarray` of :obj:`float`:
An array with the replaced values.
Raises:
:exc:`AssertionError`: When ``value_by_condition`` is empty.
Examples:
>>> switch(np.array([1, 1, 1, 2]), {1: 80, 2: 90})
array([80, 80, 80, 90])
"""
assert len(value_by_condition) > 0, \
"switch must be called with at least one value"
condlist = [
conditions == condition
for condition in value_by_condition.keys()
]
return numpy.select(condlist, value_by_condition.values())
|
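One behaviour of the `switch` pair above that the docstring does not spell out: conditions matching none of the keys fall back to `numpy.select`'s default value of 0. A short check:

```python
import numpy as np

def switch(conditions, value_by_condition):
    condlist = [conditions == c for c in value_by_condition.keys()]
    return np.select(condlist, list(value_by_condition.values()))

print(switch(np.array([1, 3, 2]), {1: 80, 2: 90}))  # [80  0 90] -- 3 matches nothing
```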
30,320 | def create_query_request(query, from_date, to_date):
endpoint_url = 'dv/init-query'
payload = {
"query": query,
"fromDate": from_date,
"toDate": to_date
}
response = http_request('POST', endpoint_url, data=json.dumps(payload))
if response.get('errors'):
return_error(response.get('errors'))
else:
return response.get('data').get('queryId')
| def create_query_request(query, from_date, to_date):
endpoint_url = 'dv/init-query'
payload = {
"query": query,
"fromDate": from_date,
"toDate": to_date
}
response = http_request('POST', endpoint_url, data=json.dumps(payload))
if response.get('errors'):
return_error(response.get('errors'))
else:
return response.get('data', {}).get('queryId')
|
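The change in the pair above, from `response.get('data').get('queryId')` to `response.get('data', {}).get('queryId')`, guards against a missing `data` key. A small illustration with plain dictionaries:

```python
response = {"errors": None}  # no "data" key at all

# Without a default, .get returns None and the chained .get raises AttributeError.
try:
    response.get("data").get("queryId")
except AttributeError as exc:
    print("unguarded:", exc)

# With an empty-dict default, the chain degrades gracefully to None.
print("guarded:", response.get("data", {}).get("queryId"))
```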
27,318 | def get_instance_metadata(provider):
metadata = {'ip': socket.getaddrinfo(socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0)[0][4][0],
'id': socket.gethostname(),
'zone': 'local'}
if USE_KUBERNETES:
metadata['ip'] = os.environ.get('POD_IP', metadata['ip'])
headers = {}
if provider == PROVIDER_GOOGLE:
headers['Metadata-Flavor'] = 'Google'
url = 'http://169.254.169.254/computeMetadata/v1/instance' # metadata.google.internal
mapping = {'zone': 'zone'}
if not USE_KUBERNETES:
mapping.update({'id': 'id'})
elif provider == PROVIDER_AWS:
url = 'http://169.254.169.254/latest/meta-data'
mapping = {'zone': 'placement/availability-zone'}
if not USE_KUBERNETES:
mapping.update({'ip': 'local-ipv4', 'id': 'instance-id'})
elif provider == PROVIDER_OPENSTACK:
mapping = {} # Disable multi-url fetch
url = 'http://169.254.169.254/openstack/latest/meta_data.json'
openstack_metadata = requests.get(url, timeout=5).json()
metadata['zone'] = openstack_metadata.availability_zone
if not USE_KUBERNETES:
# OpenStack does not support providing an IP through metadata so keep
# auto-discovered one.
metadata['id'] = openstack_metadata.uuid
else:
logging.info("No meta-data available for this provider")
return metadata
for k, v in mapping.items():
metadata[k] = requests.get('{}/{}'.format(url, v or k), timeout=2, headers=headers).text
return metadata
| def get_instance_metadata(provider):
metadata = {'ip': socket.getaddrinfo(socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0)[0][4][0],
'id': socket.gethostname(),
'zone': 'local'}
if USE_KUBERNETES:
metadata['ip'] = os.environ.get('POD_IP', metadata['ip'])
headers = {}
if provider == PROVIDER_GOOGLE:
headers['Metadata-Flavor'] = 'Google'
url = 'http://169.254.169.254/computeMetadata/v1/instance' # metadata.google.internal
mapping = {'zone': 'zone'}
if not USE_KUBERNETES:
mapping.update({'id': 'id'})
elif provider == PROVIDER_AWS:
url = 'http://169.254.169.254/latest/meta-data'
mapping = {'zone': 'placement/availability-zone'}
if not USE_KUBERNETES:
mapping.update({'ip': 'local-ipv4', 'id': 'instance-id'})
elif provider == PROVIDER_OPENSTACK:
mapping = {} # Disable multi-url fetch
url = 'http://169.254.169.254/openstack/latest/meta_data.json'
openstack_metadata = requests.get(url, timeout=5).json()
        metadata['zone'] = openstack_metadata['availability_zone']
if not USE_KUBERNETES:
# OpenStack does not support providing an IP through metadata so keep
# auto-discovered one.
            metadata['id'] = openstack_metadata['uuid']
else:
logging.info("No meta-data available for this provider")
return metadata
for k, v in mapping.items():
metadata[k] = requests.get('{}/{}'.format(url, v or k), timeout=2, headers=headers).text
return metadata
|
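A minimal sketch of one lookup from the `get_instance_metadata` pair above, the GCE zone query; it only succeeds on a Google Cloud VM, anywhere else the request times out:

```python
import requests

def gce_zone(timeout=2):
    # The Metadata-Flavor header is mandatory; without it the server rejects the call.
    url = "http://169.254.169.254/computeMetadata/v1/instance/zone"
    try:
        return requests.get(url, headers={"Metadata-Flavor": "Google"}, timeout=timeout).text
    except requests.RequestException:
        return None  # not on GCE, or the metadata service is unreachable

print(gce_zone())
```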
23,646 | def get_total_irradiance(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=None, airmass=None,
albedo=.25, surface_type=None,
model='isotropic',
model_perez='allsitescomposite1990', **kwargs):
r"""
Determine total in-plane irradiance and its beam, sky diffuse and ground
reflected components, using the specified sky diffuse irradiance model.
.. math::
I_{tot} = I_{beam} + I_{sky diffuse} + I_{ground}
Sky diffuse models include:
* isotropic (default)
* klucher
* haydavies
* reindl
* king
* perez
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.[degree]
surface_azimuth : numeric
Panel azimuth from north. [degree]
solar_zenith : numeric
Solar zenith angle. [degree]
solar_azimuth : numeric
Solar azimuth angle. [degree]
dni : numeric
Direct Normal Irradiance. [W/m2]
ghi : numeric
Global horizontal irradiance. [W/m2]
dhi : numeric
Diffuse horizontal irradiance. [W/m2]
dni_extra : None or numeric, default None
Extraterrestrial direct normal irradiance. [W/m2]
airmass : None or numeric, default None
Relative airmass (not adjusted for pressure). [unitless]
albedo : numeric, default 0.25
Surface albedo. [unitless]
surface_type : None or String, default None
Surface type. See :py:func:`~pvlib.irradiance.grounddiffuse` for
the list of accepted values.
model : String, default 'isotropic'
Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies',
'reindl', 'king', 'perez'.
model_perez : String, default 'allsitescomposite1990'
Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`.
Returns
-------
total_irrad : OrderedDict or DataFrame
Contains keys/columns ``'poa_global', 'poa_direct', 'poa_diffuse',
'poa_sky_diffuse', 'poa_ground_diffuse'``.
Notes
-----
Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can
be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`.
The 'perez' model requires relative airmass ('airmass') as input. If
'airmass' is not provided, it is calculated usign the defaults in
:py:func:`~pvlib.irradiance.get_relative_airmass`.
"""
poa_sky_diffuse = get_sky_diffuse(
surface_tilt, surface_azimuth, solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=dni_extra, airmass=airmass, model=model,
model_perez=model_perez)
poa_ground_diffuse = get_ground_diffuse(surface_tilt, ghi, albedo,
surface_type)
aoi_ = aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth)
irrads = poa_components(aoi_, dni, poa_sky_diffuse, poa_ground_diffuse)
return irrads
| def get_total_irradiance(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=None, airmass=None,
albedo=.25, surface_type=None,
model='isotropic',
model_perez='allsitescomposite1990', **kwargs):
r"""
Determine total in-plane irradiance and its beam, sky diffuse and ground
reflected components, using the specified sky diffuse irradiance model.
.. math::
I_{tot} = I_{beam} + I_{sky diffuse} + I_{ground}
Sky diffuse models include:
* isotropic (default)
* klucher
* haydavies
* reindl
* king
* perez
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.[degree]
surface_azimuth : numeric
Panel azimuth from north. [degree]
solar_zenith : numeric
Solar zenith angle. [degree]
solar_azimuth : numeric
Solar azimuth angle. [degree]
dni : numeric
Direct Normal Irradiance. [W/m2]
ghi : numeric
Global horizontal irradiance. [W/m2]
dhi : numeric
Diffuse horizontal irradiance. [W/m2]
dni_extra : None or numeric, default None
Extraterrestrial direct normal irradiance. [W/m2]
airmass : None or numeric, default None
Relative airmass (not adjusted for pressure). [unitless]
albedo : numeric, default 0.25
Surface albedo. [unitless]
surface_type : None or String, default None
Surface type. See :py:func:`~pvlib.irradiance.grounddiffuse` for
the list of accepted values.
model : String, default 'isotropic'
Irradiance model. Can be one of ``'isotropic'``, ``'klucher'``, ``'haydavies'``,
'reindl', 'king', 'perez'.
model_perez : String, default 'allsitescomposite1990'
Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`.
Returns
-------
total_irrad : OrderedDict or DataFrame
Contains keys/columns ``'poa_global', 'poa_direct', 'poa_diffuse',
'poa_sky_diffuse', 'poa_ground_diffuse'``.
Notes
-----
Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can
be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`.
The 'perez' model requires relative airmass ('airmass') as input. If
'airmass' is not provided, it is calculated usign the defaults in
:py:func:`~pvlib.irradiance.get_relative_airmass`.
"""
poa_sky_diffuse = get_sky_diffuse(
surface_tilt, surface_azimuth, solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=dni_extra, airmass=airmass, model=model,
model_perez=model_perez)
poa_ground_diffuse = get_ground_diffuse(surface_tilt, ghi, albedo,
surface_type)
aoi_ = aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth)
irrads = poa_components(aoi_, dni, poa_sky_diffuse, poa_ground_diffuse)
return irrads
|
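A short usage sketch for the `get_total_irradiance` pair above, with scalar inputs (assumes pvlib is installed; the numbers are arbitrary but physically plausible):

```python
import pvlib

total = pvlib.irradiance.get_total_irradiance(
    surface_tilt=30, surface_azimuth=180,   # south-facing, 30 degree tilt
    solar_zenith=45, solar_azimuth=170,
    dni=800, ghi=600, dhi=100,
    dni_extra=1400, model='haydavies')      # 'haydavies' requires dni_extra
print(total['poa_global'])
```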
34,653 | def _convert_lookup_tables_to_regex(
training_data: TrainingData,
use_only_entities: bool = False,
use_word_boundaries: bool = True,
) -> List[Dict[Text, Text]]:
r"""Convert the lookup tables from the training data to regex patterns.
Args:
training_data: The training data.
use_only_entities: If True only regex features with a name equal to a entity
are considered.
use_word_boundaries: If True add `\b` around the regex expression
for each lookup table expressions.
Returns:
A list of regex patterns.
"""
patterns = []
for table in training_data.lookup_tables:
if use_only_entities and table["name"] not in training_data.entities:
continue
regex_pattern = _generate_lookup_regex(table, use_word_boundaries)
lookup_regex = {"name": table["name"], "pattern": regex_pattern}
patterns.append(lookup_regex)
return patterns
| def _convert_lookup_tables_to_regex(
training_data: TrainingData,
use_only_entities: bool = False,
use_word_boundaries: bool = True,
) -> List[Dict[Text, Text]]:
"""Convert the lookup tables from the training data to regex patterns.
Args:
training_data: The training data.
        use_only_entities: If True only regex features with a name equal to an entity
are considered.
use_word_boundaries: If True add `\b` around the regex expression
for each lookup table expressions.
Returns:
A list of regex patterns.
"""
patterns = []
for table in training_data.lookup_tables:
if use_only_entities and table["name"] not in training_data.entities:
continue
regex_pattern = _generate_lookup_regex(table, use_word_boundaries)
lookup_regex = {"name": table["name"], "pattern": regex_pattern}
patterns.append(lookup_regex)
return patterns
|
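The `_generate_lookup_regex` helper used by the pair above is not shown; a plausible stand-in, which joins the table entries into one alternation with optional `\b` word boundaries, might look like this (an assumption about its behaviour, not the actual Rasa implementation):

```python
import re

def lookup_regex(elements, use_word_boundaries=True):
    # Escape each phrase and join them into a single alternation.
    pattern = "|".join(re.escape(e) for e in elements)
    if use_word_boundaries:
        pattern = r"\b(?:" + pattern + r")\b"
    return pattern

city_pattern = lookup_regex(["berlin", "new york", "san francisco"])
print(re.findall(city_pattern, "I flew from new york to berlin"))  # ['new york', 'berlin']
```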
58,912 | def cndparser( filename, exclflg, contsrno, columinfo):
"""1. Reading a cnd file and storing the data in list and arrays.
2. Writing the contact information in an excel sheet if requested (excflg = 1).
3. Plotting a contact information for a contact pair if requested.
Parameters
----------
filename : str
Contact diagnosis input file with .cnd extention.
exclflg : int, in [0,1]
if 1 spread sheet is generated
contsrno : int, optional
cont pair serial number for which a time evolution plot is requested.
columinfo: str, optional
Short name of the column need to be plotted as lited in (NLHIST,CONT)
"""
if not (os.path.exists(filename)):
sys.exit('The file, ' + filename +' does not exist.')
#**********************************************************************
#String variables
#**********************************************************************
#Writing frequency of contact variables
#FRQ = ["ITERATION","SUBSTEP","LOAD_STEP"]
#Name of the contact information for each pair
ContNameHeader = []
#Name of the information for the each writing frequency
LoadTimeHeader = [ 'LoadStep', 'SubStep', 'IterationNo', 'Time', 'Scaled Time for Wear' ]
# Numeric variables
#Information per contact pair
nInfoPerPair = []
#Number of contact pair
paircount = 0
#ContPairInfo[frq,ncontpair,:] = [ContPair ID, p2, p3, ... nInfoPerContPair]
ContPairInfo = []
#LoadTime[frq,:] = [ LoadStep, SubStep, IterationNo, Time, Scaled Time for Wear]
LoadTime = []
#reading the full file line by line
cntr = 0
frq_num = 0
scaletimeflg = 0
with open(filename, 'r') as reader:
for line in reader:
cntr = cntr + 1
if cntr == 1: # <SOLUTION>
pass
elif cntr == 2: # <HEADER FRQ=...>
frq_nam = line[13:-3]
elif line[2:11] == 'COLUMN ID' : # The Contact Pair Info
ContNameHeader.append(line[20:-10])
elif line[2:7] == 'UNITS' : # Reading UNITS Info
pass
elif line[2:-2] == 'HEADER' : # Last line of the header
nInfoPerPair = len(ContNameHeader)
elif((line[1:8] == 'COLDATA' or line[:3].isspace())):
if(line[1:8] == 'COLDATA'):
#<COLDATA LOAD_STEP=" 1" SUBSTEP=" 1" ITERATION=" 4" TIME=" 0.1000000 " PHYSICAL TIME FOR WEAR=" 0.1000000 ">
LoadTime.append(float(line[20:29])) # LOAD_STEP
LoadTime.append(float(line[40:49])) # SUBSTEP
LoadTime.append(float(line[62:71])) # ITERATION
LoadTime.append(float(line[79:95])) # TIME
paircount = 0
if(len(line) == 140):
LoadTime.append(float(line[121:136])) # PHYSICAL TIME FOR WEAR
scaletimeflg = 1
frq_num = frq_num + 1
elif(line[:3].isspace()):
nline = list(map(float,line.split()))
ContPairInfo.append(nline)
paircount = paircount + 1
else:
pass
#Data in the cnd file is fetched
print( 'Number of contact pair: ' , paircount)
print( 'Number information per contact pair: ' , nInfoPerPair)
print('Contact data is written for ', frq_num , ' ', frq_nam+'S')
if(contsrno > paircount):
sys.exit('contact serial number ', contsrno, 'should be less than ', paircount)
ShortName = list(ContNameHeader) # <-- makes a *copy* of the list
for col_num, data in enumerate(ContNameHeader):
if(data == 'Contact Pair ID'):
ShortName[ col_num] = 'CNID'
elif(data == 'Number of Contact Elements in Contact'):
ShortName[ col_num] = 'ELCN'
elif(data == 'Number of Contact Elements in Contact (Sticking)'):
ShortName[ col_num] = 'ELST'
elif(data == 'Max. Chattering Level'):
ShortName[ col_num] = 'CNOS'
elif(data == 'Max. Penetration/Min. Gap'):
ShortName[ col_num] = 'PENE'
elif(data == 'Max. Geometric Gap'):
ShortName[ col_num] = 'CLGP'
elif(data == 'Max. Normal Stiffness'):
ShortName[ col_num] = 'KNMX'
elif(data == 'Min. Normal Stiffness'):
ShortName[ col_num] = 'KNMN'
elif(data == 'Max. Resulting Pinball'):
ShortName[ col_num] = 'PINB'
elif(data == 'Max. Elastic Slip Distance'):
ShortName[ col_num] = 'ESLI'
elif(data == 'Max. Tangential Stiffness'):
ShortName[ col_num] = 'KTMX'
elif(data == 'Min. Tangential Stiffness'):
ShortName[ col_num] = 'KTMN'
elif(data == 'Max. Sliding Distance of Entire Solution'):
ShortName[ col_num] = 'SLID'
elif(data == 'Max. Contact Pressure'):
ShortName[ col_num] = 'PRES'
elif(data == 'Max. Friction Stress'):
ShortName[ col_num] = 'SFRI'
elif(data == 'Average Contact Depth'):
ShortName[ col_num] = 'CNDP'
elif(data == 'Max. Geometric Penetration'):
ShortName[ col_num] = 'CLPE'
elif(data == 'Number of Contact Points Have Too Much Penetration'):
ShortName[ col_num] = 'LGPE'
elif(data == 'Contacting Area'):
ShortName[ col_num] = 'CAREA'
elif(data == 'Max. Contact Damping Pressure'):
ShortName[ col_num] = 'NDMP'
elif(data == 'Max. Contact Damping Tangential stress'):
ShortName[ col_num] = 'TDMP'
elif(data == 'Max. Sliding Distance including near field'):
ShortName[ col_num] = 'GSMX'
elif(data == 'Min. Sliding Distance including near field'):
ShortName[ col_num] = 'GSMN'
elif(data == 'Max. normal fluid penetration pressure on contact surface'):
ShortName[ col_num] = 'FPSC'
elif(data == 'Max. normal fluid penetration pressure on target surface'):
ShortName[ col_num] = 'FPST'
elif(data == 'Total volume loss due to wear on contact surface'):
ShortName[ col_num] = 'WEAR'
elif(data == 'Total strain energy due to contact'):
ShortName[ col_num] = 'CTEN'
elif(data == 'Frictional dissipation energy'):
ShortName[ col_num] = 'CFEN'
elif(data == 'Contact damping dissipation energy'):
ShortName[ col_num] = 'CDEN'
elif(data == 'WB contact pair ID'):
ShortName[ col_num] = 'WBCNID'
elif(data == 'Total contact force due to pressure -x component'):
ShortName[ col_num] = 'CFNX'
elif(data == 'Total contact force due to pressure -y component'):
ShortName[ col_num] = 'CFNY'
elif(data == 'Total contact force due to pressure -z component'):
ShortName[ col_num] = 'CFNZ'
elif(data == 'Maximum torque in axisymmetric analysis with MU=1.0'):
ShortName[ col_num] = 'CTRQ'
elif(data == 'Total contact force due to tangential stress -x component'):
ShortName[ col_num] = 'CFSX'
elif(data == 'Total contact force due to tangential stress -y component'):
ShortName[ col_num] = 'CFSY'
elif(data == 'Total contact force due to tangential stress -z component'):
ShortName[ col_num] = 'CFSZ'
elif(data == 'Number of Contact Points Have Too Much sliding'):
ShortName[ col_num] = 'LGSL'
elif(data == 'Contact pair force convergence norm'):
ShortName[ col_num] = 'NORM'
elif(data == 'Contact pair force criterion'):
ShortName[ col_num] = 'CRIT'
elif(data == 'Max. tangential fluid penetration pressure on contact surface'):
ShortName[ col_num] = 'FPTC'
elif(data == 'Max. tangential fluid penetration pressure on target surface'):
ShortName[ col_num] = 'FPTT'
elif(data == 'Max. sliding distance of current substep for closed contact'):
ShortName[ col_num] = 'SLMX'
else:
sys.exit('ShortName is not assigned for ', data)
if (columinfo in ShortName):
col_numo = ShortName.index(columinfo)
#print(col_num, ShortName[col_num])
#print(*ShortName,sep='\n')
else:
sys.exit('The provided ShortName, ' + columinfo + ' is not in the list')
PairIds = []
if (scaletimeflg == 1):
fqnum = 5
else:
fqnum = 4
for pair in range(paircount):
PairIds.append(int(ContPairInfo[pair][0]))
#converting the list into array,
ContPairInfo = np.array(ContPairInfo)
if (exclflg == 1):
#**********************************************************************
#Writing the data into an excel file
#Contact information is written in seprate sheet in the file
#**********************************************************************
workbook = xlsxwriter.Workbook(filename[:-4]+'.xlsx')
cell_format1 = workbook.add_format({'bold': True, 'font_color': 'black'})
cell_format1.set_font_size(10)
#cell_format1.set_bg_color('gray')
cell_format1.set_font_name('calibri')
cell_format1.set_align('center')
cell_format1.set_valign('center')
#cell_format1.set_rotation(90)
cell_format1.set_text_wrap()
for pair in range(paircount):
pairid = PairIds[pair]
#First write the headings for each pair in seprate sheet
worksheet = workbook.add_worksheet('ContPairID_'+str(pairid))
worksheet.write(0,0, LoadTimeHeader[3],cell_format1)
for col_num, data in enumerate(ShortName[1:]):
worksheet.write(0,col_num+1, data,cell_format1)
#Write contact pair data for a pair at each frequency point
cntr = 0
for frq in range(frq_num):
data = LoadTime[frq*fqnum+3]
worksheet.write(frq+1,0, data)
for col_num, data in enumerate(ContPairInfo[cntr,1:]):
worksheet.write(frq+1,col_num+1, data)
cntr = cntr + paircount
workbook.close()
print('Writing contact data in the excel file, ', filename[:-4]+'.xlsx', ' is completed.')
#**********************************************************************
#Ploting a contactpair information over the time
#**********************************************************************
cnid = contsrno-1
#start:end:increment
data1 = []
data2 = []
data1 = LoadTime[3:fqnum*frq_num:fqnum]
data2 = ContPairInfo[cnid:paircount*frq_num:paircount,col_numo]
#figsize lets use say how big the figure (our canvas) should be in inches (horizontal by vertical)
fig = plt.figure(figsize=(10.5, 7.5))
ax = fig.add_subplot()
#set the labels for each plot starting with ax1
ax.set_ylabel(ContNameHeader[col_numo])
ax.set_xlabel('Time')
fig.suptitle(filename[:-4]+'_'+ShortName[col_numo] )
#ax.title(filename)
fig.tight_layout()
plt.plot(data1, data2)
plt.show()
#Finally we can save the figure as the filename but with .png instead of .txt
fig.savefig(filename[:-4]+'_'+ShortName[col_numo] + '.png')
| def cndparser( filename, exclflg, contsrno, columinfo):
"""1. Reading a cnd file and storing the data in list and arrays.
    2. Writing the contact information in an excel sheet if requested (excflg = 1).
3. Plotting a contact information for a contact pair if requested.
Parameters
----------
filename : str
        Contact diagnosis input file with .cnd extension.
exclflg : int, in [0,1]
if 1 spread sheet is generated
contsrno : int, optional
cont pair serial number for which a time evolution plot is requested.
columinfo: str, optional
        Short name of the column to be plotted, as listed in (NLHIST,CONT)
"""
    if not os.path.exists(filename):
        raise FileNotFoundError(f'The file {filename} does not exist.')
#**********************************************************************
#String variables
#**********************************************************************
#Writing frequency of contact variables
#FRQ = ["ITERATION","SUBSTEP","LOAD_STEP"]
#Name of the contact information for each pair
ContNameHeader = []
#Name of the information for the each writing frequency
LoadTimeHeader = [ 'LoadStep', 'SubStep', 'IterationNo', 'Time', 'Scaled Time for Wear' ]
# Numeric variables
#Information per contact pair
nInfoPerPair = []
#Number of contact pair
paircount = 0
#ContPairInfo[frq,ncontpair,:] = [ContPair ID, p2, p3, ... nInfoPerContPair]
ContPairInfo = []
#LoadTime[frq,:] = [ LoadStep, SubStep, IterationNo, Time, Scaled Time for Wear]
LoadTime = []
#reading the full file line by line
cntr = 0
frq_num = 0
scaletimeflg = 0
with open(filename, 'r') as reader:
for line in reader:
cntr = cntr + 1
if cntr == 1: # <SOLUTION>
pass
elif cntr == 2: # <HEADER FRQ=...>
frq_nam = line[13:-3]
elif line[2:11] == 'COLUMN ID' : # The Contact Pair Info
ContNameHeader.append(line[20:-10])
elif line[2:7] == 'UNITS' : # Reading UNITS Info
pass
elif line[2:-2] == 'HEADER' : # Last line of the header
nInfoPerPair = len(ContNameHeader)
elif((line[1:8] == 'COLDATA' or line[:3].isspace())):
if(line[1:8] == 'COLDATA'):
#<COLDATA LOAD_STEP=" 1" SUBSTEP=" 1" ITERATION=" 4" TIME=" 0.1000000 " PHYSICAL TIME FOR WEAR=" 0.1000000 ">
LoadTime.append(float(line[20:29])) # LOAD_STEP
LoadTime.append(float(line[40:49])) # SUBSTEP
LoadTime.append(float(line[62:71])) # ITERATION
LoadTime.append(float(line[79:95])) # TIME
paircount = 0
if(len(line) == 140):
LoadTime.append(float(line[121:136])) # PHYSICAL TIME FOR WEAR
scaletimeflg = 1
frq_num = frq_num + 1
elif(line[:3].isspace()):
nline = list(map(float,line.split()))
ContPairInfo.append(nline)
paircount = paircount + 1
else:
pass
#Data in the cnd file is fetched
print( 'Number of contact pair: ' , paircount)
print( 'Number information per contact pair: ' , nInfoPerPair)
print('Contact data is written for ', frq_num , ' ', frq_nam+'S')
if(contsrno > paircount):
        sys.exit('contact serial number ' + str(contsrno) + ' should be less than ' + str(paircount))
ShortName = list(ContNameHeader) # <-- makes a *copy* of the list
for col_num, data in enumerate(ContNameHeader):
if(data == 'Contact Pair ID'):
ShortName[ col_num] = 'CNID'
elif(data == 'Number of Contact Elements in Contact'):
ShortName[ col_num] = 'ELCN'
elif(data == 'Number of Contact Elements in Contact (Sticking)'):
ShortName[ col_num] = 'ELST'
elif(data == 'Max. Chattering Level'):
ShortName[ col_num] = 'CNOS'
elif(data == 'Max. Penetration/Min. Gap'):
ShortName[ col_num] = 'PENE'
elif(data == 'Max. Geometric Gap'):
ShortName[ col_num] = 'CLGP'
elif(data == 'Max. Normal Stiffness'):
ShortName[ col_num] = 'KNMX'
elif(data == 'Min. Normal Stiffness'):
ShortName[ col_num] = 'KNMN'
elif(data == 'Max. Resulting Pinball'):
ShortName[ col_num] = 'PINB'
elif(data == 'Max. Elastic Slip Distance'):
ShortName[ col_num] = 'ESLI'
elif(data == 'Max. Tangential Stiffness'):
ShortName[ col_num] = 'KTMX'
elif(data == 'Min. Tangential Stiffness'):
ShortName[ col_num] = 'KTMN'
elif(data == 'Max. Sliding Distance of Entire Solution'):
ShortName[ col_num] = 'SLID'
elif(data == 'Max. Contact Pressure'):
ShortName[ col_num] = 'PRES'
elif(data == 'Max. Friction Stress'):
ShortName[ col_num] = 'SFRI'
elif(data == 'Average Contact Depth'):
ShortName[ col_num] = 'CNDP'
elif(data == 'Max. Geometric Penetration'):
ShortName[ col_num] = 'CLPE'
elif(data == 'Number of Contact Points Have Too Much Penetration'):
ShortName[ col_num] = 'LGPE'
elif(data == 'Contacting Area'):
ShortName[ col_num] = 'CAREA'
elif(data == 'Max. Contact Damping Pressure'):
ShortName[ col_num] = 'NDMP'
elif(data == 'Max. Contact Damping Tangential stress'):
ShortName[ col_num] = 'TDMP'
elif(data == 'Max. Sliding Distance including near field'):
ShortName[ col_num] = 'GSMX'
elif(data == 'Min. Sliding Distance including near field'):
ShortName[ col_num] = 'GSMN'
elif(data == 'Max. normal fluid penetration pressure on contact surface'):
ShortName[ col_num] = 'FPSC'
elif(data == 'Max. normal fluid penetration pressure on target surface'):
ShortName[ col_num] = 'FPST'
elif(data == 'Total volume loss due to wear on contact surface'):
ShortName[ col_num] = 'WEAR'
elif(data == 'Total strain energy due to contact'):
ShortName[ col_num] = 'CTEN'
elif(data == 'Frictional dissipation energy'):
ShortName[ col_num] = 'CFEN'
elif(data == 'Contact damping dissipation energy'):
ShortName[ col_num] = 'CDEN'
elif(data == 'WB contact pair ID'):
ShortName[ col_num] = 'WBCNID'
elif(data == 'Total contact force due to pressure -x component'):
ShortName[ col_num] = 'CFNX'
elif(data == 'Total contact force due to pressure -y component'):
ShortName[ col_num] = 'CFNY'
elif(data == 'Total contact force due to pressure -z component'):
ShortName[ col_num] = 'CFNZ'
elif(data == 'Maximum torque in axisymmetric analysis with MU=1.0'):
ShortName[ col_num] = 'CTRQ'
elif(data == 'Total contact force due to tangential stress -x component'):
ShortName[ col_num] = 'CFSX'
elif(data == 'Total contact force due to tangential stress -y component'):
ShortName[ col_num] = 'CFSY'
elif(data == 'Total contact force due to tangential stress -z component'):
ShortName[ col_num] = 'CFSZ'
elif(data == 'Number of Contact Points Have Too Much sliding'):
ShortName[ col_num] = 'LGSL'
elif(data == 'Contact pair force convergence norm'):
ShortName[ col_num] = 'NORM'
elif(data == 'Contact pair force criterion'):
ShortName[ col_num] = 'CRIT'
elif(data == 'Max. tangential fluid penetration pressure on contact surface'):
ShortName[ col_num] = 'FPTC'
elif(data == 'Max. tangential fluid penetration pressure on target surface'):
ShortName[ col_num] = 'FPTT'
elif(data == 'Max. sliding distance of current substep for closed contact'):
ShortName[ col_num] = 'SLMX'
else:
            sys.exit('ShortName is not assigned for ' + data)
if (columinfo in ShortName):
col_numo = ShortName.index(columinfo)
#print(col_num, ShortName[col_num])
#print(*ShortName,sep='\n')
else:
sys.exit('The provided ShortName, ' + columinfo + ' is not in the list')
PairIds = []
if (scaletimeflg == 1):
fqnum = 5
else:
fqnum = 4
for pair in range(paircount):
PairIds.append(int(ContPairInfo[pair][0]))
#converting the list into array,
ContPairInfo = np.array(ContPairInfo)
if (exclflg == 1):
#**********************************************************************
#Writing the data into an excel file
#Contact information is written in a separate sheet in the file
#**********************************************************************
workbook = xlsxwriter.Workbook(filename[:-4]+'.xlsx')
cell_format1 = workbook.add_format({'bold': True, 'font_color': 'black'})
cell_format1.set_font_size(10)
#cell_format1.set_bg_color('gray')
cell_format1.set_font_name('calibri')
cell_format1.set_align('center')
cell_format1.set_valign('center')
#cell_format1.set_rotation(90)
cell_format1.set_text_wrap()
for pair in range(paircount):
pairid = PairIds[pair]
#First write the headings for each pair in a separate sheet
worksheet = workbook.add_worksheet('ContPairID_'+str(pairid))
worksheet.write(0,0, LoadTimeHeader[3],cell_format1)
for col_num, data in enumerate(ShortName[1:]):
worksheet.write(0,col_num+1, data,cell_format1)
#Write contact pair data for a pair at each frequency point
cntr = 0
for frq in range(frq_num):
data = LoadTime[frq*fqnum+3]
worksheet.write(frq+1,0, data)
for col_num, data in enumerate(ContPairInfo[cntr,1:]):
worksheet.write(frq+1,col_num+1, data)
cntr = cntr + paircount
workbook.close()
print('Writing contact data in the excel file, ', filename[:-4]+'.xlsx', ' is completed.')
#**********************************************************************
#Plotting a contact pair's information over time
#**********************************************************************
cnid = contsrno-1
#start:end:increment
data1 = []
data2 = []
data1 = LoadTime[3:fqnum*frq_num:fqnum]
data2 = ContPairInfo[cnid:paircount*frq_num:paircount,col_numo]
#figsize lets us say how big the figure (our canvas) should be in inches (horizontal by vertical)
fig = plt.figure(figsize=(10.5, 7.5))
ax = fig.add_subplot()
#set the labels for each plot starting with ax1
ax.set_ylabel(ContNameHeader[col_numo])
ax.set_xlabel('Time')
fig.suptitle(filename[:-4]+'_'+ShortName[col_numo] )
#ax.title(filename)
fig.tight_layout()
plt.plot(data1, data2)
plt.show()
#Finally we can save the figure as the filename but with .png instead of .txt
fig.savefig(filename[:-4]+'_'+ShortName[col_numo] + '.png')
|
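
The strided slice near the end of the record above (one row per pair per frequency point, so a single pair's history is every paircount-th row) is the non-obvious part; here is a minimal sketch of the same indexing on made-up NumPy data with hypothetical sizes:

import numpy as np

# Hypothetical sizes: 3 contact pairs tracked over 4 frequency points.
paircount, frq_num = 3, 4
# Rows are stored pair-by-pair for each frequency point, as in ContPairInfo:
# row k * paircount + p holds pair p at frequency point k.
ContPairInfo = np.arange(paircount * frq_num * 2).reshape(paircount * frq_num, 2)
cnid = 1      # zero-based serial number of the pair of interest
col_numo = 1  # column holding the quantity to plot
# Every paircount-th row starting at cnid belongs to the same pair.
history = ContPairInfo[cnid:paircount * frq_num:paircount, col_numo]
print(history)  # [ 3  9 15 21] -- one value per frequency point
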
43,921 | def primitive_norm(l, alpha):
r"""Compute the normalization constant for a primitive Gaussian function.
A Gaussian function is defined as
.. math::
G = x^l y^m z^n e^{-\alpha r^2},
where :math:`l = (l, m, n)` defines the angular momentum quantum numbers, :math:`\alpha`
is the exponent and :math:`r = (x, y, z)` determines the center of the function. The
normalization constant for this function is computed as
.. math::
N(l, \alpha) = (\frac{2\alpha}{\pi})^{3/4} \frac{(4 \alpha)^{(l_x + l_y + l_z)/2}}
{((2l_x-1)!! (2l_y-1)!! (2l_z-1)!!)^{1/2}}.
Args:
l (tuple[int]): angular momentum quantum numbers of the basis function
alpha (array[float]): exponent of the primitive Gaussian function
Returns:
n (array[float]): normalization coefficient
**Example**
>>> l = (0, 0, 0)
>>> alpha = np.array([3.425250914])
>>> n = gaussian_norm(l, alpha)
>>> print(n)
array([1.79444183])
"""
lx, ly, lz = l
n = (
(2 * alpha / np.pi) ** 0.75
* (4 * alpha) ** (sum(l) / 2)
/ anp.sqrt(fac2(2 * lx - 1) * fac2(2 * ly - 1) * fac2(2 * lz - 1))
)
return n
| def primitive_norm(l, alpha):
r"""Compute the normalization constant for a primitive Gaussian function.
A Gaussian function is defined as
.. math::
G = x^l y^m z^n e^{-\alpha r^2},
where :math:`l = (l, m, n)` defines the angular momentum quantum numbers, and :math:`\alpha`
is the exponent and :math:`r = (x, y, z)` determines the center of the function. The
normalization constant for this function is computed as
.. math::
N(l, \alpha) = (\frac{2\alpha}{\pi})^{3/4} \frac{(4 \alpha)^{(l_x + l_y + l_z)/2}}
{((2l_x-1)!! (2l_y-1)!! (2l_z-1)!!)^{1/2}}.
Args:
l (tuple[int]): angular momentum quantum numbers of the basis function
alpha (array[float]): exponent of the primitive Gaussian function
Returns:
n (array[float]): normalization coefficient
**Example**
>>> l = (0, 0, 0)
>>> alpha = np.array([3.425250914])
>>> n = gaussian_norm(l, alpha)
>>> print(n)
array([1.79444183])
"""
lx, ly, lz = l
n = (
(2 * alpha / np.pi) ** 0.75
* (4 * alpha) ** (sum(l) / 2)
/ anp.sqrt(fac2(2 * lx - 1) * fac2(2 * ly - 1) * fac2(2 * lz - 1))
)
return n
|
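
As a quick check on the normalization formula in the docstring above, here is a self-contained sketch (plain NumPy plus a local double-factorial helper standing in for the record's anp/fac2): for l = (0, 0, 0) every double factorial is 1 and the constant reduces to (2*alpha/pi)**0.75, which reproduces the documented value.

import numpy as np

def double_factorial(n):
    # (-1)!! and 0!! are both 1 by convention.
    out = 1
    while n > 1:
        out *= n
        n -= 2
    return out

def norm_sketch(l, alpha):
    # Same expression as primitive_norm in the record above.
    lx, ly, lz = l
    return ((2 * alpha / np.pi) ** 0.75 * (4 * alpha) ** (sum(l) / 2)
            / np.sqrt(double_factorial(2 * lx - 1)
                      * double_factorial(2 * ly - 1)
                      * double_factorial(2 * lz - 1)))

alpha = np.array([3.425250914])
print(norm_sketch((0, 0, 0), alpha))  # ~[1.79444183]
print((2 * alpha / np.pi) ** 0.75)    # same value: all double factorials are 1
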
13,811 | def _get_program_pacing(course_runs):
pacing = [course_run.get('pacing_type') if course_run.get('status') == 'published'
else '' for course_run in course_runs][0]
return 'Self-Paced' if pacing == 'self_paced' else 'Instructor-Paced'
| def _get_program_pacing(course_runs):
pacing = [course_run.get('pacing_type') if course_run.get('status') == 'published'
else '' for course_run in course_runs][0]
return 'Self-paced' if pacing == 'self_paced' else 'Instructor-led'
|
45,928 | def draw_line(
image : torch.Tensor,
p1 : torch.Tensor, p2 : torch.Tensor,
color : torch.Tensor,
) -> torch.Tensor:
r"""Draw a single line into an image.
Args:
image: the input image to where to draw the lines with shape (C,H,W).
p1: the start point of the line with shape (2).
p2: the end point of the line with shape (2).
color: the color of the line with shape (3).
Return:
the image containing the line.
"""
# assign points
x1, y1 = p1
x2, y2 = p2
# calculate coefficients A,B,C of line
# from equation Ax + By + C = 0
A = y2 - y1
B = x1 - x2
C = x2 * y1 - x1 * y2
# make sure A is positive to utilize the function properly
if (A < 0):
A = -A
B = -B
C = -C
# calculate the slope of the line
# check for division by zero
if (B != 0):
m = -A / B
# make sure you start drawing in the right direction
x1, x2 = min(x1, x2), max(x1, x2)
y1, y2 = min(y1, y2), max(y1, y2)
# line equation that determines the distance away from the line
def line_equation(x, y):
return A * x + B * y + C
# vertical line
if B == 0:
image[:, y1:y2 + 1, x1] = color
# horizontal line
elif A == 0:
image[:, y1, x1:x2 + 1] = color
# slope between 0 and 1
elif 0 < m < 1:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y1, color)
if line_equation(i + 1, y1 + 0.5) > 0:
y1 += 1
# slope greater than or equal to 1
elif m >= 1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x1, j, color)
if line_equation(x1 + 0.5, j + 1) < 0:
x1 += 1
# slope less than -1
elif m <= -1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x2, j, color)
if line_equation(x2 - 0.5, j + 1) > 0:
x2 -= 1
# slope between -1 and 0
elif -1 < m < 0:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y2, color)
if line_equation(i + 1, y2 - 0.5) > 0:
y2 -= 1
return image
| def draw_line(
image : torch.Tensor,
p1 : torch.Tensor, p2 : torch.Tensor,
color : torch.Tensor,
) -> torch.Tensor:
r"""Draw a single line into an image.
Args:
image: the input image to where to draw the lines with shape (C,H,W).
p1: the start point [x y] of the line with shape (2).
p2: the end point of the line with shape (2).
color: the color of the line with shape (3).
Return:
the image containing the line.
"""
# assign points
x1, y1 = p1
x2, y2 = p2
# calculate coefficients A,B,C of line
# from equation Ax + By + C = 0
A = y2 - y1
B = x1 - x2
C = x2 * y1 - x1 * y2
# make sure A is positive to utilize the function properly
if (A < 0):
A = -A
B = -B
C = -C
# calculate the slope of the line
# check for division by zero
if (B != 0):
m = -A / B
# make sure you start drawing in the right direction
x1, x2 = min(x1, x2), max(x1, x2)
y1, y2 = min(y1, y2), max(y1, y2)
# line equation that determines the distance away from the line
def line_equation(x, y):
return A * x + B * y + C
# vertical line
if B == 0:
image[:, y1:y2 + 1, x1] = color
# horizontal line
elif A == 0:
image[:, y1, x1:x2 + 1] = color
# slope between 0 and 1
elif 0 < m < 1:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y1, color)
if line_equation(i + 1, y1 + 0.5) > 0:
y1 += 1
# slope greater than or equal to 1
elif m >= 1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x1, j, color)
if line_equation(x1 + 0.5, j + 1) < 0:
x1 += 1
# slope less than -1
elif m <= -1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x2, j, color)
if line_equation(x2 - 0.5, j + 1) > 0:
x2 -= 1
# slope between -1 and 0
elif -1 < m < 0:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y2, color)
if line_equation(i + 1, y2 - 0.5) > 0:
y2 -= 1
return image
|
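
The branching above is a midpoint-style rasteriser: the sign of A*x + B*y + C evaluated half a pixel ahead decides when to step the minor axis. A dependency-free sketch of the 0 < m < 1 branch on hypothetical endpoints, in plain Python instead of torch tensors:

# Rasterise the segment (0, 0) -> (6, 3); the slope m = 0.5 lies in (0, 1).
x1, y1, x2, y2 = 0, 0, 6, 3
A, B, C = y2 - y1, x1 - x2, x2 * y1 - x1 * y2  # coefficients of Ax + By + C = 0

def line_equation(x, y):
    return A * x + B * y + C

pixels = []
y = y1
for x in range(x1, x2 + 1):
    pixels.append((x, y))
    # A positive value at the midpoint (x + 1, y + 0.5) means the true line
    # passes above it, so the next column steps up one row.
    if line_equation(x + 1, y + 0.5) > 0:
        y += 1

print(pixels)  # [(0, 0), (1, 0), (2, 1), (3, 1), (4, 2), (5, 2), (6, 3)]
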
56,409 | def check_available_checkers(analyzer_config_map, ordered_checkers):
"""
This function checks if any of the explicitly enabled/disabled checkers in
ordered_checkers is not supported by any analyzer. In this case program
execution stops.
TODO: This function is not part of the final implementation of this
feature, so it contains some ugly hacks like handling checker names
prefixed by "clang-diagnostic" or "W".
"""
available_checkers = set()
for config_handler in analyzer_config_map.values():
available_checkers.update(config_handler.checker_groups)
missing_checker = None
for checker, _ in ordered_checkers:
if checker.startswith('profile:') or \
checker.startswith('guideline:') or \
checker.startswith('clang-diagnostic') or \
checker.startswith('W'):
continue
if checker not in available_checkers:
missing_checker = checker
break
if missing_checker:
LOG.error("No checker with this name was found: %s",
missing_checker)
sys.exit(1)
| def check_available_checkers(analyzer_config_map, ordered_checkers):
"""
This function checks if any of the explicitly enabled/disabled checkers in
ordered_checkers is not supported by any analyzer. In this case program
execution stops.
TODO: This function is not part of the final implementation of this
feature, so it contains some ugly hacks like handling checker names
prefixed by "clang-diagnostic" or "W".
"""
available_checkers = set()
for config_handler in analyzer_config_map.values():
available_checkers.update(config_handler.checker_groups)
missing_checker = None
for checker, _ in ordered_checkers:
if checker.startswith('profile:') or \
checker.startswith('guideline:') or \
checker.startswith('clang-diagnostic') or \
checker.startswith('W'):
continue
if checker not in available_checkers:
LOG.error("No checker with this name was found: %s", checker)
sys.exit(1)
|
24,560 | def modify_docstring(func=None, prepend: str = None, append: str = None):
"""
A decorator which programmatically prepends and/or appends the docstring
of the decorated method/function. The unmodified/original docstring is
saved as the ``__original_doc__`` attribute.
Parameters
----------
func: callable
The method/function to be decorated.
prepend: `str`
The string to be prepended to the ``func``'s docstring.
append: `str`
The string to be appended to the ``func``'s docstring.
Returns
-------
callable
Wrapped version of the function.
Examples
--------
>>> @modify_docstring(prepend='''Hello''', append='''World''')
... def foo():
... '''Beautiful'''
... pass
>>> foo.__original_doc__
'Beautiful'
>>> foo.__doc__
'Hello\\n\\nBeautiful\\n\\nWorld'
"""
def decorator(f):
sig = inspect.signature(f)
@preserve_signature
@functools.wraps(f)
def wrapper(*args, **kwargs):
# combine args and kwargs into dictionary
bound_args = sig.bind(*args, **kwargs)
bound_args.apply_defaults()
return f(*bound_args.args, **bound_args.kwargs)
if prepend is None and append is None:
raise TypeError(
"Decorator @modify_docstring() missing argument 'prepend' and/or"
" 'append', at least one argument is required."
)
# save the original docstring
setattr(wrapper, "__original_doc__", wrapper.__doc__)
doclines = inspect.cleandoc(wrapper.__doc__).splitlines()
# prepend docstring lines
if isinstance(prepend, str):
prependlines = inspect.cleandoc(prepend).splitlines()
prependlines.append("")
elif prepend is None:
prependlines = []
else:
raise TypeError(
f"Expected type str for argument 'prepend', got {type(prepend)}."
)
# append docstring lines
if isinstance(append, str):
appendlines = inspect.cleandoc(append).splitlines()
appendlines = [""] + appendlines
elif append is None:
appendlines = []
else:
raise TypeError(
f"Expected type str for argument 'append', got {type(append)}."
)
# define new docstring
wrapper.__doc__ = "\n".join(prependlines + doclines + appendlines)
return wrapper
if func is not None:
# `modify_docstring` called as a function
return decorator(func)
else:
# `modify_docstring` called as a decorator "sugar-syntax"
return decorator
| def modify_docstring(func=None, prepend: str = None, append: str = None):
"""
A decorator which programmatically prepends and/or appends the docstring
of the decorated method/function. The unmodified/original docstring is
saved as the ``__original_doc__`` attribute.
Parameters
----------
func: callable
The method/function to be decorated.
prepend: `str`
The string to be prepended to the ``func``'s docstring.
append: `str`
The string to be appended to the ``func``'s docstring.
Returns
-------
callable
Wrapped version of the function.
Examples
--------
>>> @modify_docstring(prepend='''Hello''', append='''World''')
... def vegan_spam():
... '''Beautiful'''
... pass
>>> foo.__original_doc__
'Beautiful'
>>> foo.__doc__
'Hello\\n\\nBeautiful\\n\\nWorld'
"""
def decorator(f):
sig = inspect.signature(f)
@preserve_signature
@functools.wraps(f)
def wrapper(*args, **kwargs):
# combine args and kwargs into dictionary
bound_args = sig.bind(*args, **kwargs)
bound_args.apply_defaults()
return f(*bound_args.args, **bound_args.kwargs)
if prepend is None and append is None:
raise TypeError(
"Decorator @modify_docstring() missing argument 'prepend' and/or"
" 'append', at least one argument is required."
)
# save the original docstring
setattr(wrapper, "__original_doc__", wrapper.__doc__)
doclines = inspect.cleandoc(wrapper.__doc__).splitlines()
# prepend docstring lines
if isinstance(prepend, str):
prependlines = inspect.cleandoc(prepend).splitlines()
prependlines.append("")
elif prepend is None:
prependlines = []
else:
raise TypeError(
f"Expected type str for argument 'prepend', got {type(prepend)}."
)
# append docstring lines
if isinstance(append, str):
appendlines = inspect.cleandoc(append).splitlines()
appendlines = [""] + appendlines
elif append is None:
appendlines = []
else:
raise TypeError(
f"Expected type str for argument 'append', got {type(append)}."
)
# define new docstring
wrapper.__doc__ = "\n".join(prependlines + doclines + appendlines)
return wrapper
if func is not None:
# `modify_docstring` called as a function
return decorator(func)
else:
# `modify_docstring` called as a decorator "sugar-syntax"
return decorator
|
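
The docstring assembly in the record above is just inspect.cleandoc plus list concatenation; a compact sketch of the same joining with hypothetical strings:

import inspect

prepend, original, append = "Hello", "    Beautiful\n    docstring", "World"
lines = (inspect.cleandoc(prepend).splitlines() + [""]
         + inspect.cleandoc(original).splitlines()
         + [""] + inspect.cleandoc(append).splitlines())
print("\n".join(lines))
# Hello
#
# Beautiful
# docstring
#
# World
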
16,861 | def periodic_db_cleanups(instance: Recorder) -> None:
"""Run any database cleanups that need to happen periodiclly.
These cleanups will happen nightly or after any purge.
"""
assert instance.engine is not None
if instance.engine.dialect.name == "sqlite":
# Execute sqlite to create a wal checkpoint and free up disk space
_LOGGER.debug("WAL checkpoint")
with instance.engine.connect() as connection:
connection.execute(text("PRAGMA wal_checkpoint(TRUNCATE);"))
| def periodic_db_cleanups(instance: Recorder) -> None:
"""Run any database cleanups that need to happen periodically.
These cleanups will happen nightly or after any purge.
"""
assert instance.engine is not None
if instance.engine.dialect.name == "sqlite":
# Execute sqlite to create a wal checkpoint and free up disk space
_LOGGER.debug("WAL checkpoint")
with instance.engine.connect() as connection:
connection.execute(text("PRAGMA wal_checkpoint(TRUNCATE);"))
|
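
The statement issued above is a plain SQLite pragma; a standalone sketch against a throwaway database using only the standard-library sqlite3 module (no recorder or SQLAlchemy objects assumed):

import sqlite3

conn = sqlite3.connect("example.db")
conn.execute("PRAGMA journal_mode=WAL;")  # put the database in WAL mode
conn.execute("CREATE TABLE IF NOT EXISTS t (x INTEGER)")
conn.execute("INSERT INTO t VALUES (1)")
conn.commit()
# Force a truncating checkpoint, as the nightly cleanup does; the pragma
# returns one row: (busy flag, WAL frames, frames checkpointed).
print(conn.execute("PRAGMA wal_checkpoint(TRUNCATE);").fetchone())
conn.close()
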